Skip to content

Commit ff5a359

Browse files
ppwwyyxx authored and
pytorchmergebot committed
Fix static initialization issue for static build (pytorch#90133)
Fixes pytorch#83255. Code comes from pytorch#83258 after fixing merge conflicts. Pull Request resolved: pytorch#90133. Approved by: https://github.com/soumith, https://github.com/malfet
1 parent c8f5c19 commit ff5a359

File tree

8 files changed

+22
-0
lines changed

8 files changed

+22
-0
lines changed

aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear.cpp

+1
Original file line numberDiff line numberDiff line change
@@ -247,6 +247,7 @@ class QLinearInt8 final {
247247
};
248248

249249
TORCH_LIBRARY_IMPL(sparse, QuantizedCPU, m) {
250+
register_linear_params();
250251
m.impl(
251252
TORCH_SELECTIVE_NAME("sparse::qlinear"),
252253
TORCH_FN(QLinearInt8<false>::run));

aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp

+1
Original file line numberDiff line numberDiff line change
@@ -240,6 +240,7 @@ class QLinearPackWeightInt8 final {
240240
};
241241

242242
TORCH_LIBRARY_IMPL(sparse, QuantizedCPU, m) {
243+
register_linear_params();
243244
m.impl(
244245
TORCH_SELECTIVE_NAME("sparse::qlinear_prepack"),
245246
TORCH_FN(QLinearPackWeightInt8::run));

aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_unpack.cpp

+1
Original file line numberDiff line numberDiff line change
@@ -133,6 +133,7 @@ class QLinearUnpackWeightInt8 final {
133133
};
134134

135135
TORCH_LIBRARY_IMPL(sparse, CatchAll, m) {
136+
register_linear_params();
136137
m.impl(
137138
TORCH_SELECTIVE_NAME("sparse::qlinear_unpack"),
138139
TORCH_FN(QLinearUnpackWeightInt8::run));

aten/src/ATen/native/quantized/cpu/qlinear.cpp

+2
Original file line numberDiff line numberDiff line change
@@ -955,12 +955,14 @@ class QLinearInt8FusedQDQ final {
955955
};
956956

957957
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
958+
register_linear_params();
958959
m.impl(TORCH_SELECTIVE_NAME("quantized::linear"), TORCH_FN(QLinearInt8<false>::run));
959960
m.impl(TORCH_SELECTIVE_NAME("quantized::linear_relu"), TORCH_FN(QLinearInt8<true>::run));
960961
m.impl(TORCH_SELECTIVE_NAME("quantized::linear_leaky_relu"), TORCH_FN(QLinearLeakyReluInt8::run));
961962
}
962963

963964
TORCH_LIBRARY_IMPL(_quantized, QuantizedCPU, m) {
965+
register_linear_params();
964966
m.impl(TORCH_SELECTIVE_NAME("_quantized::linear"), TORCH_FN(QLinearInt8<false>::run));
965967
}
966968

aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp

+2
Original file line numberDiff line numberDiff line change
@@ -662,6 +662,7 @@ class QLinearDynamicFp16 final {
662662
};
663663

664664
TORCH_LIBRARY_IMPL(quantized, CPU, m) {
665+
register_linear_params();
665666
m.impl(
666667
TORCH_SELECTIVE_NAME("quantized::linear_dynamic"),
667668
TORCH_FN(QLinearDynamicInt8<false>::run));
@@ -677,6 +678,7 @@ TORCH_LIBRARY_IMPL(quantized, CPU, m) {
677678
}
678679

679680
TORCH_LIBRARY_IMPL(_quantized, CPU, m) {
681+
register_linear_params();
680682
m.impl(
681683
TORCH_SELECTIVE_NAME("_quantized::linear_dynamic"),
682684
TORCH_FN(QLinearDynamicInt8<false>::run));

aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp

+4
Original file line numberDiff line numberDiff line change
@@ -381,20 +381,24 @@ class QLinearPackWeightFp16Legacy final {
381381
};
382382

383383
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
384+
register_linear_params();
384385
m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack"), TORCH_FN(QLinearPackWeightInt8::run));
385386
m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack_legacy"), TORCH_FN(QLinearPackWeightInt8Legacy::run));
386387
}
387388

388389
TORCH_LIBRARY_IMPL(quantized, CPU, m) {
390+
register_linear_params();
389391
m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack_fp16"), TORCH_FN(QLinearPackWeightFp16::run));
390392
m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack_fp16_legacy"), TORCH_FN(QLinearPackWeightFp16Legacy::run));
391393
}
392394

393395
TORCH_LIBRARY_IMPL(_quantized, QuantizedCPU, m) {
396+
register_linear_params();
394397
m.impl(TORCH_SELECTIVE_NAME("_quantized::linear_prepack"), TORCH_FN(QLinearPackWeightInt8::run));
395398
}
396399

397400
TORCH_LIBRARY_IMPL(_quantized, CPU, m) {
401+
register_linear_params();
398402
m.impl(TORCH_SELECTIVE_NAME("_quantized::linear_prepack_fp16"), TORCH_FN(QLinearPackWeightFp16::run));
399403
m.impl(TORCH_SELECTIVE_NAME("_quantized::linear_prepack_fp16_legacy"), TORCH_FN(QLinearPackWeightFp16Legacy::run));
400404
}

aten/src/ATen/native/quantized/qconv_unpack.cpp

+8
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,12 @@ and /cudnn/ConvUnpackImpl.cpp, for cudnn.
2828
#include <ATen/ops/from_blob.h>
2929
#endif
3030

31+
template <int kSpatialDim = 2>
32+
int register_conv_params();
33+
34+
extern template int register_conv_params<2>();
35+
extern template int register_conv_params<3>();
36+
3137

3238
namespace at {
3339
namespace native {
@@ -192,6 +198,8 @@ unpack_quantized_prepacked_sizes_conv2d(const IValue& ivalue) {
192198
}
193199

194200
TORCH_LIBRARY_IMPL(quantized, CatchAll, m) {
201+
register_conv_params<2>();
202+
register_conv_params<3>();
195203
// conv_unpack is deprecated, please use conv2d_unpack for 2D conv.
196204
m.impl(TORCH_SELECTIVE_NAME("quantized::conv_unpack"), TORCH_FN(QConvUnpackWeightsInt8<2>::run));
197205
// We use conv2d_unpack to be consistent with conv3d_unpack

aten/src/ATen/native/quantized/qlinear_unpack.cpp

+3
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,8 @@ and /cudnn/linear_unpack_impl.cpp, for cudnn.
1313
#include <torch/custom_class.h>
1414
#include <torch/library.h>
1515

16+
int register_linear_params();
17+
1618
namespace at {
1719
namespace native {
1820
namespace {
@@ -68,6 +70,7 @@ TORCH_LIBRARY_IMPL(quantized, CPU, m) {
6870
}
6971

7072
TORCH_LIBRARY_IMPL(quantized, CatchAll, m) {
73+
register_linear_params();
7174
m.impl(TORCH_SELECTIVE_NAME("quantized::linear_unpack"), TORCH_FN(QLinearUnpackWeightInt8::run));
7275
m.impl(TORCH_SELECTIVE_NAME("quantized::linear_unpack_fp16"), TORCH_FN(QLinearUnpackWeightFp16::run));
7376
}

0 commit comments

Comments (0)