@@ -11,6 +11,8 @@
 from hypothesis import given, settings
 from scipy.stats import norm
 
+from ._utils import assert_allclose
+
 
 def generate_rois(roi_counts, im_dims):
     assert len(roi_counts) == len(im_dims)
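Note: the `._utils` module imported above is new in this PR and its contents are not shown in this diff. As a rough sketch only, assuming the helper exists to replace the deprecated `torch.testing.assert_allclose` with the supported `torch.testing.assert_close` while keeping the legacy behavior these tests rely on (array-like inputs, cross-device and cross-dtype comparison), it might look like the following; the defaults below are assumptions, not the PR's actual code:

    import torch

    def assert_allclose(actual, expected, *, rtol=None, atol=None):
        # Hypothetical stand-in for ._utils.assert_allclose; not part of this diff.
        # Coerce numpy arrays and Python scalars to tensors, as the legacy
        # torch.testing.assert_allclose did.
        if not isinstance(actual, torch.Tensor):
            actual = torch.as_tensor(actual)
        if not isinstance(expected, torch.Tensor):
            expected = torch.as_tensor(expected)
        torch.testing.assert_close(
            actual,
            expected,
            rtol=rtol,
            atol=atol,
            equal_nan=True,      # legacy helper treated NaNs as equal
            check_device=False,  # legacy helper allowed CPU vs. CUDA comparisons
            check_dtype=False,   # legacy helper did not require matching dtypes
        )

With a wrapper along these lines, every call site below changes only by dropping the `torch.testing.` prefix.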
@@ -172,7 +174,7 @@ def bbox_transform_ref():
             legacy_plus_one=True,
         )
 
-        torch.testing.assert_allclose(box_out, a)
+        assert_allclose(box_out, a)
 
     @given(
         roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10),
@@ -268,7 +270,7 @@ def box_with_nms_limit_ref():
         )
 
         for o, o_ref in zip(outputs, output_refs):
-            torch.testing.assert_allclose(o, o_ref)
+            assert_allclose(o, o_ref)
 
     @given(
         dim_1=st.integers(min_value=10, max_value=10),
@@ -314,7 +316,7 @@ def sparse_to_dense_mask_ref(return_presence_mask=False):
             mask=mask,
         )
 
-        torch.testing.assert_allclose(output, a)
+        assert_allclose(output, a)
 
         # Testing return_presence_mask = True
         output, presence_mask = sparse_to_dense_mask_ref(return_presence_mask=True)
@@ -330,8 +332,8 @@ def sparse_to_dense_mask_ref(return_presence_mask=False):
             return_presence_mask=True,
         )
 
-        torch.testing.assert_allclose(output, a)
-        torch.testing.assert_allclose(presence_mask, b)
+        assert_allclose(output, a)
+        assert_allclose(presence_mask, b)
 
     @given(
         A=st.integers(min_value=4, max_value=4),
@@ -382,8 +384,8 @@ def generate_proposals_ref():
             1.0,
             legacy_plus_one=True,
         )
-        torch.testing.assert_allclose(rois, a)
-        torch.testing.assert_allclose(rois_probs, b)
+        assert_allclose(rois, a)
+        assert_allclose(rois_probs, b)
 
     @given(
         bsz=st.integers(1, 5),
@@ -461,9 +463,9 @@ def inference_lstm_ref():
         a, b, c = torch.ops._caffe2.InferenceLSTM(
             lstm_in, num_layers, has_biases, batch_first, is_bidirectional
         )
-        torch.testing.assert_allclose(output, a)
-        torch.testing.assert_allclose(hidden, b)
-        torch.testing.assert_allclose(cell, c)
+        assert_allclose(output, a)
+        assert_allclose(hidden, b)
+        assert_allclose(cell, c)
 
     # Test case is using workspace.has_cuda_support and not workspace.has_gpu_support
     # to exclude it from HIP because tensor interop doesn't work for HIP tensors yet
@@ -517,8 +519,8 @@ def generate_proposals_ref():
             1.0,
             legacy_plus_one=True,
         )
-        torch.testing.assert_allclose(rois, a.cpu())
-        torch.testing.assert_allclose(rois_probs, b.cpu())
+        assert_allclose(rois, a.cpu())
+        assert_allclose(rois_probs, b.cpu())
 
     @given(
         N=st.integers(min_value=1, max_value=2),
@@ -567,7 +569,7 @@ def roi_align_ref(_feature, _rois):
             sampling_ratio=0,
             aligned=False,
         )
-        torch.testing.assert_allclose(roi_feature_ref, roi_feature.cpu())
+        assert_allclose(roi_feature_ref, roi_feature.cpu())
 
     def test_roi_align_cpu(self):
         self._test_roi_align(device="cpu")
@@ -624,7 +626,7 @@ def roi_align_ref(_feature, _rois):
             sampling_ratio=0,
             aligned=False,
         )
-        torch.testing.assert_allclose(roi_feature_ref, roi_feature.cpu())
+        assert_allclose(roi_feature_ref, roi_feature.cpu())
 
     def test_roi_align_rotated_cpu(self):
         self._test_roi_align_rotated(device="cpu")
@@ -674,9 +676,9 @@ def test_collect_and_distribute_fpn_rpn_proposals_op(self, roi_counts):
         rois_idx_restore_int32 = fpn_outputs[-1]
 
         # [rois] + fpn_outputs should be equal to all_outputs
-        torch.testing.assert_allclose(rois, all_outputs[0])
+        assert_allclose(rois, all_outputs[0])
         for x, y in zip(fpn_outputs, all_outputs[1:]):
-            torch.testing.assert_allclose(x, y)
+            assert_allclose(x, y)
 
     @given(X=hu.tensor(), fast_gelu=st.booleans())
     def _test_gelu_op(self, X, fast_gelu, device):
@@ -688,7 +690,7 @@ def _gelu_ref(_X):
 
         rtol = 1e-3 if fast_gelu else 1e-4
         atol = 1e-5
-        torch.testing.assert_allclose(
+        assert_allclose(
             expected_output, actual_output.cpu(), rtol=rtol, atol=atol
         )
 
@@ -719,7 +721,7 @@ def _lengths_ref(X, Y):
             torch.tensor(data), torch.tensor(lengths, dtype=torch.int32)
         )
 
-        torch.testing.assert_allclose(expected_output, actual_output.cpu())
+        assert_allclose(expected_output, actual_output.cpu())
 
     def _test_lengths_sum_op(self, device):
         self._test_lengths_op("LengthsSum", torch.ops._caffe2.LengthsSum, device)
@@ -775,7 +777,7 @@ def _resize_nearest_ref(X):
             height_scale=1.5,
         )
 
-        torch.testing.assert_allclose(expected_output, actual_output.cpu())
+        assert_allclose(expected_output, actual_output.cpu())
 
     def test_resize_nearest_op_cpu(self):
         return self._test_resize_nearest_op("cpu")
@@ -838,26 +840,26 @@ def _piecewise_linear_ref(X):
             binary_input,
         )
 
-        torch.testing.assert_allclose(torch.tensor(expected_output), actual_output)
+        assert_allclose(torch.tensor(expected_output), actual_output)
 
     def test_alias_with_name_is_in_place(self):
         device = "cuda" if workspace.has_cuda_support else "cpu"
         x = torch.tensor([3., 42.]).to(device=device)
         y = torch.ops._caffe2.AliasWithName(x, "new_name")
         x[1] = 6
-        torch.testing.assert_allclose(x, torch.tensor([3., 6.]).to(device=device))
+        assert_allclose(x, torch.tensor([3., 6.]).to(device=device))
         # y should also change because y is alias of x
-        torch.testing.assert_allclose(y, torch.tensor([3., 6.]).to(device=device))
+        assert_allclose(y, torch.tensor([3., 6.]).to(device=device))
 
     @unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
     def test_copy_between_cpu_and_gpu(self):
         x_cpu_ref = torch.tensor([1., 2., 3.])
         x_gpu_ref = x_cpu_ref.to("cuda")
 
         x_gpu = torch.ops._caffe2.CopyCPUToGPU(x_cpu_ref)
-        torch.testing.assert_allclose(x_gpu, x_gpu_ref)
+        assert_allclose(x_gpu, x_gpu_ref)
         x_cpu = torch.ops._caffe2.CopyGPUToCPU(x_gpu)
-        torch.testing.assert_allclose(x_cpu, x_cpu_ref)
+        assert_allclose(x_cpu, x_cpu_ref)
 
     def test_index_hash_op(self):
         data = np.random.randint(low=0, high=1000, size=(4, 4, 4))
@@ -873,7 +875,7 @@ def _index_hash_ref(X):
             torch.tensor(data), seed=0, modulo=100
         )
 
-        torch.testing.assert_allclose(expected_output, actual_output.cpu())
+        assert_allclose(expected_output, actual_output.cpu())
 
     def test_bucketize_op(self):
         data = np.random.rand(8, 10).astype(np.float32) * 1000
@@ -889,7 +891,7 @@ def _bucketize_ref(X):
 
         expected_output = _bucketize_ref(data)
         actual_output = torch.ops._caffe2.Bucketize(torch.tensor(data), boundaries)
-        torch.testing.assert_allclose(expected_output, actual_output.cpu())
+        assert_allclose(expected_output, actual_output.cpu())
 
     @given(X=hu.tensor(), eps=st.floats(min_value=1e-4, max_value=1e-2))
     def test_logit(self, X, eps):
@@ -901,7 +903,7 @@ def ref(X, eps):
 
         expected_output = ref(X, eps)
         actual_output = torch.ops._caffe2.Logit(torch.tensor(X), eps)
-        torch.testing.assert_allclose(expected_output, actual_output.cpu())
+        assert_allclose(expected_output, actual_output.cpu())
 
     def test_percentile(self):
         original_values = np.array([[3.0, 5.0, 3], [5.0, 1.0, 6.0]]).astype(np.float32)
@@ -926,7 +928,7 @@ def _percentile_ref(original_values, value_to_pct, lengths):
             torch.tensor(value_to_pct),
             torch.tensor(lengths),
         )
-        torch.testing.assert_allclose(expected_output, actual_output.cpu())
+        assert_allclose(expected_output, actual_output.cpu())
 
     def test_batch_bucket_one_hot_op(self):
         data = np.array([[2, 3], [4, 1], [2, 5]]).astype(np.float32)
@@ -947,7 +949,7 @@ def _batch_bucket_one_hot_ref(data, lengths, boundaries):
         actual_output = torch.ops._caffe2.BatchBucketOneHot(
             torch.tensor(data), torch.tensor(lengths), torch.tensor(boundaries)
        )
-        torch.testing.assert_allclose(expected_output, actual_output.cpu())
+        assert_allclose(expected_output, actual_output.cpu())
 
     def test_gather_ranges_to_dense_op(self):
         data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
@@ -1033,8 +1035,8 @@ def _merge_id_lists(lengths, values):
                 torch.tensor(values[1]),
             ]
         )
-        torch.testing.assert_allclose(expected_merged_lengths, output_merged_lengths)
-        torch.testing.assert_allclose(expected_merged_values, output_merged_values)
+        assert_allclose(expected_merged_lengths, output_merged_lengths)
+        assert_allclose(expected_merged_values, output_merged_values)
 
     def test_learning_rate(self):
         base_lr = 0.05
@@ -1097,7 +1099,7 @@ def test_pack_segments(self):
         packed_tensor, _ = torch.ops._caffe2.PackSegments(lengths, s)
         self.assertEqual(packed_tensor.numpy().shape, (2, 2, 3, 3))
         unpacked_tensor = torch.ops._caffe2.UnpackSegments(lengths, packed_tensor)
-        torch.testing.assert_allclose(s, unpacked_tensor)
+        assert_allclose(s, unpacked_tensor)
 
 
 if __name__ == "__main__":