@@ -10775,6 +10775,10 @@ def ref_pairwise_distance(input1, input2):
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ # got: Batching rule not implemented for aten::flatten.using_ints
+ check_batched_forward_grad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool1d),
OpInfo('nn.functional.adaptive_max_pool2d',
@@ -10792,6 +10796,10 @@ def ref_pairwise_distance(input1, input2):
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ # got: Batching rule not implemented for aten::flatten.using_ints
+ check_batched_forward_grad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool2d),
OpInfo('nn.functional.adaptive_max_pool3d',
@@ -10811,6 +10819,10 @@ def ref_pairwise_distance(input1, input2):
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ # got: Batching rule not implemented for aten::flatten.using_ints
+ check_batched_forward_grad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool3d),
OpInfo('nn.functional.avg_pool1d',
@@ -11201,49 +11213,54 @@ def ref_pairwise_distance(input1, input2):
OpInfo('nn.functional.fractional_max_pool2d',
supports_autograd=True,
supports_out=False,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ op=lambda input, *args, **kwargs:
+ wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs),
+ # vmap does not support random operations
+ check_batched_forward_grad=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
test_neg_view=False,
sample_inputs_func=sample_inputs_fractional_max_pool2d,
- decorators=[
- # FIXME: both derivatives are implemented incorrectly
- # https://github.com/pytorch/pytorch/issues/69322
- DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),
- DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
- # FIXME: produces incorrect output on non-contiguous inputs
- # https://github.com/pytorch/pytorch/issues/69325
- DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
+ decorators=(
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
- DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
- ], ),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'))),
OpInfo('nn.functional.fractional_max_pool3d',
supports_autograd=True,
supports_out=False,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ op=lambda input, *args, **kwargs:
+ wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs),
+ # vmap does not support random operations
+ check_batched_forward_grad=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
test_neg_view=False,
sample_inputs_func=sample_inputs_fractional_max_pool3d,
- decorators=[
+ decorators=(
# FIXME: both derivatives are implemented incorrectly
# https://github.com/pytorch/pytorch/issues/69322
- DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),
+ # RuntimeError: cannot reshape tensor of 0 elements into shape [0, 1, -1] because the
+ # unspecified dimension size -1 can be any value and is ambiguous
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
- # FIXME: produces incorrect output on non-contiguous inputs
- # https://github.com/pytorch/pytorch/issues/69325
- DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
- DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
- ], ),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),)),
OpInfo('nn.functional.max_pool1d',
aten_name='max_pool1d',
supports_autograd=True,
supports_out=False,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ # got: Batching rule not implemented for aten::flatten.using_ints
+ check_batched_forward_grad=False,
# TODO: add shape checks
assert_jit_shape_analysis=False,
dtypes=floating_types(),
@@ -11259,6 +11276,10 @@ def ref_pairwise_distance(input1, input2):
# Vmap is not happy with non-contiguous (channels_last) inputs
check_batched_gradgrad=False,
supports_out=False,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ # got: Batching rule not implemented for aten::flatten.using_ints
+ check_batched_forward_grad=False,
assert_jit_shape_analysis=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
@@ -11267,6 +11288,10 @@ def ref_pairwise_distance(input1, input2):
aten_name='max_pool3d',
supports_autograd=True,
supports_out=False,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ # got: Batching rule not implemented for aten::flatten.using_ints
+ check_batched_forward_grad=False,
# TODO: add shape checks
assert_jit_shape_analysis=False,
dtypes=floating_types(),
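
Note on the new flags: `supports_forward_ad=True` / `supports_fwgrad_bwgrad=True` ask the OpInfo machinery to exercise forward-mode AD (and forward-over-reverse second derivatives) for these pooling ops, while `check_batched_forward_grad=False` skips the vmap-over-forward-grad check that currently hits the missing `aten::flatten.using_ints` batching rule. A minimal sketch of forward-mode AD through one of these ops from user code; the shapes below are made up for illustration:

import torch
import torch.nn.functional as F
import torch.autograd.forward_ad as fwAD

# Push a tangent (directional perturbation) through max_pool1d with forward-mode AD.
x = torch.randn(2, 3, 8, dtype=torch.double)  # (N, C, L); illustrative shape
t = torch.randn_like(x)                       # tangent paired with x

with fwAD.dual_level():
    dual_x = fwAD.make_dual(x, t)             # bundle primal and tangent
    dual_out = F.max_pool1d(dual_x, kernel_size=2)
    out, jvp = fwAD.unpack_dual(dual_out)     # jvp is the Jacobian-vector product
    assert out.shape == jvp.shape == (2, 3, 4)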
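
The `op=lambda ...: wrapper_set_seed(...)` indirection on the fractional max-pool entries exists because those ops sample random pooling regions; re-seeding the RNG before every call keeps gradcheck's repeated evaluations consistent, and that same randomness is why the vmap-based forward-grad check is disabled there. A rough sketch of the idea behind the `wrapper_set_seed` helper (the helper name, seed value, and shapes here are chosen purely for illustration):

import torch
import torch.nn.functional as F

def reseed_and_call(op, *args, **kwargs):
    # Reset the RNG so each invocation of a randomized op samples the same
    # fractional pooling regions, making repeated calls comparable.
    torch.manual_seed(0)
    return op(*args, **kwargs)

x = torch.randn(1, 3, 8, 8, dtype=torch.double)
out1 = reseed_and_call(F.fractional_max_pool2d, x, kernel_size=2, output_size=(4, 4))
out2 = reseed_and_call(F.fractional_max_pool2d, x, kernel_size=2, output_size=(4, 4))
assert torch.equal(out1, out2)  # deterministic under the fixed seed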