
Commit e0c65ab

Elias Ellison authored and facebook-github-bot committed
Revert D23568330: [pytorch][PR] Moves some of TestTorchMathOps to OpInfos
Test Plan: revert-hammer

Differential Revision: D23568330 (pytorch@a953a82)

Original commit changeset: 03e69fccdbfd

fbshipit-source-id: 04ec6843c5eb3c84ddf226dad0088172d9bed84d
1 parent fc51047 · commit e0c65ab

6 files changed: +70 -198 lines changed

test/test_ops.py (-4)

@@ -102,8 +102,6 @@ def test_method_grad(self, device, dtype, op):
     @dtypes(torch.double, torch.cdouble)
     @ops(op_db)
     def test_inplace_grad(self, device, dtype, op):
-        if not op.test_inplace_grad:
-            self.skipTest("Skipped! Inplace gradcheck marked to skip.")
         self._grad_test_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()))

     # Test that gradients of gradients are computed correctly
@@ -120,8 +118,6 @@ def test_method_gradgrad(self, device, dtype, op):
     @dtypes(torch.double, torch.cdouble)
     @ops(op_db)
     def test_inplace_gradgrad(self, device, dtype, op):
-        if not op.test_inplace_grad:
-            self.skipTest("Skipped! Inplace gradgradcheck marked to skip.")
         self._gradgrad_test_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()))

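These two tests gradcheck and gradgradcheck the inplace variant of every operator in op_db; the revert drops the per-op test_inplace_grad gate that let entries opt out. A minimal sketch of the pattern they exercise, with safe_inplace as a hypothetical stand-in for the suite's _get_safe_inplace: gradcheck perturbs its inputs, so the inplace op must be applied to a clone rather than to the leaf tensor being differentiated.

import torch
from torch.autograd import gradcheck

def safe_inplace(inplace_fn):
    def fn(t):
        # Clone first: the inplace op then writes into a non-leaf copy
        # instead of destroying the input gradcheck is perturbing.
        return inplace_fn(t.clone())
    return fn

x = torch.randn(3, dtype=torch.double, requires_grad=True)
# exp_ is differentiable inplace because its backward only needs the result.
assert gradcheck(safe_inplace(torch.Tensor.exp_), (x,))
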
test/test_torch.py (+13 -1)

@@ -20307,7 +20307,19 @@ def __init__(self,
         self.dtypes = dtypes
         self.replace_inf_with_nan = replace_inf_with_nan

-torch_op_tests = [_TorchMathTestMeta('sqrt'),
+torch_op_tests = [_TorchMathTestMeta('asin', reffn='arcsin'),
+                  _TorchMathTestMeta('asinh', reffn='arcsinh'),
+                  _TorchMathTestMeta('sinh'),
+                  _TorchMathTestMeta('acosh', reffn='arccosh'),
+                  _TorchMathTestMeta('tan'),
+                  _TorchMathTestMeta('atan', reffn='arctan'),
+                  _TorchMathTestMeta('atanh', reffn='arctanh'),
+                  _TorchMathTestMeta('tanh'),
+                  _TorchMathTestMeta('log'),
+                  _TorchMathTestMeta('log10'),
+                  _TorchMathTestMeta('log1p'),
+                  _TorchMathTestMeta('log2'),
+                  _TorchMathTestMeta('sqrt'),
                   _TorchMathTestMeta('erf', ref_backend='scipy'),
                   _TorchMathTestMeta('erfc', ref_backend='scipy'),
                   _TorchMathTestMeta('exp'),

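The restored _TorchMathTestMeta entries pair a torch function with a NumPy reference, passing reffn when NumPy spells the name differently (torch.asin vs. numpy.arcsin). A minimal sketch of that comparison, with check_against_numpy as an illustrative stand-in for the real harness in test_torch.py:

import numpy as np
import torch

def check_against_numpy(name, reffn=None):
    torch_fn = getattr(torch, name)
    np_fn = getattr(np, reffn if reffn is not None else name)
    # Sample inside (-1, 1) so domain-restricted ops like asin are defined.
    x = torch.linspace(-0.9, 0.9, 7, dtype=torch.double)
    expected = torch.from_numpy(np_fn(x.numpy()))
    assert torch.allclose(torch_fn(x), expected)

check_against_numpy('asin', reffn='arcsin')
check_against_numpy('tanh')
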
test/test_unary_ufuncs.py (+38 -42)

@@ -19,21 +19,6 @@
 if TEST_NUMPY:
     import numpy as np

-# Tests for unary "universal functions (ufuncs)" that accept a single
-# tensor and have common properties like:
-# - they are elementwise functions
-# - the input shape is the output shape
-# - they typically have method and inplace variants
-# - they typically support the out kwarg
-# - they typically have NumPy or SciPy references
-
-# See NumPy's universal function documentation
-# (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
-# about the concept of ufuncs.
-
-# Functions tested here:
-#
-
 # Interesting values and extremal values for different dtypes
 _unsigned_int_vals = (0, 1, 55, 127)
 _int_vals = (0, -1, 1, -55, 55, -127, 127, -128, 128)
@@ -132,13 +117,50 @@ def generate_numeric_tensors(device, dtype, *,

     return chain(empty_tensors, scalar_tensors, small_tensors, (medium_tensor,), (large_tensor,))

+# Tests for unary "universal functions (ufuncs)" that accept a single
+# tensor and have common properties like:
+# - they are elementwise functions
+# - the input shape is the output shape
+# - they typically have method and inplace variants
+# - they typically support the out kwarg
+# - they typically have NumPy or SciPy references
+
+# See NumPy's universal function documentation
+# (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
+# about the concept of ufuncs.
+
 # TODO: port test_unary_out_op_mem_overlap
 # TODO: add out= tests (different devices, dtypes, mismatched sizes,
 #   correct sizes, 0 size, broadcasted out)
 # TODO: add test for inplace variants erroring on broadcasted inputs
 class TestUnaryUfuncs(TestCase):
     exact_dtype = True

+    # Helper for comparing torch tensors and numpy arrays
+    # TODO: should this or assertEqual also validate that strides are equal?
+    def assertEqualHelper(self, actual, expected, *, dtype, exact_dtype=True, **kwargs):
+        assert isinstance(actual, torch.Tensor)
+
+        # Some NumPy functions return scalars, not arrays
+        if isinstance(expected, Number):
+            self.assertEqual(actual.item(), expected)
+        elif isinstance(expected, np.ndarray):
+            # Handles exact dtype comparisons between arrays and tensors
+            if exact_dtype:
+                # Allows array dtype to be float32 when comparing with bfloat16 tensors
+                #   since NumPy doesn't support the bfloat16 dtype
+                if expected.dtype == np.float32:
+                    assert actual.dtype in (torch.bfloat16, torch.float32)
+                else:
+                    assert expected.dtype == torch_to_numpy_dtype_dict[actual.dtype]
+
+            self.assertEqual(actual,
+                             torch.from_numpy(expected).to(actual.dtype),
+                             exact_device=False,
+                             **kwargs)
+        else:
+            self.assertEqual(actual, expected, exact_device=False, **kwargs)
+
     # Tests bool tensor negation raises the correct error
     def test_neg_error_message(self, device):
         msg = ("Negation, the `\\-` operator, on a bool tensor is not supported."
@@ -212,32 +234,6 @@ def _fn(t):
             actual = alt(t.clone())
             self.assertEqual(actual, expected, rtol=0, atol=0)

-    # Helper for comparing torch tensors and numpy arrays
-    # TODO: should this or assertEqual also validate that strides are equal?
-    def assertEqualHelper(self, actual, expected, msg, *, dtype, exact_dtype=True, **kwargs):
-        assert isinstance(actual, torch.Tensor)
-
-        # Some NumPy functions return scalars, not arrays
-        if isinstance(expected, Number):
-            self.assertEqual(actual.item(), expected)
-        elif isinstance(expected, np.ndarray):
-            # Handles exact dtype comparisons between arrays and tensors
-            if exact_dtype:
-                # Allows array dtype to be float32 when comparing with bfloat16 tensors
-                #   since NumPy doesn't support the bfloat16 dtype
-                if expected.dtype == np.float32:
-                    assert actual.dtype in (torch.bfloat16, torch.float32)
-                else:
-                    assert expected.dtype == torch_to_numpy_dtype_dict[actual.dtype]
-
-            self.assertEqual(actual,
-                             torch.from_numpy(expected).to(actual.dtype),
-                             msg,
-                             exact_device=False,
-                             **kwargs)
-        else:
-            self.assertEqual(actual, expected, msg, exact_device=False, **kwargs)
-
     # Tests that the function and its (array-accepting) reference produce the same
     #   values on a range of tensors, including empty tensors, scalar tensors,
     #   1D tensors and a large 2D tensor with interesting and extremal values
@@ -270,7 +266,7 @@ def test_reference_numerics(self, device, dtype, op):
             else:
                 msg = None

-            self.assertEqualHelper(actual, expected, msg, dtype=dtype)
+            self.assertEqualHelper(actual, expected, dtype=dtype, msg=msg)

     # Tests for testing (dis)contiguity consistency

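The float32 special case in assertEqualHelper exists because NumPy has no bfloat16 dtype: references for bfloat16 tensors arrive as float32 arrays and must be cast back to the tensor's dtype before comparing. A standalone sketch of that path (the tolerances are illustrative, not the suite's):

import numpy as np
import torch

t = torch.tensor([0.25, 0.5, 1.0], dtype=torch.bfloat16)
ref = np.exp(t.to(torch.float32).numpy())      # float32 NumPy reference
expected = torch.from_numpy(ref).to(t.dtype)   # cast down to bfloat16
assert torch.allclose(torch.exp(t), expected, rtol=1e-2, atol=1e-2)
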
torch/testing/_internal/common_device_type.py (+8 -20)

@@ -229,45 +229,33 @@ def instantiate_test(cls, name, test, *, generic_cls=None):

         def instantiate_test_helper(cls, name, *, test, dtype, op):

-            # Constructs the test's name
-            test_name = _construct_test_name(name, op, cls.device_type, dtype)
-
-            # wraps instantiated test with op decorators
-            # NOTE: test_wrapper exists because we don't want to apply
-            #   op-specific decorators to the original test.
-            #   Test-specific decorators are applied to the original test,
-            #   however.
+            # wraps test with op decorators
             if op is not None and op.decorators is not None:
-                @wraps(test)
-                def test_wrapper(*args, **kwargs):
-                    return test(*args, **kwargs)
-
                 for decorator in op.decorators:
-                    test_wrapper = decorator(test_wrapper)
+                    test = decorator(test)

-                test_fn = test_wrapper
-            else:
-                test_fn = test
+            # Constructs the test's name
+            test_name = _construct_test_name(name, op, cls.device_type, dtype)

             # Constructs the test
             @wraps(test)
-            def instantiated_test(self, name=name, test=test_fn, dtype=dtype, op=op):
+            def instantiated_test(self, name=name, test=test, dtype=dtype, op=op):
                 if op is not None and op.should_skip(generic_cls.__name__, name,
                                                      self.device_type, dtype):
                     self.skipTest("Skipped!")

                 device_arg = cls.get_primary_device()
-                if hasattr(test_fn, 'num_required_devices'):
+                if hasattr(test, 'num_required_devices'):
                     device_arg = cls.get_all_devices()

                 # Sets precision and runs test
                 # Note: precision is reset after the test is run
                 guard_precision = self.precision
                 try:
-                    self.precision = self._get_precision_override(test_fn, dtype)
+                    self.precision = self._get_precision_override(test, dtype)
                     args = (device_arg, dtype, op)
                     args = (arg for arg in args if arg is not None)
-                    result = test_fn(self, *args)
+                    result = test(self, *args)
                 finally:
                     self.precision = guard_precision

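The deleted NOTE documents why test_wrapper existed: op-specific decorators were applied to a pass-through wrapper so they could not mutate the shared generic test used by later (device, dtype, op) instantiations. A standalone sketch of that pattern:

from functools import wraps

def apply_op_decorators(test, decorators):
    @wraps(test)
    def test_wrapper(*args, **kwargs):
        return test(*args, **kwargs)
    # Decorate the wrapper; the original test object is left untouched
    # and can be reused for the next instantiation.
    for decorator in decorators:
        test_wrapper = decorator(test_wrapper)
    return test_wrapper

After this revert, decorators are applied to test directly again, as the code did before the reverted PR introduced the wrapper.
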
torch/testing/_internal/common_methods_invocations.py (+7 -127)

@@ -10,7 +10,7 @@
 from torch.testing import \
     (make_non_contiguous,
      _dispatch_dtypes,
-     floating_types, floating_types_and, floating_types_and_half,
+     floating_types, floating_types_and,
      floating_and_complex_types, floating_and_complex_types_and,
      all_types_and_complex_and)
 from torch.testing._internal.common_device_type import \
@@ -62,7 +62,6 @@ def __init__(self,
                  dtypesIfCPU=None,  # dtypes this function is expected to work with on CPU
                  dtypesIfCUDA=None,  # dtypes this function is expected to work with on CUDA
                  dtypesIfROCM=None,  # dtypes this function is expected to work with on ROCM
-                 test_inplace_grad=True,  # whether to gradcheck and gradgradcheck the inplace variant
                  skips=tuple(),  # information about which tests to skip
                  decorators=None):  # decorators to apply to generated tests
         # Validates the dtypes are generated from the dispatch-related functions
@@ -84,8 +83,6 @@ def __init__(self,
         inplace_name = name + "_"
         self.inplace_variant = getattr(torch.Tensor, inplace_name) if hasattr(torch.Tensor, name) else None

-        self.test_inplace_grad = test_inplace_grad
-
         self.skips = skips
         self.decorators = decorators

@@ -200,7 +197,7 @@ def sample_inputs(self, device, dtype, requires_grad=False):



-# Operator database (sorted alphabetically)
+# Operator database
 op_db = [
     # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
     UnaryUfuncInfo('acos',
@@ -215,56 +212,13 @@ def sample_inputs(self, device, dtype, requires_grad=False):
                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
                                dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
-                      SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                               device_type='cuda', dtypes=[torch.float16],
-                               active_if=TEST_WITH_ROCM),
                       SkipInfo('TestGradients', 'test_fn_grad',
                                dtypes=[torch.cdouble], active_if=IS_WINDOWS),
                       SkipInfo('TestGradients', 'test_method_grad',
                                dtypes=[torch.cdouble], active_if=IS_WINDOWS),
                       SkipInfo('TestGradients', 'test_inplace_grad',
                                dtypes=[torch.cdouble], active_if=IS_WINDOWS),
                   )),
-    # NOTE: the derivative for inplace acosh is not implemented
-    UnaryUfuncInfo('acosh',
-                   ref=np.arccosh,
-                   domain=(1, float('inf')),
-                   dtypesIfCPU=floating_types(),
-                   dtypesIfCUDA=floating_types_and_half(),
-                   test_inplace_grad=False),
-    UnaryUfuncInfo('asin',
-                   ref=np.arcsin,
-                   domain=(-1, 1),
-                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    # NOTE: derivative for inplace asinh is not implemented
-    UnaryUfuncInfo('asinh',
-                   ref=np.arcsinh,
-                   dtypesIfCPU=floating_types(),
-                   dtypesIfCUDA=floating_types_and_half(),
-                   test_inplace_grad=False),
-    UnaryUfuncInfo('atan',
-                   ref=np.arctan,
-                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    UnaryUfuncInfo('atanh',
-                   ref=np.arctanh,
-                   domain=(-1, 1),
-                   dtypesIfCPU=floating_types(),
-                   dtypesIfCUDA=floating_types_and_half(),
-                   test_inplace_grad=False),
     UnaryUfuncInfo('cos',
                    ref=np.cos,
                    dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
@@ -287,52 +241,6 @@ def sample_inputs(self, device, dtype, requires_grad=False):
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics', device_type='cpu',
                                dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
                   )),
-    UnaryUfuncInfo('log',
-                   ref=np.log,
-                   domain=(0, float('inf')),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.bfloat16]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    UnaryUfuncInfo('log10',
-                   ref=np.log10,
-                   domain=(0, float('inf')),
-                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    UnaryUfuncInfo('log1p',
-                   ref=np.log1p,
-                   domain=(-1, float('inf')),
-                   dtypesIfCPU=floating_types_and(torch.bfloat16),
-                   dtypesIfCUDA=floating_types_and_half(),
-                   decorators=(precisionOverride({torch.bfloat16: 1e-1}),)),
-    UnaryUfuncInfo('log2',
-                   ref=np.log2,
-                   domain=(0, float('inf')),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.bfloat16]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    UnaryUfuncInfo('neg',
-                   ref=np.negative,
-                   dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
-                   dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
-                   dtypesIfCUDA=all_types_and_complex_and(torch.half)),
     UnaryUfuncInfo('sin',
                    ref=np.sin,
                    handles_large_floats=False,
@@ -344,39 +252,11 @@ def sample_inputs(self, device, dtype, requires_grad=False):
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
                                dtypes=[torch.float], active_if=TEST_WITH_ROCM),
                   )),
-    UnaryUfuncInfo('sinh',
-                   ref=np.sinh,
-                   dtypesIfCPU=floating_and_complex_types(),
-                   decorators=(precisionOverride({torch.float16: 1e-2}),),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=(IS_MACOS or IS_WINDOWS)),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    UnaryUfuncInfo('tan',
-                   ref=np.tan,
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.bfloat16]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=(IS_MACOS or IS_WINDOWS)),
-                   )),
-    UnaryUfuncInfo('tanh',
-                   ref=np.tanh,
-                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=(IS_MACOS or IS_WINDOWS)),
-                   )),
+    UnaryUfuncInfo('neg',
+                   ref=np.negative,
+                   dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
+                   dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
+                   dtypesIfCUDA=all_types_and_complex_and(torch.half)),
 ]

 # Common operator groupings

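For context, the entries removed above follow the OpInfo pattern: each op_db element bundles an operator, a NumPy reference, a valid input domain, and per-test skips, and @ops(op_db) fans every test out over the database. A simplified, hypothetical stand-in (SimpleOpInfo is not the real UnaryUfuncInfo class):

import numpy as np
import torch

class SimpleOpInfo:
    def __init__(self, name, ref, domain=(None, None)):
        self.name = name
        self.op = getattr(torch, name)   # the torch function under test
        self.ref = ref                   # NumPy reference implementation
        self.domain = domain             # (low, high) of valid inputs

    def sample_input(self):
        low, high = self.domain
        low = -5.0 if low is None else low + 1e-3
        high = 5.0 if high is None else high - 1e-3
        return torch.empty(10, dtype=torch.double).uniform_(low, high)

simple_op_db = [SimpleOpInfo('acos', np.arccos, domain=(-1, 1))]
for info in simple_op_db:
    t = info.sample_input()
    expected = torch.from_numpy(info.ref(t.numpy()))
    assert torch.allclose(info.op(t), expected)
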