@@ -10,7 +10,7 @@
 from torch.testing import \
     (make_non_contiguous,
      _dispatch_dtypes,
-     floating_types, floating_types_and, floating_types_and_half,
+     floating_types, floating_types_and,
      floating_and_complex_types, floating_and_complex_types_and,
      all_types_and_complex_and)
 from torch.testing._internal.common_device_type import \
@@ -62,7 +62,6 @@ def __init__(self,
                  dtypesIfCPU=None,  # dtypes this function is expected to work with on CPU
                  dtypesIfCUDA=None,  # dtypes this function is expected to work with on CUDA
                  dtypesIfROCM=None,  # dtypes this function is expected to work with on ROCM
-                 test_inplace_grad=True,  # whether to gradcheck and gradgradcheck the inplace variant
                  skips=tuple(),  # information about which tests to skip
                  decorators=None):  # decorators to apply to generated tests
         # Validates the dtypes are generated from the dispatch-related functions
@@ -84,8 +83,6 @@ def __init__(self,
         inplace_name = name + "_"
         self.inplace_variant = getattr(torch.Tensor, inplace_name) if hasattr(torch.Tensor, name) else None
 
-        self.test_inplace_grad = test_inplace_grad
-
         self.skips = skips
         self.decorators = decorators
 
@@ -200,7 +197,7 @@ def sample_inputs(self, device, dtype, requires_grad=False):
 
 
 
-# Operator database (sorted alphabetically)
+# Operator database
 op_db = [
     # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
     UnaryUfuncInfo('acos',
@@ -215,56 +212,13 @@ def sample_inputs(self, device, dtype, requires_grad=False):
                                 device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
                        SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
                                 dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.float16],
-                                active_if=TEST_WITH_ROCM),
                        SkipInfo('TestGradients', 'test_fn_grad',
                                 dtypes=[torch.cdouble], active_if=IS_WINDOWS),
                        SkipInfo('TestGradients', 'test_method_grad',
                                 dtypes=[torch.cdouble], active_if=IS_WINDOWS),
                        SkipInfo('TestGradients', 'test_inplace_grad',
                                 dtypes=[torch.cdouble], active_if=IS_WINDOWS),
                    )),
-    # NOTE: the derivative for inplace acosh is not implemented
-    UnaryUfuncInfo('acosh',
-                   ref=np.arccosh,
-                   domain=(1, float('inf')),
-                   dtypesIfCPU=floating_types(),
-                   dtypesIfCUDA=floating_types_and_half(),
-                   test_inplace_grad=False),
-    UnaryUfuncInfo('asin',
-                   ref=np.arcsin,
-                   domain=(-1, 1),
-                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    # NOTE: derivative for inplace asinh is not implemented
-    UnaryUfuncInfo('asinh',
-                   ref=np.arcsinh,
-                   dtypesIfCPU=floating_types(),
-                   dtypesIfCUDA=floating_types_and_half(),
-                   test_inplace_grad=False),
-    UnaryUfuncInfo('atan',
-                   ref=np.arctan,
-                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    UnaryUfuncInfo('atanh',
-                   ref=np.arctanh,
-                   domain=(-1, 1),
-                   dtypesIfCPU=floating_types(),
-                   dtypesIfCUDA=floating_types_and_half(),
-                   test_inplace_grad=False),
     UnaryUfuncInfo('cos',
                    ref=np.cos,
                    dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
@@ -287,52 +241,6 @@ def sample_inputs(self, device, dtype, requires_grad=False):
                        SkipInfo('TestUnaryUfuncs', 'test_reference_numerics', device_type='cpu',
                                 dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
                    )),
-    UnaryUfuncInfo('log',
-                   ref=np.log,
-                   domain=(0, float('inf')),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.bfloat16]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    UnaryUfuncInfo('log10',
-                   ref=np.log10,
-                   domain=(0, float('inf')),
-                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    UnaryUfuncInfo('log1p',
-                   ref=np.log1p,
-                   domain=(-1, float('inf')),
-                   dtypesIfCPU=floating_types_and(torch.bfloat16),
-                   dtypesIfCUDA=floating_types_and_half(),
-                   decorators=(precisionOverride({torch.bfloat16: 1e-1}),)),
-    UnaryUfuncInfo('log2',
-                   ref=np.log2,
-                   domain=(0, float('inf')),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.bfloat16]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    UnaryUfuncInfo('neg',
-                   ref=np.negative,
-                   dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
-                   dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
-                   dtypesIfCUDA=all_types_and_complex_and(torch.half)),
     UnaryUfuncInfo('sin',
                    ref=np.sin,
                    handles_large_floats=False,
@@ -344,39 +252,11 @@ def sample_inputs(self, device, dtype, requires_grad=False):
                        SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
                                 dtypes=[torch.float], active_if=TEST_WITH_ROCM),
                    )),
-    UnaryUfuncInfo('sinh',
-                   ref=np.sinh,
-                   dtypesIfCPU=floating_and_complex_types(),
-                   decorators=(precisionOverride({torch.float16: 1e-2}),),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=(IS_MACOS or IS_WINDOWS)),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=IS_WINDOWS),
-                   )),
-    UnaryUfuncInfo('tan',
-                   ref=np.tan,
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.bfloat16]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=(IS_MACOS or IS_WINDOWS)),
-                   )),
-    UnaryUfuncInfo('tanh',
-                   ref=np.tanh,
-                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
-                   skips=(
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
-                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
-                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
-                                active_if=(IS_MACOS or IS_WINDOWS)),
-                   )),
+    UnaryUfuncInfo('neg',
+                   ref=np.negative,
+                   dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
+                   dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
+                   dtypesIfCUDA=all_types_and_complex_and(torch.half)),
 ]
 
 # Common operator groupings
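For context, here is a minimal sketch (not part of this commit) of how an op_db entry such as the retained UnaryUfuncInfo('neg', ...) above could be checked against its NumPy reference by hand, in the spirit of the generated test_reference_numerics tests. It assumes the OpInfo constructor stores the `name` and `ref` arguments shown in the diff as attributes of the same names, and that op_db comes from the module this diff edits; the helper function name is illustrative only.

import numpy as np
import torch
# Assumed location of op_db (the module being modified in this diff).
from torch.testing._internal.common_methods_invocations import op_db

def compare_with_reference(op_info, device="cpu", dtype=torch.float64):
    """Illustrative helper: compare torch.<name> against op_info.ref on a small input."""
    # The real tests draw inputs via op_info.sample_inputs(...); a fixed range keeps
    # this sketch short and stays inside the (-1, 1) domain used by entries like 'acos'.
    t = torch.linspace(-0.9, 0.9, steps=8, device=device, dtype=dtype)

    torch_fn = getattr(torch, op_info.name)   # e.g. torch.neg
    expected = op_info.ref(t.cpu().numpy())   # e.g. np.negative
    actual = torch_fn(t)

    np.testing.assert_allclose(actual.cpu().numpy(), expected, rtol=1e-6, atol=1e-6)

# Example usage: run the comparison for every entry in the database.
for op in op_db:
    compare_with_reference(op)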