diff --git a/dpnp/tests/test_product.py b/dpnp/tests/test_product.py
index 9963c85f737..bef8c4259ec 100644
--- a/dpnp/tests/test_product.py
+++ b/dpnp/tests/test_product.py
@@ -12,9 +12,7 @@
     assert_dtype_allclose,
     generate_random_numpy_array,
     get_all_dtypes,
-    is_gpu_device,
     is_ptl,
-    is_win_platform,
     numpy_version,
 )
 from .third_party.cupy import testing
@@ -1499,9 +1497,6 @@ def test_invalid_axes(self, xp):
 
 @testing.with_requires("numpy>=2.2")
 class TestMatvec:
-    @pytest.mark.skipif(
-        is_win_platform() and not is_gpu_device(), reason="SAT-8073"
-    )
     @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True))
     @pytest.mark.parametrize(
         "shape1, shape2",
@@ -2213,9 +2208,6 @@ def test_error(self, xp):
 
 @testing.with_requires("numpy>=2.2")
 class TestVecmat:
-    @pytest.mark.skipif(
-        is_win_platform() and not is_gpu_device(), reason="SAT-8073"
-    )
     @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True))
     @pytest.mark.parametrize(
         "shape1, shape2",
diff --git a/dpnp/tests/test_umath.py b/dpnp/tests/test_umath.py
index 0039d74789f..56f55de2f1c 100644
--- a/dpnp/tests/test_umath.py
+++ b/dpnp/tests/test_umath.py
@@ -23,7 +23,6 @@
     has_support_aspect64,
     is_cuda_device,
     is_gpu_device,
-    is_win_platform,
 )
 
 # full list of umaths
@@ -122,9 +121,6 @@ def test_umaths(test_cases):
             pytest.skip("dpnp.modf is not supported with dpnp.float16")
         elif is_cuda_device():
             pytest.skip("dpnp.modf is not supported on CUDA device")
-    elif umath in ["vecmat", "matvec"]:
-        if is_win_platform() and not is_gpu_device():
-            pytest.skip("SAT-8073")
 
     expected = getattr(numpy, umath)(*args)
     result = getattr(dpnp, umath)(*iargs)