@@ -2771,94 +2771,84 @@ Tensor& linalg_norm_out(const Tensor& X, c10::string_view ord, OptionalIntArrayR
////////////////////////////////////////////////////////////////////////////////
//                              Frobenius Norm                                //
-//             Just used in torch.norm. It should not be removed.             //
////////////////////////////////////////////////////////////////////////////////

Tensor frobenius_norm(const Tensor& self, IntArrayRef dim, bool keepdim) {
-  TORCH_CHECK(
-      dim.size() <= 2,
-      "Expected at most 2 dimensions, but got ",
-      dim.size(),
-      " dimensions instead.");
-  Tensor result;
-  if (dim.size() == 1 || dim.size() == 0) {
-    result = at::norm(self, 2, dim, keepdim);
-  } else {
-    auto dim_ = dim.vec();
-    maybe_wrap_dims(dim_, self.dim());
-    TORCH_CHECK(dim_[0] != dim_[1], "Expected dims to be different, got ", dim, " instead");
-    if (self.is_complex()) {
-      result = at::sqrt(at::sum(at::real(self.conj() * self), dim_, keepdim));
-    } else {
-      result = at::sqrt(at::sum((self * self), dim_, keepdim));
-    }
+  auto device = self.device();
+  if (self.layout() == Layout::Strided && (device == kCPU || device == kCUDA || device == kMeta)) {
+    TORCH_WARN_ONCE(
+      "at::frobenius_norm is deprecated and it is just left for JIT compatibility. ",
+      "It will be removed in a future PyTorch release. Please use ",
+      "`linalg.vector_norm(A, 2., dim, keepdim)` instead"
+    );
  }
-  TORCH_INTERNAL_ASSERT(result.scalar_type() == toRealValueType(self.scalar_type()));
-  TORCH_INTERNAL_ASSERT(result.layout() == c10::Layout::Strided);
-  return result;
+  // This frobenius norm is just wrong, but well
+  TORCH_CHECK(dim.size() <= 2,
+              "Expected at most 2 dimensions, but got ", dim.size(), " dimensions instead.");
+  // Dispatch to at::norm as it is implemented for Sparse and MPS backends
+  // TODO Make the backends implement vector_norm and matrix_norm
+  return at::norm(self, 2., dim, keepdim);
}

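Note on the hunk above: for strided CPU/CUDA/Meta inputs, the hand-rolled sqrt-of-sum-of-squares body is replaced by a one-time deprecation warning plus a forward to `at::norm`; sparse and MPS tensors skip the warning because they still have to route through this op, which is also why the dispatch targets `at::norm` rather than `at::linalg_vector_norm` (per the TODO). A minimal sketch, not part of this diff, of the equivalence the warning message points callers to, assuming a real-valued strided tensor:

```cpp
#include <ATen/ATen.h>
#include <vector>

// Sketch only: the deprecated entry point and the replacement named in the
// warning should agree for a strided CPU tensor.
void check_frobenius_migration() {
  at::Tensor A = at::randn({3, 4});
  std::vector<int64_t> dims = {0, 1};
  // Deprecated path: warns once, then forwards to at::norm(self, 2., dim, keepdim).
  at::Tensor old_val = at::frobenius_norm(A, dims, /*keepdim=*/false);
  // Replacement suggested by the warning message.
  at::Tensor new_val = at::linalg_vector_norm(A, 2., dims, /*keepdim=*/false, c10::nullopt);
  TORCH_CHECK(at::allclose(old_val, new_val));
}
```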
Tensor& frobenius_norm_out(const Tensor& self,
    IntArrayRef dim,
    bool keepdim,
    Tensor& result) {
-  auto result_ = at::native::frobenius_norm(self, dim, keepdim);
-  // NOTE: It would be better to avoid resize and copy by using norm_out and sqrt_out in frobenius_norm.
-  // However, norm_out and sqrt_out do not support automatic differentiation.
-  // More details here: https://github.com/pytorch/pytorch/pull/44095#discussion_r486673947
-  at::native::resize_output(result, result_.sizes());
-  result.copy_(result_);
-  return result;
+  auto device = self.device();
+  if (self.layout() == Layout::Strided && (device == kCPU || device == kCUDA || device == kMeta)) {
+    TORCH_WARN_ONCE(
+      "at::frobenius_norm is deprecated and it is just left for JIT compatibility. ",
+      "It will be removed in a future PyTorch release. Please use ",
+      "`linalg.vector_norm(A, 2., dim, keepdim)` instead"
+    );
+  }
+  TORCH_CHECK(dim.size() <= 2,
              "Expected at most 2 dimensions, but got ", dim.size(), " dimensions instead.");
+  return at::norm_out(result, self, 2., dim, keepdim);
}

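The out= overload no longer materializes a temporary and then resize+copies into `result` (the deleted NOTE documented that workaround); it now writes through `at::norm_out` directly. A hedged usage sketch, not part of this diff, following the destination-first convention of the generated `at::*_out` wrappers:

```cpp
#include <ATen/ATen.h>

// Sketch only: drive the out= overload; the destination is resized as needed.
void frobenius_out_usage() {
  at::Tensor A = at::randn({3, 4});
  at::Tensor result = at::empty({0}, A.options());
  at::frobenius_norm_out(result, A, /*dim=*/{0, 1}, /*keepdim=*/false);
  // result is now a 0-dim tensor holding the Frobenius norm of A.
}
```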
////////////////////////////////////////////////////////////////////////////////
//                                Nuclear Norm                                //
-//             Just used in torch.norm. It should not be removed.             //
////////////////////////////////////////////////////////////////////////////////

Tensor nuclear_norm(const Tensor& self, bool keepdim) {
-  TORCH_CHECK(
-      self.dim() == 2,
-      "Expected a tensor with 2 dimensions, but got a tensor with ",
-      self.dim(), " dimension", self.dim() == 1 ? "" : "s", " instead.");
-  return at::native::nuclear_norm(self, IntArrayRef({0, 1}), keepdim);
+  return at::native::nuclear_norm(self, IntArrayRef({-2, -1}), keepdim);
}

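The hard-coded `{0, 1}` becomes `{-2, -1}` and the rank-2 TORCH_CHECK is dropped: on the 2-D inputs the old check enforced, both spellings name the same dimensions, while negative indices leave shape validation to the `linalg.matrix_norm` path below, which also copes with batched input. Illustrative sketch, not part of this diff:

```cpp
#include <ATen/ATen.h>

// Sketch only: with dims {-2, -1}, a batched input reduces over the two
// trailing dimensions, giving one nuclear norm per matrix in the batch.
void nuclear_norm_batched() {
  at::Tensor M = at::randn({5, 3, 3});
  at::Tensor out = at::linalg_matrix_norm(M, "nuc", {-2, -1}, /*keepdim=*/false, c10::nullopt);
  TORCH_CHECK(out.dim() == 1 && out.size(0) == 5);
}
```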
Tensor& nuclear_norm_out(const Tensor& self, bool keepdim, Tensor& result) {
-  TORCH_CHECK(
-      self.dim() == 2,
-      "Expected a tensor with 2 dimensions, but got a tensor with ",
-      self.dim(), " dimension", self.dim() == 1 ? "" : "s", " instead.");
-  return at::native::nuclear_norm_out(self, IntArrayRef({0, 1}), keepdim, result);
-}
-
-namespace {
-Tensor nuclear_norm_impl(const Tensor& self, IntArrayRef dim, bool keepdim) {
-  TORCH_CHECK(dim.size() == 2, "nuclear norm requires a 'dim' argument of size 2");
-  auto dim_ = dim.vec();
-  maybe_wrap_dims(dim_, self.dim());
-
-  auto permutation = create_dim_backshift_permutation(dim_[0], dim_[1], self.dim());
-  Tensor p = self.permute(permutation);
-  Tensor result_ = at::sum(at::linalg_svdvals(p), -1, keepdim);
-  if (keepdim) {
-    result_.unsqueeze_(-1);
-    auto permutation_reverse = create_reverse_permutation(std::move(permutation));
-    result_ = result_.permute(permutation_reverse);
+  auto device = self.device();
+  if (self.layout() == Layout::Strided && (device == kCPU || device == kCUDA || device == kMeta)) {
+    TORCH_WARN_ONCE(
+      "at::nuclear_norm is deprecated and it is just left for JIT compatibility. ",
+      "It will be removed in a future PyTorch release. Please use ",
+      "`linalg.matrix_norm(A, 'nuc', dim, keepdim)` instead"
+    );
  }
-  return result_;
+  return at::linalg_matrix_norm_out(result, self, "nuc", IntArrayRef({-2, -1}), keepdim);
}
-} // anonymous namespace

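With the overloads rewritten on top of `linalg.matrix_norm`, the anonymous-namespace `nuclear_norm_impl`, which permuted the two target dims to the back, summed the singular values, and permuted back, is dead code; so is the `toRealValueType` cast removed below, since the linalg ops already return a real-valued result. A sketch, not part of this diff, of the identity the helper implemented by hand:

```cpp
#include <ATen/ATen.h>

// Sketch only: the nuclear norm of a matrix is the sum of its singular values.
void nuclear_norm_identity() {
  at::Tensor M = at::randn({4, 6});
  at::Tensor via_svd = at::linalg_svdvals(M).sum(-1);
  at::Tensor via_norm = at::linalg_matrix_norm(M, "nuc");
  TORCH_CHECK(at::allclose(via_svd, via_norm));
}
```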
Tensor nuclear_norm(const Tensor& self, IntArrayRef dim, bool keepdim) {
-  return nuclear_norm_impl(self, dim, keepdim).to(toRealValueType(self.scalar_type()));
+  auto device = self.device();
+  if (self.layout() == Layout::Strided && (device == kCPU || device == kCUDA || device == kMeta)) {
+    TORCH_WARN_ONCE(
+      "at::nuclear_norm is deprecated and it is just left for JIT compatibility. ",
+      "It will be removed in a future PyTorch release. Please use ",
+      "`linalg.matrix_norm(A, 'nuc', dim, keepdim)` instead"
+    );
+  }
+  return at::linalg_matrix_norm(self, "nuc", dim, keepdim);
}

Tensor& nuclear_norm_out(const Tensor& self, IntArrayRef dim, bool keepdim, Tensor& result) {
-  auto result_ = nuclear_norm_impl(self, dim, keepdim);
-  at::native::resize_output(result, result_.sizes());
-  result.copy_(result_);
-  return result;
+  auto device = self.device();
+  if (self.layout() == Layout::Strided && (device == kCPU || device == kCUDA || device == kMeta)) {
+    TORCH_WARN_ONCE(
+      "at::nuclear_norm is deprecated and it is just left for JIT compatibility. ",
+      "It will be removed in a future PyTorch release. Please use ",
+      "`linalg.matrix_norm(A, 'nuc', dim, keepdim)` instead"
+    );
+  }
+  return at::linalg_matrix_norm_out(result, self, "nuc", dim, keepdim);
}

////////////////////////////////////////////////////////////////////////////////