461 | 461 |     CPU: _cosh_out_cpu
462 | 462 |     CUDA: _cosh_out_cuda
463 | 463 |
464 |     | -- func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, double margin=0.0, int64_t reduction=Reduction::ElementwiseMean) -> Tensor
    | 464 | +- func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, double margin=0.0, int64_t reduction=Reduction::Mean) -> Tensor
465 | 465 |
466 | 466 | - func: cudnn_affine_grid_generator(Tensor theta, int64_t N, int64_t C, int64_t H, int64_t W) -> Tensor
467 | 467 |   return:
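This hunk only renames the default reduction enum value from Reduction::ElementwiseMean to Reduction::Mean; the rest of the signature is unchanged. Assuming this declaration backs torch.nn.functional.cosine_embedding_loss and the renamed C++ default corresponds to the Python-level reduction='mean', a minimal sketch of the call looks like:

```python
import torch
import torch.nn.functional as F

x1 = torch.randn(8, 16)   # first batch of embeddings
x2 = torch.randn(8, 16)   # second batch of embeddings
target = torch.randint(0, 2, (8,), dtype=torch.float) * 2 - 1  # labels in {-1, +1}

# margin=0.0 and reduction='mean' mirror the declared defaults
# (Reduction::Mean after this rename).
loss = F.cosine_embedding_loss(x1, x2, target, margin=0.0, reduction='mean')
```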
568 | 568 |
569 | 569 | - func: cumprod_out(Tensor result, Tensor self, int64_t dim) -> Tensor
570 | 570 |
571 |     | -- func: ctc_loss(Tensor log_probs, Tensor targets, IntList input_lengths, IntList target_lengths, int64_t blank=0, int64_t reduction=Reduction::ElementwiseMean) -> Tensor
    | 571 | +- func: ctc_loss(Tensor log_probs, Tensor targets, IntList input_lengths, IntList target_lengths, int64_t blank=0, int64_t reduction=Reduction::Mean) -> Tensor
572 | 572 |
573 | 573 | # convenience function that converts to intlists for you
574 |     | -- func: ctc_loss(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int64_t blank=0, int64_t reduction=Reduction::ElementwiseMean) -> Tensor
    | 574 | +- func: ctc_loss(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int64_t blank=0, int64_t reduction=Reduction::Mean) -> Tensor
575 | 575 |
576 | 576 | - func: _ctc_loss(Tensor log_probs, Tensor targets, IntList input_lengths, IntList target_lengths, int64_t blank=0) -> (Tensor, Tensor)
577 | 577 |   dispatch:
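Both ctc_loss overloads get the same default rename: one takes IntList lengths, the other is the "convenience" overload that accepts Tensor lengths, and _ctc_loss below is the two-output primitive it lowers to. Assuming the Tensor-lengths overload is what torch.nn.functional.ctc_loss hits when you pass length tensors, a sketch with the declared defaults (blank=0, reduction='mean'):

```python
import torch
import torch.nn.functional as F

T, N, C, S = 50, 4, 20, 10                         # time steps, batch, classes, max target length
log_probs = torch.randn(T, N, C).log_softmax(-1)   # CTC expects log-probabilities
targets = torch.randint(1, C, (N, S), dtype=torch.long)        # class 0 is reserved as blank here
input_lengths = torch.full((N,), T, dtype=torch.long)          # tensor lengths -> "convenience" overload
target_lengths = torch.randint(5, S + 1, (N,), dtype=torch.long)

loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                  blank=0, reduction='mean')
```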
831 | 831 |
832 | 832 | - func: hamming_window(int64_t window_length, bool periodic, double alpha, double beta, TensorOptions options={}) -> Tensor
833 | 833 |
834 |     | -- func: hinge_embedding_loss(Tensor self, Tensor target, double margin=1.0, int64_t reduction=Reduction::ElementwiseMean) -> Tensor
    | 834 | +- func: hinge_embedding_loss(Tensor self, Tensor target, double margin=1.0, int64_t reduction=Reduction::Mean) -> Tensor
835 | 835 |
836 | 836 | - func: ger(Tensor self, Tensor vec2) -> Tensor
837 | 837 |   variants: function, method
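For hinge_embedding_loss the same rename applies to the default reduction. Assuming the declared defaults surface as margin=1.0 and reduction='mean' in torch.nn.functional, a small sketch:

```python
import torch
import torch.nn.functional as F

x = torch.randn(10)                                         # e.g. pairwise distances
y = torch.randint(0, 2, (10,), dtype=torch.float) * 2 - 1   # labels in {-1, +1}

# margin=1.0 and reduction='mean' mirror the declared defaults.
loss = F.hinge_embedding_loss(x, y, margin=1.0, reduction='mean')
```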
937 | 937 |   variants: function, method
938 | 938 |   device_guard: false
939 | 939 |
940 |     | -- func: kl_div(Tensor self, Tensor target, int64_t reduction=Reduction::ElementwiseMean) -> Tensor
    | 940 | +- func: kl_div(Tensor self, Tensor target, int64_t reduction=Reduction::Mean) -> Tensor
941 | 941 |
942 |     | -- func: kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction=Reduction::ElementwiseMean) -> Tensor
    | 942 | +- func: kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction=Reduction::Mean) -> Tensor
943 | 943 |   dispatch:
944 | 944 |     CPU: kl_div_backward_cpu
945 | 945 |     CUDA: kl_div_backward_cuda
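Here the forward kl_div and its per-backend backward declaration (dispatched to kl_div_backward_cpu / kl_div_backward_cuda) are renamed together, so the default stays consistent on both paths. A sketch of the user-facing call, assuming it maps to torch.nn.functional.kl_div with the input given as log-probabilities and the target as probabilities, and that autograd routes the gradient through the backward kernels declared above:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(8, 5, requires_grad=True)
inp = F.log_softmax(logits, dim=-1)             # log-probabilities
tgt = torch.softmax(torch.randn(8, 5), dim=-1)  # probabilities

loss = F.kl_div(inp, tgt, reduction='mean')     # default reduction, i.e. Reduction::Mean
loss.backward()                                 # presumably hits kl_div_backward_cpu / _cuda
```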
1054 | 1054 |
1055 | 1055 | - func: logsumexp_out(Tensor result, Tensor self, int64_t dim, bool keepdim=false) -> Tensor
1056 | 1056 |
1057 |      | -- func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, double margin=0.0, int64_t reduction=Reduction::ElementwiseMean) -> Tensor
     | 1057 | +- func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, double margin=0.0, int64_t reduction=Reduction::Mean) -> Tensor
1058 | 1058 |
1059 | 1059 | - func: matmul(Tensor self, Tensor other) -> Tensor
1060 | 1060 |   variants: function, method
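margin_ranking_loss follows the same pattern. Assuming the declaration backs torch.nn.functional.margin_ranking_loss with defaults margin=0.0 and reduction='mean':

```python
import torch
import torch.nn.functional as F

s1 = torch.randn(16)   # scores for the first item of each pair
s2 = torch.randn(16)   # scores for the second item
y = torch.randint(0, 2, (16,), dtype=torch.float) * 2 - 1   # +1 if s1 should rank higher, -1 otherwise

loss = F.margin_ranking_loss(s1, s2, y, margin=0.0, reduction='mean')
```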
1699 | 1699 |
1700 | 1700 | - func: _trilinear(Tensor i1, Tensor i2, Tensor i3, IntList expand1, IntList expand2, IntList expand3, IntList sumdim, int64_t unroll_dim=1) -> Tensor
1701 | 1701 |
1702 |      | -- func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, double margin=1.0, double p=2, double eps=1e-6, bool swap=false, int64_t reduction=Reduction::ElementwiseMean) -> Tensor
     | 1702 | +- func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, double margin=1.0, double p=2, double eps=1e-6, bool swap=false, int64_t reduction=Reduction::Mean) -> Tensor
1703 | 1703 |
1704 | 1704 | - func: trunc(Tensor self) -> Tensor
1705 | 1705 |   variants: function, method
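triplet_margin_loss is the last declaration touched by the rename; only the reduction default changes, the distance parameters (margin, p, eps, swap) keep their values. Assuming it maps to torch.nn.functional.triplet_margin_loss with those defaults:

```python
import torch
import torch.nn.functional as F

anchor = torch.randn(32, 128)
positive = torch.randn(32, 128)
negative = torch.randn(32, 128)

# margin, p, eps, swap and reduction mirror the declared defaults.
loss = F.triplet_margin_loss(anchor, positive, negative,
                             margin=1.0, p=2, eps=1e-6, swap=False,
                             reduction='mean')
```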