@@ -6,6 +6,7 @@
 
 #include <ATen/functorch/BatchRulesHelper.h>
 #include <iostream>
+#include <utility>
 
 #include <ATen/Operators.h>
 #include <ATen/functorch/PlumbingHelper.h>
@@ -236,7 +237,7 @@ std::tuple<Tensor, optional<int64_t>> squeeze_batch_rule(const Tensor& self, opt
   }
 
   auto result = self.view(squeezed_sizes);
-  return std::make_tuple(result, c10::optional<int64_t>(new_batch_idx));
+  return std::make_tuple(std::move(result), c10::optional<int64_t>(new_batch_idx));
 }
 
 std::tuple<Tensor, optional<int64_t>> squeeze_dims_batch_rule(
@@ -284,13 +285,13 @@ std::tuple<std::vector<Tensor>, optional<int64_t>> chunk_batching_rule(const Ten
 
 std::tuple<Tensor, optional<int64_t>> select_batching_rule(const Tensor& self, optional<int64_t> bdim, int64_t dim, c10::SymInt index) {
   if (!bdim) {
-    return std::make_tuple(self.select_symint(dim, index), nullopt);
+    return std::make_tuple(self.select_symint(dim, std::move(index)), nullopt);
   }
 
   auto _self = moveBatchDimToFront(self, bdim);
   auto dim_physical = getPhysicalDim(_self, true, dim);
-  auto result = _self.select_symint(dim_physical, index);
-  return std::make_tuple(result, 0);
+  auto result = _self.select_symint(dim_physical, std::move(index));
+  return std::make_tuple(std::move(result), 0);
 }
 
 std::tuple<Tensor, optional<int64_t>> _reshape_alias_batch_rule(const Tensor& self, optional<int64_t> bdim, const c10::SymIntArrayRef shape, const c10::SymIntArrayRef strides) {
@@ -359,8 +360,8 @@ std::tuple<Tensor,optional<int64_t>> slice_batch_rule(
   auto self_ = moveBatchDimToFront(self, self_bdim);
   dim = getPhysicalDim(self, self_bdim.has_value(), dim);
 
-  auto result = self_.slice_symint(dim, start, end, step);
-  return std::make_tuple(result, 0);
+  auto result = self_.slice_symint(dim, std::move(start), std::move(end), std::move(step));
+  return std::make_tuple(std::move(result), 0);
 }
 
 static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
@@ -386,7 +387,7 @@ transpose_int_batch_rule(
   dim0 = getPhysicalDim(self, self_bdim.has_value(), dim0);
   dim1 = getPhysicalDim(self, self_bdim.has_value(), dim1);
   auto result = self_.transpose(dim0, dim1);
-  return std::make_tuple(result, 0);
+  return std::make_tuple(std::move(result), 0);
 }
 
 std::tuple<Tensor, optional<int64_t>> permute_batching_rule(
@@ -416,7 +417,7 @@ std::tuple<Tensor,optional<int64_t>> select_backward_batch_rule(
   c10::SymDimVector input_sizes_(input_sizes.size() + 1);
   input_sizes_[0] = grad_input_.sym_size(0);
   std::copy(input_sizes.begin(), input_sizes.end(), input_sizes_.begin() + 1);
-  auto result = at::select_backward_symint(grad_input_, input_sizes_, dim, index);
+  auto result = at::select_backward_symint(grad_input_, input_sizes_, dim, std::move(index));
   return std::make_tuple(std::move(result), 0);
 }
 
@@ -429,7 +430,7 @@ std::tuple<Tensor,optional<int64_t>> slice_backward_batch_rule(
   c10::SymDimVector input_sizes_(input_sizes.size() + 1);
   input_sizes_[0] = grad_input_.size(0);
   std::copy(input_sizes.begin(), input_sizes.end(), input_sizes_.begin() + 1);
-  auto result = at::slice_backward_symint(grad_input_, input_sizes_, dim, start, end, step);
+  auto result = at::slice_backward_symint(grad_input_, input_sizes_, dim, std::move(start), std::move(end), std::move(step));
   return std::make_tuple(std::move(result), 0);
 }
 
@@ -507,7 +508,7 @@ std::tuple<Tensor, optional<int64_t>> unfold_batch_rule(
   if (logical_rank==0) {
     result = result.squeeze(-1);
   }
-  return std::make_tuple(result, 0);
+  return std::make_tuple(std::move(result), 0);
 }
 
 std::tuple<Tensor, optional<int64_t>> narrow_copy_batch_rule(
@@ -517,9 +518,9 @@ std::tuple<Tensor, optional<int64_t>> narrow_copy_batch_rule(
   auto self_ = moveBatchDimToFront(self, self_bdim);
   auto logical_rank = rankWithoutBatchDim(self, self_bdim);
   dim = maybe_wrap_dim(dim, logical_rank) + 1;
-  auto result = self_.narrow_copy_symint(dim, start, length);
+  auto result = self_.narrow_copy_symint(dim, std::move(start), std::move(length));
 
-  return std::make_tuple(result, 0);
+  return std::make_tuple(std::move(result), 0);
 }
 
 std::tuple<std::vector<Tensor>, optional<int64_t>> unsafe_split_batch_rule(
@@ -531,8 +532,8 @@ std::tuple<std::vector<Tensor>, optional<int64_t>> unsafe_split_batch_rule(
   auto self_ = moveBatchDimToFront(self, self_bdim);
   auto logical_rank = rankWithoutBatchDim(self, self_bdim);
   dim = maybe_wrap_dim(dim, logical_rank) + 1;
-  auto result = self_.unsafe_split_symint(split_size, dim);
-  return std::make_tuple(result, 0);
+  auto result = self_.unsafe_split_symint(std::move(split_size), dim);
+  return std::make_tuple(std::move(result), 0);
 }
 
 std::tuple<Tensor, optional<int64_t>> movedim_batch_rule(const Tensor& self, optional<int64_t> self_bdim, IntArrayRef source, IntArrayRef destination) {
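The pattern applied in every hunk above is the same: values held by value in the batch rules (Tensor results, c10::SymInt / optional<c10::SymInt> arguments, split sizes) are std::move'd into the callee or the returned tuple instead of being copied, and <utility> is included for std::move. A minimal standalone sketch, not part of the commit (the wrapper names and main() driver are illustrative only), of why the move matters for a by-value Tensor:

// sketch.cpp -- illustrative only; builds against libtorch/ATen headers.
#include <ATen/ATen.h>
#include <tuple>
#include <utility>

// A Tensor taken by value is still an lvalue inside the function body, so
// passing it to std::make_tuple without std::move copy-constructs it, which
// bumps the TensorImpl reference count.
std::tuple<at::Tensor, c10::optional<int64_t>> wrap_copy(at::Tensor t) {
  return std::make_tuple(t, c10::optional<int64_t>(0));            // copies the handle
}

// std::move lets the tuple steal the existing handle instead of copying it.
std::tuple<at::Tensor, c10::optional<int64_t>> wrap_move(at::Tensor t) {
  return std::make_tuple(std::move(t), c10::optional<int64_t>(0)); // no refcount bump
}

int main() {
  auto t = at::zeros({2, 3});
  auto copied = wrap_copy(t);              // t still valid here
  auto moved  = wrap_move(std::move(t));   // t is left in a moved-from (empty) state
  return 0;
}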