Skip to content

Commit 0a607a4

Browse files
authored
[TorchToLinalg] Use linalg.transpose instead of generic in permuteTensor (llvm#3872)
This PR changes the lowering to use `linalg.transpose` instead of `linalg.generic` in `torch_to_linalg::permuteTensor`.
1 parent c26ca8b commit 0a607a4

File tree

2 files changed

+44
-22
lines changed

2 files changed

+44
-22
lines changed

lib/Conversion/TorchToLinalg/Utils.cpp

+10-22
Original file line numberDiff line numberDiff line change
@@ -578,6 +578,12 @@ LogicalResult torch_to_linalg::permuteTensor(Operation *op,
578578
int64_t inputRank = inType.getRank();
579579
Type elementType = inType.getElementType();
580580

581+
// Check for 0-D tensor.
582+
if (inputRank == 0) {
583+
result = input;
584+
return success();
585+
}
586+
581587
// Check if the dimensions are valid constants.
582588
int64_t numDimensions = dimensions.size();
583589
if (inputRank != numDimensions)
@@ -596,28 +602,10 @@ LogicalResult torch_to_linalg::permuteTensor(Operation *op,
596602

597603
Value outVector = rewriter.create<tensor::EmptyOp>(
598604
loc, getAsOpFoldResult(outputDims), elementType);
599-
SmallVector<AffineExpr> idExprs;
600-
SmallVector<AffineExpr> swapExprs;
601-
for (uint32_t i = 0; i < inputRank; i++)
602-
idExprs.push_back(getAffineDimExpr(i, rewriter.getContext()));
603-
for (uint32_t i = 0; i < inputRank; i++)
604-
swapExprs.push_back(idExprs[dimensions[i]]);
605-
606-
AffineMap inputMap =
607-
AffineMap::get(inputRank, /*symbolCount=*/0, idExprs, op->getContext());
608-
AffineMap outputMap =
609-
AffineMap::get(inputRank, /*symbolCount=*/0, swapExprs, op->getContext());
610-
SmallVector<AffineMap> indexingMaps{inputMap, outputMap};
611-
SmallVector<utils::IteratorType> iteratorTypes(inputRank,
612-
utils::IteratorType::parallel);
613-
result = rewriter
614-
.create<linalg::GenericOp>(
615-
loc, outVector.getType(), input, outVector, indexingMaps,
616-
iteratorTypes,
617-
[](OpBuilder &b, Location loc, ValueRange args) {
618-
b.create<linalg::YieldOp>(loc, args[0]);
619-
})
620-
.getResult(0);
605+
606+
result =
607+
rewriter.create<linalg::TransposeOp>(loc, input, outVector, dimensions)
608+
->getResult(0);
621609
return success();
622610
}
623611

Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
// RUN: torch-mlir-opt <%s -convert-torch-to-linalg -canonicalize -split-input-file -verify-diagnostics | FileCheck %s
2+
3+
// CHECK-LABEL: func.func @torch.aten.permute(
4+
// CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[64,32,16,8,4],f32>) -> !torch.vtensor<[64,8,4,32,16],f32> {
5+
// CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[64,32,16,8,4],f32> -> tensor<64x32x16x8x4xf32>
6+
// CHECK: %[[VAL_2:.*]] = tensor.empty() : tensor<64x8x4x32x16xf32>
7+
// CHECK: %[[VAL_3:.*]] = linalg.transpose ins(%[[VAL_1]] : tensor<64x32x16x8x4xf32>) outs(%[[VAL_2]] : tensor<64x8x4x32x16xf32>) permutation = [0, 3, 4, 1, 2]
8+
// CHECK: %[[VAL_4:.*]] = torch_c.from_builtin_tensor %[[VAL_3]] : tensor<64x8x4x32x16xf32> -> !torch.vtensor<[64,8,4,32,16],f32>
9+
// CHECK: return %[[VAL_4]] : !torch.vtensor<[64,8,4,32,16],f32>
10+
// CHECK: }
11+
func.func @torch.aten.permute(%arg0: !torch.vtensor<[64,32,16,8,4],f32>) -> !torch.vtensor<[64,8,4,32,16],f32> {
12+
%int0 = torch.constant.int 0
13+
%int3 = torch.constant.int 3
14+
%int4 = torch.constant.int 4
15+
%int1 = torch.constant.int 1
16+
%int2 = torch.constant.int 2
17+
%0 = torch.prim.ListConstruct %int0, %int3, %int4, %int1, %int2 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
18+
%1 = torch.aten.permute %arg0, %0 : !torch.vtensor<[64,32,16,8,4],f32>, !torch.list<int> -> !torch.vtensor<[64,8,4,32,16],f32>
19+
return %1 : !torch.vtensor<[64,8,4,32,16],f32>
20+
}
21+
22+
// -----
23+
24+
// CHECK-LABEL: func.func @torch.aten.permute$rank0(
25+
// CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> {
26+
// CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[],f32> -> tensor<f32>
27+
// CHECK: %[[VAL_2:.*]] = torch_c.from_builtin_tensor %[[VAL_1]] : tensor<f32> -> !torch.vtensor<[],f32>
28+
// CHECK: return %[[VAL_2]] : !torch.vtensor<[],f32>
29+
// CHECK: }
30+
func.func @torch.aten.permute$rank0(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32> {
31+
%0 = torch.prim.ListConstruct : () -> !torch.list<int>
32+
%1 = torch.aten.permute %arg0, %0 : !torch.vtensor<[],f32>, !torch.list<int> -> !torch.vtensor<[],f32>
33+
return %1 : !torch.vtensor<[],f32>
34+
}

0 commit comments

Comments (0)