[onnx] Fix lowering onnx.Shrink to Torch (#3603)
This fixes the result type of the `torch.aten.lt.Scalar` and
`torch.aten.gt.Scalar` ops created during the lowering of `onnx.Shrink`
to Torch: comparison ops produce boolean tensors, so their result
element type must be `i1` rather than the input's floating-point dtype.
marbre authored Aug 7, 2024
1 parent 1813999 commit 341f415
Showing 2 changed files with 14 additions and 10 deletions.
8 changes: 6 additions & 2 deletions lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp
@@ -3229,6 +3229,10 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
           return rewriter.notifyMatchFailure(
               binder.op, "unimplemented: non-floating point dtype");
 
+        Torch::ValueTensorType comparisonResultType =
+            rewriter.getType<Torch::ValueTensorType>(
+                ArrayRef<int64_t>(inputType.getSizes()), rewriter.getI1Type());
+
         // The formula of this operator is: If x < -lambd, y = x + bias; If x >
         // lambd, y = x - bias; Otherwise, y = 0.
         // The implementation is based on the following algorithm:
@@ -3261,13 +3265,13 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
             loc, rewriter.getFloatAttr(rewriter.getF64Type(), -lambd));
 
         Value inputLTNegLambd = rewriter.create<Torch::AtenLtScalarOp>(
-            loc, inputType, input, constNegLambd);
+            loc, comparisonResultType, input, constNegLambd);
         Value inputPlusBias = rewriter.create<Torch::AtenAddScalarOp>(
             loc, inputType, input, constBias, /*alpha=*/constOne);
         Value inputSubBias = rewriter.create<Torch::AtenSubScalarOp>(
             loc, inputType, input, constBias, /*alpha=*/constOne);
         Value inputGTLambd = rewriter.create<Torch::AtenGtScalarOp>(
-            loc, inputType, input, constLambd);
+            loc, comparisonResultType, input, constLambd);
 
         Value inputSubBiasOrZero =
             rewriter.create<Torch::AtenWhereScalarOtherOp>(
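
As context for the formula in the comment above, the Shrink semantics are easy to check against a standalone reference. A minimal sketch in plain C++ (not torch-mlir code; the function name is illustrative):

```cpp
#include <cstdio>
#include <vector>

// Reference semantics of onnx.Shrink:
//   y = x + bias  if x < -lambd
//   y = x - bias  if x >  lambd
//   y = 0         otherwise
std::vector<float> shrinkReference(const std::vector<float> &x, float lambd,
                                   float bias) {
  std::vector<float> y(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    if (x[i] < -lambd)
      y[i] = x[i] + bias;
    else if (x[i] > lambd)
      y[i] = x[i] - bias;
    else
      y[i] = 0.0f;
  }
  return y;
}

int main() {
  // With lambd = bias = 1.5 (the values used in the tests below),
  // [-2, -1, 0, 1, 2] maps to [-0.5, 0, 0, 0, 0.5].
  for (float v : shrinkReference({-2.0f, -1.0f, 0.0f, 1.0f, 2.0f}, 1.5f, 1.5f))
    std::printf("%g ", v);
  std::printf("\n");
}
```

The two branch conditions correspond to the `inputLTNegLambd` and `inputGTLambd` masks in the lowering, which is why those values must have a boolean element type.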
16 changes: 8 additions & 8 deletions test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir
@@ -2377,12 +2377,12 @@ func.func @Shrink(%arg0: !torch.vtensor<[5],f32>) -> !torch.vtensor<[5],f32> att
   // CHECK: %float0.000000e00 = torch.constant.float 0.000000e+00
   // CHECK: %float1.000000e00 = torch.constant.float 1.000000e+00
   // CHECK: %float-1.500000e00 = torch.constant.float -1.500000e+00
-  // CHECK: %0 = torch.aten.lt.Scalar %arg0, %float-1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
+  // CHECK: %0 = torch.aten.lt.Scalar %arg0, %float-1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],i1>
   // CHECK: %1 = torch.aten.add.Scalar %arg0, %float1.500000e00_0, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
   // CHECK: %2 = torch.aten.sub.Scalar %arg0, %float1.500000e00_0, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
-  // CHECK: %3 = torch.aten.gt.Scalar %arg0, %float1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
-  // CHECK: %4 = torch.aten.where.ScalarOther %3, %2, %float0.000000e00 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
-  // CHECK: %5 = torch.aten.where.self %0, %1, %4 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<[5],f32>
+  // CHECK: %3 = torch.aten.gt.Scalar %arg0, %float1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],i1>
+  // CHECK: %4 = torch.aten.where.ScalarOther %3, %2, %float0.000000e00 : !torch.vtensor<[5],i1>, !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
+  // CHECK: %5 = torch.aten.where.self %0, %1, %4 : !torch.vtensor<[5],i1>, !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<[5],f32>
   // CHECK: return %5 : !torch.vtensor<[5],f32>
   %0 = torch.operator "onnx.Shrink"(%arg0) {torch.onnx.bias = 1.500000e+00 : f32, torch.onnx.lambd = 1.500000e+00 : f32} : (!torch.vtensor<[5],f32>) -> !torch.vtensor<[5],f32>
   return %0 : !torch.vtensor<[5],f32>
@@ -2397,12 +2397,12 @@ func.func @test_shrink_hard(%arg0: !torch.vtensor<[5],f32>) -> !torch.vtensor<[5
   // CHECK: %float0.000000e00_0 = torch.constant.float 0.000000e+00
   // CHECK: %float1.000000e00 = torch.constant.float 1.000000e+00
   // CHECK: %float-1.500000e00 = torch.constant.float -1.500000e+00
-  // CHECK: %0 = torch.aten.lt.Scalar %arg0, %float-1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
+  // CHECK: %0 = torch.aten.lt.Scalar %arg0, %float-1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],i1>
   // CHECK: %1 = torch.aten.add.Scalar %arg0, %float0.000000e00, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
   // CHECK: %2 = torch.aten.sub.Scalar %arg0, %float0.000000e00, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
-  // CHECK: %3 = torch.aten.gt.Scalar %arg0, %float1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
-  // CHECK: %4 = torch.aten.where.ScalarOther %3, %2, %float0.000000e00_0 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
-  // CHECK: %5 = torch.aten.where.self %0, %1, %4 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<[5],f32>
+  // CHECK: %3 = torch.aten.gt.Scalar %arg0, %float1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],i1>
+  // CHECK: %4 = torch.aten.where.ScalarOther %3, %2, %float0.000000e00_0 : !torch.vtensor<[5],i1>, !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
+  // CHECK: %5 = torch.aten.where.self %0, %1, %4 : !torch.vtensor<[5],i1>, !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<[5],f32>
   // CHECK: return %5 : !torch.vtensor<[5],f32>
   %0 = torch.operator "onnx.Shrink"(%arg0) {torch.onnx.lambd = 1.500000e+00 : f32} : (!torch.vtensor<[5],f32>) -> !torch.vtensor<[5],f32>
   return %0 : !torch.vtensor<[5],f32>
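
A note on the two tests: `@Shrink` sets both `lambd` and `bias` to 1.5, while `test_shrink_hard` omits `bias`, which defaults to 0.0 in the ONNX Shrink spec (hard shrinkage), so its `add.Scalar`/`sub.Scalar` ops use a 0.0 constant. Files in this suite are typically exercised with a FileCheck RUN line along the lines of `torch-mlir-opt <%s -convert-torch-onnx-to-torch | FileCheck %s` (exact flags may vary by test file).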
