Add dropout op (#436)
Co-authored-by: dan <[email protected]>
dan-garvey committed Nov 29, 2021
1 parent 03fdf56 commit 539511c
Showing 5 changed files with 67 additions and 1 deletion.
20 changes: 20 additions & 0 deletions e2e_testing/torchscript/basic.py
@@ -738,3 +738,23 @@ def forward(self, input, tensor1, tensor2):
@register_test_case(module_factory=lambda: AddCDivModule())
def AddCDivModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(1, 3), tu.rand(1, 3), tu.rand(1, 3))


# ==============================================================================

class DropoutModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

    @export
    @annotate_args([
        None,
        ([-1, -1], torch.float32, True),
    ])
    def forward(self, x):
        return torch.dropout(x, 0.0, False)


@register_test_case(module_factory=lambda: DropoutModule())
def DropoutModule_basic(module, tu: TestUtils):
    module.forward(tu.rand(3, 4))
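
For reference, the behavior this e2e test pins down: torch.dropout(x, p, train)
is the identity whenever p == 0.0 or train is False; in training mode it zeroes
elements with probability p and scales survivors by 1/(1 - p). A minimal
eager-PyTorch sketch of those semantics (illustration only, not part of this
commit):

import torch

x = torch.rand(3, 4)

# Inference (train=False) or p=0.0: dropout is the identity, which is
# exactly what DropoutModule_basic checks against eager PyTorch.
assert torch.equal(torch.dropout(x, 0.0, False), x)

# Training mode with p > 0: dropped elements become zero and the
# survivors are scaled by 1/(1 - p) to preserve the expected value.
y = torch.dropout(x, 0.5, True)
kept = y != 0
assert torch.allclose(y[kept], x[kept] * 2.0)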
16 changes: 16 additions & 0 deletions include/torch-mlir/Dialect/Torch/IR/GeneratedAtenOps.td
@@ -2199,6 +2199,22 @@ def Torch_AtenIntTensorOp : Torch_Op<"aten.Int.Tensor", [
  let assemblyFormat = "$a attr-dict `:` type($a) `->` type($result)";
}

def Torch_AtenDropoutOp : Torch_Op<"aten.dropout", [
    AllowsTypeRefinement,
    HasValueSemantics
  ]> {
  let summary = "Generated op for `aten::dropout : (Tensor, float, bool) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$input,
    Torch_FloatType:$p,
    Torch_BoolType:$train
  );
  let results = (outs
    AnyTorchTensorType:$result
  );
  let assemblyFormat = "$input `,` $p `,` $train attr-dict `:` type($input) `,` type($p) `,` type($train) `->` type($result)";
}
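
Given this assemblyFormat, the op round-trips in IR text as, for example:
torch.aten.dropout %input, %p, %train : !torch.vtensor<[?,?],f32>, !torch.float, !torch.bool -> !torch.vtensor<[?,?],f32>
(the tensor types shown are illustrative; they follow whatever refined type the values carry).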

def Torch_Aten__Contains__StrOp : Torch_Op<"aten.__contains__.str", [
    AllowsTypeRefinement,
    HasValueSemantics
29 changes: 29 additions & 0 deletions lib/Conversion/TorchToLinalg/TorchToLinalg.cpp
@@ -1132,6 +1132,33 @@ class ConvertAtenBmmOp : public OpConversionPattern<AtenBmmOp> {
};
} // namespace

namespace {
// Lower aten.dropout in inference mode: with train == false, dropout is
// the identity, so the op reduces to a cast to the converted result type.
class ConvertAtenDropoutOp : public OpConversionPattern<AtenDropoutOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(AtenDropoutOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
      return failure();

    bool train;
    if (!matchPattern(op.train(), m_TorchConstantBool(&train)))
      return rewriter.notifyMatchFailure(op,
                                         "Expected train to be constant bool.");

    // Training-mode dropout needs randomness, which this lowering lacks.
    if (train)
      return failure();
    auto resultType = getTypeConverter()
                          ->convertType(op->getResult(0).getType())
                          .cast<RankedTensorType>();
    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, resultType,
                                                adaptor.input());
    return success();
  }
};
} // namespace
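
The pattern deliberately handles only the statically-known inference case: if
train folds to the constant false, dropout is a no-op, and the op is replaced
by a tensor.cast of the input (the cast reconciles the converted result type).
An illustrative Python analogue of that decision logic (not project code):

def lower_dropout(x, p, train):
    # Analogue of ConvertAtenDropoutOp: train must be a compile-time
    # constant bool, or the pattern fails to match.
    if not isinstance(train, bool):
        raise ValueError("expected train to be a constant bool")
    # Training-mode dropout needs RNG support, which this lowering lacks.
    if train:
        raise NotImplementedError("training-mode dropout is not lowered")
    # In inference, dropout is the identity regardless of p.
    return x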

namespace {
// See comments in convertMmOp and the heading for this section for general
// considerations. This function needs to be auto-generated.
@@ -3035,6 +3062,8 @@ class ConvertTorchToLinalg
    patterns.add<ConvertAtenIntTensorOp>(typeConverter, context);
    target.addIllegalOp<PrimNumToTensorScalarOp>();
    patterns.add<ConvertPrimNumToTensorScalarOp>(typeConverter, context);
    target.addIllegalOp<AtenDropoutOp>();
    patterns.add<ConvertAtenDropoutOp>(typeConverter, context);

    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
2 changes: 1 addition & 1 deletion lib/Dialect/Torch/Transforms/RefineTypes.cpp
@@ -231,7 +231,7 @@ class TypeAnalyzer : public ForwardDataFlowAnalysis<ValueKnowledge> {
            AtenContiguousOp, AtenFill_ScalarOp, AtenDetachOp,
            AtenMaskedFill_ScalarOp, AtenCopy_Op, AtenIndexPut_Op, AtenCumsumOp,
            AtenLayerNormOp, AtenClampOp, AtenLogOp, AtenSqrtOp, AtenFloorOp,
            AtenLog2Op, Aten_SoftmaxBackwardDataOp, AtenRsqrtOp, AtenDropoutOp,
            AtenTanhBackwardOp, Aten_LogSoftmaxBackwardDataOp, AtenAddIntOp>(
            op)) {
      return getLatticeElement(op->getResult(0)).join(*operands[0]);
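
The one-line RefineTypes change adds AtenDropoutOp to the set of ops whose
result simply inherits the first operand's lattice value: dropout is
elementwise, so the result has the input's shape and dtype. A toy sketch of
that transfer function (hypothetical Python types, not the project's C++ API):

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class ValueKnowledge:
    # Roughly what the analysis tracks per value: per-dimension sizes
    # (None = unknown) and an element dtype (None = unknown).
    sizes: Optional[List[Optional[int]]]
    dtype: Optional[str]

def visit_dropout(operand: ValueKnowledge) -> ValueKnowledge:
    # Dropout is elementwise: the result's knowledge is a copy of the
    # input's, which is what joining operands[0] into the result does.
    return ValueKnowledge(operand.sizes, operand.dtype)

print(visit_dropout(ValueKnowledge(sizes=[None, None], dtype="f32")))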
@@ -569,6 +569,7 @@ def emit_with_mutating_variants(key, **kwargs):
emit("aten::IntImplicit : (Tensor) -> (int)")
emit("aten::tensor.float : (float, int?, Device?, bool) -> (Tensor)")
emit("aten::Int.Tensor : (Tensor) -> (int)")
emit("aten::dropout : (Tensor, float, bool) -> (Tensor)")

# Dict ops.
emit("aten::__contains__.str : (Dict(str, t), str) -> (bool)", has_folder=True)
