From fb1dfa31268c59a829ded35d304969c48ede388b Mon Sep 17 00:00:00 2001
From: Kunwar Grover
Date: Fri, 5 Jan 2024 04:03:41 +0530
Subject: [PATCH] Bump llvm-project to 6b65d79fbb4682468333cea42b62f15c2dffd8f3
 (#2723)

Co-authored-by: hanhanW
---
 externals/llvm-project                     |  2 +-
 lib/Dialect/TMTensor/IR/TMTensorOps.cpp    | 21 +++++++++------------
 lib/Dialect/Torch/IR/TorchOps.cpp          | 14 ++++++++++++++
 .../Torch/Transforms/InlineGlobalSlots.cpp |  2 +-
 4 files changed, 25 insertions(+), 14 deletions(-)
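
The TMTensorOps.cpp changes track the upstream SCF change that folds
reductions into the scf.parallel terminator: scf.reduce is now the
terminator of scf.parallel, so the explicit trailing scf.yield ops are
dropped, and the reduction combiner is built directly inside the
scf.reduce op's region rather than through the removed lambda-taking
ReduceOp builder. As a rough sketch of the new pattern (not part of this
patch; b, loc, lbs, ubs, steps, init, and buffer are placeholders), a
floating-point sum reduction over a memref now reads:

    b.create<scf::ParallelOp>(
        loc, lbs, ubs, steps, ValueRange{init},
        [&](OpBuilder &b, Location loc, ValueRange ivs, ValueRange inits) {
          // Value combined into the reduction on this iteration.
          Value elem = b.create<memref::LoadOp>(loc, buffer, ivs);
          // scf.reduce terminates the body; its builder creates one
          // combiner region per operand, each with two block arguments.
          auto reduceOp = b.create<scf::ReduceOp>(loc, elem);
          Block &body = reduceOp.getReductions()[0].front();
          auto rb = OpBuilder::atBlockEnd(&body);
          Value acc = body.getArgument(0);
          Value cur = body.getArgument(1);
          Value sum = rb.create<arith::AddFOp>(loc, acc, cur);
          rb.create<scf::ReduceReturnOp>(loc, sum);
        });
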
diff --git a/externals/llvm-project b/externals/llvm-project
index 99045b60b575..6b65d79fbb46 160000
--- a/externals/llvm-project
+++ b/externals/llvm-project
@@ -1 +1 @@
-Subproject commit 99045b60b57571079f9cb4aea57870692523fbe8
+Subproject commit 6b65d79fbb4682468333cea42b62f15c2dffd8f3
diff --git a/lib/Dialect/TMTensor/IR/TMTensorOps.cpp b/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
index dcb2f4215891..ec399fe9633e 100644
--- a/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
+++ b/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
@@ -166,7 +166,6 @@ static void matmul(OpBuilder &b, Location loc, Value lhs, ValueRange lhsSizes,
                 })
             ->getResult(0);
         b.create<memref::StoreOp>(loc, sum, output, localIVs);
-        b.create<scf::YieldOp>(loc);
       });
 }
 
@@ -229,13 +228,15 @@ LogicalResult AttentionOp::generateScalarImplementation(OpBuilder &b,
            SmallVector<Value>(weightRank, one), init,
            [&](OpBuilder &b, Location loc, ValueRange localIVs,
                ValueRange accs) {
-             b.create<scf::ReduceOp>(
-                 loc, init,
-                 [&](OpBuilder &b, Location loc, Value elem, Value acc) {
-                   Value x = b.create<memref::LoadOp>(loc, weight, localIVs);
-                   Value max = b.create<arith::MaximumFOp>(loc, x, acc);
-                   b.create<scf::ReduceReturnOp>(loc, max);
-                 });
+             auto reduceOp = b.create<scf::ReduceOp>(loc, init);
+             // Build reduce body.
+             Block &reductionBody = reduceOp.getReductions()[0].front();
+             auto bodyBuilder = OpBuilder::atBlockEnd(&reductionBody);
+             Value acc = reductionBody.getArgument(0);
+             Value x =
+                 bodyBuilder.create<memref::LoadOp>(loc, weight, localIVs);
+             Value max = bodyBuilder.create<arith::MaximumFOp>(loc, x, acc);
+             bodyBuilder.create<scf::ReduceReturnOp>(loc, max);
            })
           .getResult(0);
   // weight = (weight - max(weight)) / math.sqrt(querySizes[-1])
@@ -247,7 +248,6 @@ LogicalResult AttentionOp::generateScalarImplementation(OpBuilder &b,
         x = b.create<arith::SubFOp>(loc, x, globalMax);
         x = b.create<arith::DivFOp>(loc, x, scaleFactor);
         b.create<memref::StoreOp>(loc, x, weight, localIVs);
-        b.create<scf::YieldOp>(loc);
       });
   // calculate exp(weight)
   SmallVector<Value> min(weightRank, zero),
@@ -258,7 +258,6 @@ LogicalResult AttentionOp::generateScalarImplementation(OpBuilder &b,
         Value x = b.create<memref::LoadOp>(loc, weight, localIVs);
         x = b.create<math::ExpOp>(loc, x);
         b.create<memref::StoreOp>(loc, x, weight, localIVs);
-        b.create<scf::YieldOp>(loc);
       });
   Value expWeightSum = b.create<memref::AllocOp>(
       loc,
@@ -290,7 +289,6 @@ LogicalResult AttentionOp::generateScalarImplementation(OpBuilder &b,
               Value y = b.create<memref::LoadOp>(loc, weight, coords);
               Value sum = b.create<arith::AddFOp>(loc, x, y);
               b.create<memref::StoreOp>(loc, sum, expWeightSum, outsideDims);
-              b.create<scf::YieldOp>(loc);
             });
       });
   // calculate exp(weight) / sum(exp(weight))
@@ -305,7 +303,6 @@ LogicalResult AttentionOp::generateScalarImplementation(OpBuilder &b,
         Value x = b.create<memref::LoadOp>(loc, weight, localIVs);
         Value sum = b.create<memref::LoadOp>(loc, expWeightSum, sumIVs);
         x = b.create<arith::DivFOp>(loc, x, sum);
         b.create<memref::StoreOp>(loc, x, weight, localIVs);
-        b.create<scf::YieldOp>(loc);
       });
   // output = weight @ value
diff --git a/lib/Dialect/Torch/IR/TorchOps.cpp b/lib/Dialect/Torch/IR/TorchOps.cpp
index 32a550ce813a..e63a4e376013 100644
--- a/lib/Dialect/Torch/IR/TorchOps.cpp
+++ b/lib/Dialect/Torch/IR/TorchOps.cpp
@@ -715,6 +715,8 @@ OpFoldResult AtenNeBoolOp::fold(FoldAdaptor adaptor) {
 //===----------------------------------------------------------------------===//
 
 OpFoldResult AtenSqueezeOp::fold(FoldAdaptor adaptor) {
+  if (getOperand().getType() != getResult().getType())
+    return nullptr;
   if (auto tensorType = getOperand().getType().dyn_cast<BaseTensorType>()) {
     if (tensorType.hasSizes() && tensorType.getSizes().size() == 0)
       return getOperand();
@@ -727,6 +729,8 @@ OpFoldResult AtenSqueezeOp::fold(FoldAdaptor adaptor) {
 //===----------------------------------------------------------------------===//
 
 OpFoldResult AtenSqueezeDimOp::fold(FoldAdaptor adaptor) {
+  if (getOperand(0).getType() != getResult().getType())
+    return nullptr;
   if (auto tensorType = getOperand(0).getType().dyn_cast<BaseTensorType>()) {
     if (tensorType.hasSizes() && tensorType.getSizes().size() == 0)
       return getOperand(0);
@@ -739,6 +743,8 @@ OpFoldResult AtenSqueezeDimOp::fold(FoldAdaptor adaptor) {
 //===----------------------------------------------------------------------===//
 
 OpFoldResult AtenRoundOp::fold(FoldAdaptor adaptor) {
+  if (getSelf().getType() != getResult().getType())
+    return nullptr;
   if (auto selfType = getSelf().getType().dyn_cast<BaseTensorType>()) {
     if (selfType.hasDtype() && selfType.getDtype().isa<mlir::IntegerType>())
       return getSelf();
@@ -911,6 +917,8 @@ OpFoldResult AtenViewOp::fold(FoldAdaptor adaptor) {
   auto resType = getType().dyn_cast<BaseTensorType>();
   if (!resType || !resType.hasSizes() || resType.getSizes().size() != 1)
     return nullptr;
+  if (inputType != resType)
+    return nullptr;
   // Fold when both the input tensor and result are unity rank tensors.
   return getOperand(0);
 }
@@ -2441,6 +2449,8 @@ OpFoldResult AtenCatOp::fold(FoldAdaptor adaptor) {
   auto list = getOperand(0).getDefiningOp<PrimListConstructOp>();
   if (!list || !list->hasOneUse() || list.getElements().size() != 1)
     return nullptr;
+  if (list.getElements()[0].getType() != getResult().getType())
+    return nullptr;
   return list.getElements()[0];
 }
 
@@ -2451,6 +2461,8 @@ OpFoldResult AtenCatOp::fold(FoldAdaptor adaptor) {
 OpFoldResult AtenBroadcastToOp::fold(FoldAdaptor adaptor) {
   auto inType = getOperand(0).getType().dyn_cast<BaseTensorType>();
   auto outType = getResult().getType().dyn_cast<BaseTensorType>();
+  if (inType != outType)
+    return nullptr;
   if (!inType || !outType || !inType.hasSizes() || !outType.hasSizes())
     return nullptr;
   if (inType.getSizes().size() != outType.getSizes().size() ||
@@ -2480,6 +2492,8 @@ OpFoldResult AtenSliceTensorOp::fold(FoldAdaptor adaptor) {
 
   auto inType = getOperand(0).getType().dyn_cast<BaseTensorType>();
   auto outType = getResult().getType().dyn_cast<BaseTensorType>();
+  if (inType != outType)
+    return nullptr;
   if (!inType || !outType || !inType.hasSizes() || !outType.hasSizes())
     return nullptr;
   if (inType.getSizes().size() != outType.getSizes().size() ||
diff --git a/lib/Dialect/Torch/Transforms/InlineGlobalSlots.cpp b/lib/Dialect/Torch/Transforms/InlineGlobalSlots.cpp
index 76b57fe8c9a3..c67e6dc0d3a7 100644
--- a/lib/Dialect/Torch/Transforms/InlineGlobalSlots.cpp
+++ b/lib/Dialect/Torch/Transforms/InlineGlobalSlots.cpp
@@ -95,7 +95,7 @@ static bool isUseTreatedWithValueSemantics(OpOperand &use) {
 class InlineGlobalSlotsAnalysisState : public AnalysisState {
 public:
   InlineGlobalSlotsAnalysisState(ProgramPoint point) : AnalysisState(point) {
-    setSafe();
+    (void)setSafe();
   }
 
   void print(raw_ostream &os) const override {
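
The TorchOps.cpp hunks all add the same guard: a fold may only replace
an op's result with an existing value of exactly the same type, so each
fold now bails out when the operand and result types disagree (for
example, when one side carries shape or dtype refinements the other
lacks); returning a differently-typed value violates the fold contract
and can trip assertions after the bump. A minimal illustration of the
guard pattern (AtenFooOp is a made-up op used only for illustration,
not part of torch-mlir):

    OpFoldResult AtenFooOp::fold(FoldAdaptor adaptor) {
      // Folding hands back the operand SSA value unchanged, which is
      // only sound when operand and result types match exactly.
      if (getOperand().getType() != getResult().getType())
        return nullptr;
      return getOperand();
    }

The InlineGlobalSlots.cpp change is mechanical: setSafe() returns a
ChangeResult that upstream now marks nodiscard, and the (void) cast
discards it explicitly.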