diff --git a/compiler/CMakeLists.txt b/compiler/CMakeLists.txt index 0de92e3..9b55737 100644 --- a/compiler/CMakeLists.txt +++ b/compiler/CMakeLists.txt @@ -58,9 +58,11 @@ add_library(graphalg_lib STATIC src/graphalg/GraphAlgSetDimensions.cpp src/graphalg/GraphAlgSplitAggregate.cpp src/graphalg/GraphAlgToCore.cpp + src/graphalg/GraphAlgToCorePipeline.cpp src/graphalg/GraphAlgTypes.cpp src/graphalg/GraphAlgVerifyDimensions.cpp src/graphalg/SemiringTypes.cpp + src/graphalg/evaluate/Evaluator.cpp ) target_include_directories(graphalg_lib PUBLIC include) target_include_directories(graphalg_lib SYSTEM PUBLIC ${PROJECT_BINARY_DIR}/include) @@ -90,15 +92,13 @@ add_library(graphalg_parse STATIC src/graphalg/parse/Lexer.cpp src/graphalg/parse/Parser.cpp ) -target_include_directories(graphalg_parse PUBLIC include) -# TODO: Nasty, should be provided by graphalg_lib dep -target_include_directories(graphalg_parse SYSTEM PRIVATE ${PROJECT_BINARY_DIR}/include) +target_link_libraries(graphalg_parse PRIVATE graphalg_lib) add_executable(graphalg-translate src/graphalg-translate.cpp) target_link_libraries(graphalg-translate PRIVATE - graphalg_parse ${llvm_libs} graphalg_lib + graphalg_parse MLIRTranslateLib ) @@ -116,15 +116,21 @@ target_link_libraries(graphalg-lsp-server PRIVATE MLIRLspServerLib ) +add_executable(graphalg-exec src/graphalg-exec.cpp) +target_link_libraries(graphalg-exec PRIVATE + ${llvm_libs} + graphalg_lib + MLIRParser +) set(ENABLE_WASM OFF CACHE BOOL "Enable wasm-only targets" FORCE) if(ENABLE_WASM) add_executable(wasm_parse src/wasm_parse.cpp) target_link_libraries(wasm_parse PRIVATE - graphalg_parse ${llvm_libs} graphalg_lib + graphalg_parse ) target_link_options(wasm_parse PRIVATE -sEXPORTED_FUNCTIONS=_ga_parse diff --git a/compiler/include/graphalg/GraphAlgAttr.td b/compiler/include/graphalg/GraphAlgAttr.td index 3109aae..82e60ea 100644 --- a/compiler/include/graphalg/GraphAlgAttr.td +++ b/compiler/include/graphalg/GraphAlgAttr.td @@ -113,4 +113,18 @@ def 
DimAttr : GraphAlg_Attr<"Dim", "dim"> { }]; } +def MatrixAttr : GraphAlg_Attr<"Matrix", "mat"> { + let summary = "Constant value matrix"; + + let parameters = (ins + AttributeSelfTypeParameter<"">:$type, + "mlir::ArrayAttr":$elems); + + let assemblyFormat = [{ + `<` $elems `>` + }]; + + let genVerifyDecl = 1; +} + #endif // GRAPHALG_GRAPH_ALG_ATTR diff --git a/compiler/include/graphalg/GraphAlgPasses.h b/compiler/include/graphalg/GraphAlgPasses.h index 0ba559f..bfd26a3 100644 --- a/compiler/include/graphalg/GraphAlgPasses.h +++ b/compiler/include/graphalg/GraphAlgPasses.h @@ -4,6 +4,8 @@ #include #include #include +#include +#include #include #include @@ -53,6 +55,13 @@ mlir::FailureOr createScalarOpFor(mlir::Location loc, BinaryOp op, #define GEN_PASS_REGISTRATION #include "graphalg/GraphAlgPasses.h.inc" +struct GraphAlgToCorePipelineOptions + : public mlir::PassPipelineOptions {}; + +void buildGraphAlgToCorePipeline(mlir::OpPassManager &pm, + const GraphAlgToCorePipelineOptions &options); +void registerGraphAlgToCorePipeline(); + // Testing only: void registerTestDensePass(); diff --git a/compiler/include/graphalg/evaluate/Evaluator.h b/compiler/include/graphalg/evaluate/Evaluator.h new file mode 100644 index 0000000..2ac182b --- /dev/null +++ b/compiler/include/graphalg/evaluate/Evaluator.h @@ -0,0 +1,74 @@ +#include +#include + +#include "graphalg/GraphAlgAttr.h" +#include "graphalg/GraphAlgCast.h" +#include "graphalg/GraphAlgTypes.h" + +namespace graphalg { + +/** Helper for reading elements of \c MatrixAttr. 
*/ +class MatrixAttrReader { +private: + MatrixType _type; + std::size_t _rows; + std::size_t _cols; + llvm::ArrayRef _elems; + +public: + MatrixAttrReader(MatrixAttr attr) + : _type(llvm::cast(attr.getType())), + _rows(_type.getRows().getConcreteDim()), + _cols(_type.getCols().getConcreteDim()), + _elems(attr.getElems().getValue()) {} + + std::size_t nRows() const { return _rows; } + std::size_t nCols() const { return _cols; } + + SemiringTypeInterface ring() const { + return llvm::cast(_type.getSemiring()); + } + + mlir::TypedAttr at(std::size_t row, std::size_t col) const { + assert(row < _rows); + assert(col < _cols); + return llvm::cast(_elems[row * _cols + col]); + } +}; + +class MatrixAttrBuilder { +private: + MatrixType _type; + SemiringTypeInterface _ring; + std::size_t _rows; + std::size_t _cols; + llvm::SmallVector _elems; + +public: + MatrixAttrBuilder(MatrixType type) + : _type(type), _rows(_type.getRows().getConcreteDim()), + _cols(_type.getCols().getConcreteDim()), + _ring(llvm::cast(type.getSemiring())), + _elems(_rows * _cols, _ring.addIdentity()) {} + + std::size_t nRows() const { return _rows; } + std::size_t nCols() const { return _cols; } + + SemiringTypeInterface ring() const { return _ring; } + + void set(std::size_t row, std::size_t col, mlir::TypedAttr attr) { + assert(row < _rows); + assert(col < _cols); + assert(attr.getType() == _ring); + _elems[row * _cols + col] = attr; + } + + MatrixAttr build() { + auto *ctx = _type.getContext(); + return MatrixAttr::get(ctx, _type, mlir::ArrayAttr::get(ctx, _elems)); + } +}; + +MatrixAttr evaluate(mlir::func::FuncOp funcOp, llvm::ArrayRef args); + +} // namespace graphalg diff --git a/compiler/src/graphalg-exec.cpp b/compiler/src/graphalg-exec.cpp new file mode 100644 index 0000000..1a228da --- /dev/null +++ b/compiler/src/graphalg-exec.cpp @@ -0,0 +1,232 @@ +/** + * Executes GraphAlg Core IR. + * + * It is used in regression tests for \c graphalg::evaluate. 
+ */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "graphalg/GraphAlgAttr.h" +#include "graphalg/GraphAlgCast.h" +#include "graphalg/GraphAlgDialect.h" +#include "graphalg/GraphAlgTypes.h" +#include "graphalg/SemiringTypes.h" +#include "graphalg/evaluate/Evaluator.h" + +namespace cmd { + +using namespace llvm; + +cl::opt input(cl::Positional, cl::Required, + cl::desc(""), + cl::value_desc("source file")); +cl::opt func(cl::Positional, cl::Required, + cl::desc(""), + cl::value_desc("function to execute")); +cl::list args(cl::Positional, cl::desc(""), + cl::value_desc("input files")); + +} // namespace cmd + +static graphalg::MatrixAttr parseMatrix(llvm::StringRef filename, + const llvm::MemoryBuffer *buffer, + graphalg::MatrixType type) { + auto *ctx = type.getContext(); + graphalg::MatrixAttrBuilder result(type); + + assert(type.getRows().isConcrete() && type.getCols().isConcrete()); + + std::size_t lineIdx = 0; + auto emitError = [&]() { + return mlir::emitError( + mlir::FileLineColLoc::get(type.getContext(), filename, lineIdx, 0)); + }; + + auto data = buffer->getBuffer(); + for (auto line : llvm::split(data, '\n')) { + lineIdx++; + + if (line.empty()) { + continue; + } + + llvm::SmallVector parts; + llvm::SplitString(line, parts); + if (parts.size() < 2) { + emitError() << "expected at least 2 parts, got " << parts.size(); + return nullptr; + } + + std::size_t rowIdx; + if (!llvm::to_integer(parts[0], rowIdx, /*Base=*/10)) { + emitError() << "invalid row index"; + return nullptr; + } else if (rowIdx >= type.getRows().getConcreteDim()) { + emitError() << "row index " << rowIdx << " out of bounds"; + return nullptr; + } + + std::size_t colIdx; + if (!llvm::to_integer(parts[1], colIdx, /*Base=*/10)) { + emitError() << "invalid column index"; 
+ return nullptr; + } else if (colIdx >= type.getCols().getConcreteDim()) { + emitError() << "col index " << colIdx << " out of bounds"; + return nullptr; + } + + mlir::TypedAttr valueAttr; + if (type.getSemiring() == graphalg::SemiringTypes::forInt(ctx)) { + std::int64_t value; + if (parts.size() != 3) { + emitError() << "expected 3 parts, got " << parts.size(); + return nullptr; + } + + if (!llvm::to_integer(parts[2], value, /*Base=*/10)) { + emitError() << "invalid integer value"; + return nullptr; + } + + valueAttr = mlir::IntegerAttr::get(type.getSemiring(), value); + } else if (type.getSemiring() == graphalg::SemiringTypes::forBool(ctx)) { + if (parts.size() != 2) { + emitError() << "expected 2 parts, got " << parts.size(); + return nullptr; + } + + valueAttr = mlir::BoolAttr::get(ctx, true); + } else if (type.getSemiring() == graphalg::SemiringTypes::forReal(ctx)) { + double value; + if (parts.size() != 3) { + emitError() << "expected 3 parts, got " << parts.size(); + return nullptr; + } + + if (!llvm::to_float(parts[2], value)) { + emitError() << "invalid float value"; + return nullptr; + } + + valueAttr = mlir::FloatAttr::get(type.getSemiring(), value); + } else { + emitError() << "unsupported semiring: " << type.getSemiring(); + return nullptr; + } + + result.set(rowIdx, colIdx, valueAttr); + } + + return result.build(); +} + +int main(int argc, char **argv) { + llvm::cl::ParseCommandLineOptions(argc, argv, "Execute GraphAlg program\n"); + mlir::MLIRContext ctx; + ctx.getOrLoadDialect(); + ctx.getOrLoadDialect(); + ctx.getOrLoadDialect(); + + llvm::SourceMgr sourceMgr; + mlir::SourceMgrDiagnosticHandler diagHandler(sourceMgr, &ctx); + std::string inputIncluded; + auto inputId = + sourceMgr.AddIncludeFile(cmd::input, llvm::SMLoc(), inputIncluded); + if (!inputId) { + llvm::WithColor::error() + << "could not find input file '" << cmd::input << "'\n"; + return 1; + } + + mlir::ParserConfig parserConfig(&ctx); + auto moduleOp = + 
mlir::parseSourceFile(sourceMgr, parserConfig); + if (!moduleOp) { + return 1; + } + + // Find function to execute. + auto funcOp = llvm::dyn_cast_if_present( + moduleOp->lookupSymbol(cmd::func)); + if (!funcOp) { + llvm::WithColor::error() + << "no such function '" << cmd::func << "' in Core IR\n"; + moduleOp->print(llvm::errs()); + return 1; + } + + // Number of arguments must match function parameters. + if (cmd::args.size() != funcOp.getFunctionType().getNumInputs()) { + funcOp->emitOpError("expected ") << funcOp.getFunctionType().getNumInputs() + << " arguments, got " << cmd::args.size(); + return 1; + } + + llvm::SmallVector args; + for (const auto &[i, filename] : llvm::enumerate(cmd::args)) { + auto type = funcOp.getFunctionType().getInput(i); + auto matType = llvm::dyn_cast(type); + if (!matType) { + funcOp->emitOpError("parameter ") + << i << " is not of type " << graphalg::MatrixType::getMnemonic(); + return 1; + } + + std::string argIncluded; + auto id = sourceMgr.AddIncludeFile(filename, llvm::SMLoc(), argIncluded); + if (!id) { + llvm::WithColor::error() + << "could not find argument file '" << filename << "'\n"; + return 1; + } + + auto parsed = parseMatrix(filename, sourceMgr.getMemoryBuffer(id), matType); + if (!parsed) { + return 1; + } + + args.push_back(parsed); + } + + auto result = graphalg::evaluate(funcOp, args); + if (!result) { + return 1; + } + + graphalg::MatrixAttrReader resultReader(result); + for (auto row : llvm::seq(resultReader.nRows())) { + for (auto col : llvm::seq(resultReader.nCols())) { + auto val = resultReader.at(row, col); + if (val != resultReader.ring().addIdentity()) { + llvm::outs() << row << " " << col << " " << val << "\n"; + } + } + } + + return 0; +} diff --git a/compiler/src/graphalg-opt.cpp b/compiler/src/graphalg-opt.cpp index 6ae37df..c25da8b 100644 --- a/compiler/src/graphalg-opt.cpp +++ b/compiler/src/graphalg-opt.cpp @@ -15,6 +15,7 @@ int main(int argc, char **argv) { registry.insert();
graphalg::registerPasses(); + graphalg::registerGraphAlgToCorePipeline(); mlir::registerCanonicalizerPass(); mlir::registerInlinerPass(); mlir::registerCSEPass(); diff --git a/compiler/src/graphalg/GraphAlgAttr.cpp b/compiler/src/graphalg/GraphAlgAttr.cpp index c67bc52..35f4a44 100644 --- a/compiler/src/graphalg/GraphAlgAttr.cpp +++ b/compiler/src/graphalg/GraphAlgAttr.cpp @@ -1,15 +1,17 @@ #include +#include #include +#include #include #include #include +#include #include #include #include #include #include -#include #include "graphalg/GraphAlgEnumAttr.cpp.inc" #define GET_ATTRDEF_CLASSES @@ -147,6 +149,42 @@ void DimAttr::printBare(mlir::AsmPrinter &printer) const { } } +mlir::LogicalResult +MatrixAttr::verify(llvm::function_ref emitError, + mlir::Type type, mlir::ArrayAttr elems) { + auto matType = llvm::dyn_cast(type); + if (!matType) { + return emitError() << "expected matrix type, got " << type; + } + if (!matType.getRows().isConcrete()) { + return emitError() << "row dimension is not concrete"; + } else if (!matType.getCols().isConcrete()) { + return emitError() << "col dimension is not concrete"; + } + + auto nRows = matType.getRows().getConcreteDim(); + auto nCols = matType.getCols().getConcreteDim(); + auto nElems = nRows * nCols; + if (nElems != elems.size()) { + return emitError() << "expected " << nRows << " * " << nCols << " = " + << nElems << " elements, got " << elems.size(); + } + + for (auto elem : elems) { + auto elemTyped = llvm::dyn_cast(elem); + if (!elemTyped) { + return emitError() << "untyped matrix element: " << elem; + } + + if (elemTyped.getType() != matType.getSemiring()) { + return emitError() << "element " << elem << " does not match matrix type " + << matType; + } + } + + return mlir::success(); +} + // Need to define this here to avoid depending on GraphAlgAttr in // GraphAlgDialect and creating a cycle.
void GraphAlgDialect::registerAttributes() { diff --git a/compiler/src/graphalg/GraphAlgToCorePipeline.cpp b/compiler/src/graphalg/GraphAlgToCorePipeline.cpp new file mode 100644 index 0000000..803a8df --- /dev/null +++ b/compiler/src/graphalg/GraphAlgToCorePipeline.cpp @@ -0,0 +1,25 @@ +#include +#include +#include + +#include "graphalg/GraphAlgPasses.h" + +namespace graphalg { + +void buildGraphAlgToCorePipeline(mlir::OpPassManager &pm, + const GraphAlgToCorePipelineOptions &options) { + pm.addPass(graphalg::createGraphAlgPrepareInline()); + pm.addPass(mlir::createInlinerPass()); + pm.addNestedPass( + graphalg::createGraphAlgScalarizeApply()); + pm.addNestedPass(graphalg::createGraphAlgToCore()); + pm.addPass(mlir::createCanonicalizerPass()); +} +void registerGraphAlgToCorePipeline() { + mlir::PassPipelineRegistration( + "graphalg-to-core-pipeline", + "Lowers GraphAlg source IR into core operations", + buildGraphAlgToCorePipeline); +} + +} // namespace graphalg diff --git a/compiler/src/graphalg/evaluate/Evaluator.cpp b/compiler/src/graphalg/evaluate/Evaluator.cpp new file mode 100644 index 0000000..d2d9dba --- /dev/null +++ b/compiler/src/graphalg/evaluate/Evaluator.cpp @@ -0,0 +1,471 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "graphalg/GraphAlgAttr.h" +#include "graphalg/GraphAlgCast.h" +#include "graphalg/GraphAlgDialect.h" +#include "graphalg/GraphAlgOps.h" +#include "graphalg/GraphAlgTypes.h" +#include "graphalg/SemiringTypes.h" +#include "graphalg/evaluate/Evaluator.h" + +namespace graphalg { + +namespace { + +class Evaluator { +private: + llvm::DenseMap _values; + + mlir::LogicalResult evaluate(TransposeOp op); + mlir::LogicalResult evaluate(DiagOp op); + mlir::LogicalResult evaluate(MatMulOp op); + mlir::LogicalResult evaluate(ReduceOp op); + mlir::LogicalResult evaluate(BroadcastOp op); + mlir::LogicalResult evaluate(ConstantMatrixOp op); + 
mlir::LogicalResult evaluate(ForConstOp op); + mlir::LogicalResult evaluate(ApplyOp op); + mlir::LogicalResult evaluate(PickAnyOp op); + mlir::LogicalResult evaluate(TrilOp op); + mlir::LogicalResult evaluate(mlir::Operation *op); + +public: + MatrixAttr evaluate(mlir::func::FuncOp funcOp, + llvm::ArrayRef args); +}; + +class ScalarEvaluator { +private: + llvm::SmallDenseMap _values; + + mlir::LogicalResult evaluate(ConstantOp op); + mlir::LogicalResult evaluate(AddOp op); + mlir::LogicalResult evaluate(MulOp op); + mlir::LogicalResult evaluate(CastScalarOp op); + mlir::LogicalResult evaluate(EqOp op); + mlir::LogicalResult evaluate(mlir::arith::DivFOp op); + mlir::LogicalResult evaluate(mlir::arith::SubIOp op); + mlir::LogicalResult evaluate(mlir::arith::SubFOp op); + mlir::LogicalResult evaluate(mlir::Operation *op); + +public: + mlir::TypedAttr evaluate(ApplyOp op, llvm::ArrayRef args); +}; + +} // namespace + +mlir::LogicalResult Evaluator::evaluate(TransposeOp op) { + MatrixAttrReader input(_values[op.getInput()]); + MatrixAttrBuilder result(op.getType()); + for (auto row : llvm::seq(input.nRows())) { + for (auto col : llvm::seq(input.nCols())) { + result.set(col, row, input.at(row, col)); + } + } + + _values[op.getResult()] = result.build(); + return mlir::success(); +} + +mlir::LogicalResult Evaluator::evaluate(DiagOp op) { + MatrixAttrReader input(_values[op.getInput()]); + MatrixAttrBuilder result(op.getType()); + + for (auto row : llvm::seq(input.nRows())) { + result.set(row, row, input.at(row, 0)); + } + + _values[op.getResult()] = result.build(); + return mlir::success(); +} + +mlir::LogicalResult Evaluator::evaluate(MatMulOp op) { + MatrixAttrReader lhs(_values[op.getLhs()]); + MatrixAttrReader rhs(_values[op.getRhs()]); + MatrixAttrBuilder result(op.getType()); + + auto ring = result.ring(); + // result[row, col] = SUM{i}(lhs[row, i] * rhs[i, col]) + for (auto row : llvm::seq(lhs.nRows())) { + for (auto col : llvm::seq(rhs.nCols())) { + auto value = 
ring.addIdentity(); + for (auto i : llvm::seq(lhs.nCols())) { + value = ring.add(value, ring.mul(lhs.at(row, i), rhs.at(i, col))); + } + + result.set(row, col, value); + } + } + + _values[op.getResult()] = result.build(); + return mlir::success(); +} + +mlir::LogicalResult Evaluator::evaluate(ReduceOp op) { + MatrixAttrReader input(_values[op.getInput()]); + MatrixAttrBuilder result(op.getType()); + + auto ring = result.ring(); + if (op.getType().isScalar()) { + // Reduce all to a single value. + auto value = ring.addIdentity(); + for (auto row : llvm::seq(input.nRows())) { + for (auto col : llvm::seq(input.nCols())) { + value = ring.add(value, input.at(row, col)); + } + } + + result.set(0, 0, value); + } else if (op.getType().isColumnVector()) { + // Per-row reduce. + for (auto row : llvm::seq(input.nRows())) { + auto value = ring.addIdentity(); + for (auto col : llvm::seq(input.nCols())) { + value = ring.add(value, input.at(row, col)); + } + + result.set(row, 0, value); + } + } else if (op.getType().isRowVector()) { + // Per-column reduce. + for (auto col : llvm::seq(input.nCols())) { + auto value = ring.addIdentity(); + for (auto row : llvm::seq(input.nRows())) { + value = ring.add(value, input.at(row, col)); + } + + result.set(0, col, value); + } + } else { + // Reduce nothing. + return op.emitOpError("Not reducing along any dimension"); + } + + _values[op.getResult()] = result.build(); + return mlir::success(); +} + +mlir::LogicalResult Evaluator::evaluate(BroadcastOp op) { + MatrixAttrReader input(_values[op.getInput()]); + MatrixAttrBuilder result(op.getType()); + + for (auto row : llvm::seq(result.nRows())) { + for (auto col : llvm::seq(result.nCols())) { + auto inRow = input.nRows() == 1 ? 0 : row; + auto inCol = input.nCols() == 1 ? 
0 : col; + result.set(row, col, input.at(inRow, inCol)); + } + } + + _values[op.getResult()] = result.build(); + return mlir::success(); +} + +mlir::LogicalResult Evaluator::evaluate(ConstantMatrixOp op) { + MatrixAttrBuilder result(op.getType()); + + for (auto row : llvm::seq(result.nRows())) { + for (auto col : llvm::seq(result.nCols())) { + result.set(row, col, op.getValue()); + } + } + + _values[op.getResult()] = result.build(); + return mlir::success(); +} + +mlir::LogicalResult Evaluator::evaluate(ForConstOp op) { + MatrixAttrReader rangeBeginMat(_values[op.getRangeBegin()]); + MatrixAttrReader rangeEndMat(_values[op.getRangeEnd()]); + auto rangeBegin = + llvm::cast(rangeBeginMat.at(0, 0)).getInt(); + auto rangeEnd = llvm::cast(rangeEndMat.at(0, 0)).getInt(); + + auto &body = op.getBody().front(); + auto *ctx = op.getContext(); + + // Initialize block arguments + for (auto [init, blockArg] : + llvm::zip_equal(op.getInitArgs(), body.getArguments().drop_front())) { + _values[blockArg] = _values[init]; + } + + for (auto i : llvm::seq(rangeBegin, rangeEnd)) { + // Iteration variable. + auto iterAttr = mlir::IntegerAttr::get(SemiringTypes::forInt(ctx), i); + auto iterArg = body.getArgument(0); + auto iterType = llvm::cast(iterArg.getType()); + MatrixAttrBuilder iterBuilder(iterType); + iterBuilder.set(0, 0, iterAttr); + _values[body.getArgument(0)] = iterBuilder.build(); + + for (auto &op : body) { + if (auto yieldOp = llvm::dyn_cast(op)) { + // Update block arguments + for (auto [value, blockArg] : llvm::zip_equal( + yieldOp.getInputs(), body.getArguments().drop_front())) { + _values[blockArg] = _values[value]; + } + } else if (mlir::failed(evaluate(&op))) { + return mlir::failure(); + } + } + + bool breakFromUntil = false; + if (!op.getUntil().empty()) { + // Have an until clause to evaluate. + auto &until = op.getUntil().front(); + + // Use current state of loop variables as input to until block. 
+ for (auto [bodyArg, untilArg] : + llvm::zip_equal(body.getArguments(), until.getArguments())) { + _values[untilArg] = _values[bodyArg]; + } + + for (auto &op : until) { + if (auto yieldOp = llvm::dyn_cast(op)) { + // Check break condition + assert(yieldOp->getNumOperands() == 1); + MatrixAttrReader condMat(_values[yieldOp.getInputs().front()]); + breakFromUntil = + llvm::cast(condMat.at(0, 0)).getValue(); + } else if (mlir::failed(evaluate(&op))) { + return mlir::failure(); + } + } + } + + if (breakFromUntil) { + break; + } + } + + // Set loop results. + for (auto [value, result] : + llvm::zip_equal(body.getArguments().drop_front(), op->getResults())) { + _values[result] = _values[value]; + } + + return mlir::success(); +} + +mlir::LogicalResult Evaluator::evaluate(ApplyOp op) { + llvm::SmallVector inputs; + for (auto input : op.getInputs()) { + inputs.emplace_back(_values[input]); + } + + MatrixAttrBuilder result(op.getType()); + for (auto row : llvm::seq(result.nRows())) { + for (auto col : llvm::seq(result.nCols())) { + llvm::SmallVector args; + for (const auto &input : inputs) { + args.push_back(input.at(row, col)); + } + + ScalarEvaluator scalarEvaluator; + auto value = scalarEvaluator.evaluate(op, args); + if (!value) { + return mlir::failure(); + } + + result.set(row, col, value); + } + } + + _values[op] = result.build(); + return mlir::success(); +} + +mlir::LogicalResult Evaluator::evaluate(PickAnyOp op) { + MatrixAttrReader input(_values[op.getInput()]); + MatrixAttrBuilder result(op.getType()); + + for (auto row : llvm::seq(input.nRows())) { + for (auto col : llvm::seq(input.nCols())) { + auto value = input.at(row, col); + if (value != result.ring().addIdentity()) { + result.set(row, col, value); + break; + } + } + } + + _values[op.getResult()] = result.build(); + return mlir::success(); +} + +mlir::LogicalResult Evaluator::evaluate(TrilOp op) { + MatrixAttrReader input(_values[op.getInput()]); + MatrixAttrBuilder result(op.getType()); + + for (auto 
row : llvm::seq(input.nRows())) { + for (auto col : llvm::seq(input.nCols())) { + if (col < row) { + auto value = input.at(row, col); + result.set(row, col, value); + } + } + } + + _values[op.getResult()] = result.build(); + return mlir::success(); +} + +mlir::LogicalResult Evaluator::evaluate(mlir::Operation *op) { + return llvm::TypeSwitch(op) +#define GA_CASE(Op) .Case([&](Op op) { return evaluate(op); }) + GA_CASE(TransposeOp) GA_CASE(DiagOp) GA_CASE(MatMulOp) GA_CASE(ReduceOp) + GA_CASE(BroadcastOp) GA_CASE(ConstantMatrixOp) GA_CASE(ForConstOp) + GA_CASE(ApplyOp) GA_CASE(PickAnyOp) GA_CASE(TrilOp) +#undef GA_CASE + .Default([](mlir::Operation *op) { + return op->emitOpError("unsupported op"); + }); +} + +MatrixAttr Evaluator::evaluate(mlir::func::FuncOp funcOp, + llvm::ArrayRef args) { + auto &body = funcOp.getFunctionBody().front(); + if (body.getNumArguments() != args.size()) { + funcOp->emitOpError("function has ") + << funcOp.getFunctionType().getNumInputs() << " inputs, got " + << args.size() << " inputs"; + return nullptr; + } + + for (auto [i, value] : llvm::enumerate(args)) { + auto arg = body.getArgument(i); + if (arg.getType() != value.getType()) { + mlir::emitError(arg.getLoc()) + << "parameter " << i << " has type " << arg.getType() + << ", but argument value has type " << value.getType(); + return nullptr; + } + + _values[arg] = value; + } + + for (auto &op : body) { + if (auto retOp = llvm::dyn_cast(op)) { + assert(retOp->getNumOperands() == 1); + return _values[retOp->getOperand(0)]; + } + + if (mlir::failed(evaluate(&op))) { + return nullptr; + } + } + + funcOp->emitOpError("missing return op"); + return nullptr; +} + +mlir::LogicalResult ScalarEvaluator::evaluate(ConstantOp op) { + _values[op] = op.getValue(); + return mlir::success(); +} + +mlir::LogicalResult ScalarEvaluator::evaluate(AddOp op) { + auto ring = llvm::cast(op.getType()); + _values[op] = ring.add(_values[op.getLhs()], _values[op.getRhs()]); + return mlir::success(); +} + 
+mlir::LogicalResult ScalarEvaluator::evaluate(MulOp op) { + auto ring = llvm::cast(op.getType()); + _values[op] = ring.mul(_values[op.getLhs()], _values[op.getRhs()]); + return mlir::success(); +} + +mlir::LogicalResult ScalarEvaluator::evaluate(CastScalarOp op) { + auto *dialect = op->getContext()->getLoadedDialect(); + _values[op] = dialect->castAttribute(_values[op.getInput()], op.getType()); + return mlir::success(); +} + +mlir::LogicalResult ScalarEvaluator::evaluate(EqOp op) { + auto ring = llvm::cast(op.getType()); + bool eq = _values[op.getLhs()] == _values[op.getRhs()]; + _values[op] = mlir::BoolAttr::get(op.getContext(), eq); + return mlir::success(); +} + +mlir::LogicalResult ScalarEvaluator::evaluate(mlir::arith::DivFOp op) { + auto lhs = + llvm::cast(_values[op.getLhs()]).getValueAsDouble(); + auto rhs = + llvm::cast(_values[op.getRhs()]).getValueAsDouble(); + double result = rhs == 0 ? 0 : lhs / rhs; + _values[op] = mlir::FloatAttr::get(op.getType(), result); + return mlir::success(); +} + +mlir::LogicalResult ScalarEvaluator::evaluate(mlir::arith::SubIOp op) { + auto lhs = llvm::cast(_values[op.getLhs()]).getInt(); + auto rhs = llvm::cast(_values[op.getRhs()]).getInt(); + auto result = lhs - rhs; + _values[op] = mlir::IntegerAttr::get(op.getType(), result); + return mlir::success(); +} + +mlir::LogicalResult ScalarEvaluator::evaluate(mlir::arith::SubFOp op) { + auto lhs = + llvm::cast(_values[op.getLhs()]).getValueAsDouble(); + auto rhs = + llvm::cast(_values[op.getRhs()]).getValueAsDouble(); + double result = lhs - rhs; + _values[op] = mlir::FloatAttr::get(op.getType(), result); + return mlir::success(); +} + +mlir::LogicalResult ScalarEvaluator::evaluate(mlir::Operation *op) { + return llvm::TypeSwitch(op) +#define GA_CASE(Op) .Case([&](Op op) { return evaluate(op); }) + GA_CASE(ConstantOp) GA_CASE(AddOp) GA_CASE(MulOp) GA_CASE(CastScalarOp) + GA_CASE(EqOp) GA_CASE(mlir::arith::DivFOp) + GA_CASE(mlir::arith::SubIOp) GA_CASE(mlir::arith::SubFOp) 
+#undef GA_CASE + .Default([](mlir::Operation *op) { + return op->emitOpError("unsupported op"); + }); +} + +mlir::TypedAttr +ScalarEvaluator::evaluate(ApplyOp op, llvm::ArrayRef args) { + auto &block = op.getBody().front(); + for (auto [blockArg, value] : llvm::zip_equal(block.getArguments(), args)) { + _values[blockArg] = value; + } + + for (auto &op : block) { + if (auto retOp = llvm::dyn_cast(op)) { + return _values[retOp.getValue()]; + } else if (mlir::failed(evaluate(&op))) { + return nullptr; + } + } + + op->emitOpError("missing return op"); + return nullptr; +} + +MatrixAttr evaluate(mlir::func::FuncOp funcOp, + llvm::ArrayRef args) { + Evaluator evaluator; + return evaluator.evaluate(funcOp, args); +} + +} // namespace graphalg diff --git a/compiler/test/CMakeLists.txt b/compiler/test/CMakeLists.txt index 1f8c48e..3411870 100644 --- a/compiler/test/CMakeLists.txt +++ b/compiler/test/CMakeLists.txt @@ -12,5 +12,8 @@ configure_lit_site_cfg( add_lit_testsuite(check "Run integration tests" ${CMAKE_CURRENT_BINARY_DIR} - DEPENDS graphalg-translate graphalg-opt + DEPENDS + graphalg-exec + graphalg-opt + graphalg-translate ) diff --git a/compiler/test/exec/add.mlir b/compiler/test/exec/add.mlir new file mode 100644 index 0000000..fb85cef --- /dev/null +++ b/compiler/test/exec/add.mlir @@ -0,0 +1,24 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Add %t/input.m | diff - %t/output.m + +//--- input.m +0 0 11 +1 0 13 +1 1 14 + +//--- input.mlir +func.func @Add(%arg0: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<2 x 2 x i64> { + %0 = graphalg.apply %arg0 : !graphalg.mat<2 x 2 x i64> -> <2 x 2 x i64> { + ^bb0(%arg1: i64): + %1 = graphalg.const 42 : i64 + %2 = graphalg.add %arg1, %1 : i64 + graphalg.apply.return %2 : i64 + } + return %0 : !graphalg.mat<2 x 2 x i64> +} + +//--- output.m +0 0 53 : i64 +0 1 42 : i64 +1 0 55 : i64 +1 1 56 : i64 diff --git a/compiler/test/exec/apply.mlir b/compiler/test/exec/apply.mlir new file mode 100644 index 
0000000..e874823 --- /dev/null +++ b/compiler/test/exec/apply.mlir @@ -0,0 +1,30 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Apply %t/lhs.m %t/rhs.m | diff - %t/output.m + +//--- lhs.m +0 0 3 +0 1 5 +1 0 7 +1 1 11 + +//--- rhs.m +0 0 13 +0 1 17 +1 0 19 +1 1 23 + +//--- input.mlir +func.func @Apply(%arg0: !graphalg.mat<2 x 2 x i64>, %arg1: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<2 x 2 x i64> { + %0 = graphalg.apply %arg0, %arg1 : !graphalg.mat<2 x 2 x i64>, !graphalg.mat<2 x 2 x i64> -> <2 x 2 x i64> { + ^bb0(%arg2: i64, %arg3: i64): + %1 = graphalg.add %arg2, %arg3 : i64 + graphalg.apply.return %1 : i64 + } + return %0 : !graphalg.mat<2 x 2 x i64> +} + +//--- output.m +0 0 16 : i64 +0 1 22 : i64 +1 0 26 : i64 +1 1 34 : i64 diff --git a/compiler/test/exec/broadcast.mlir b/compiler/test/exec/broadcast.mlir new file mode 100644 index 0000000..6625d0c --- /dev/null +++ b/compiler/test/exec/broadcast.mlir @@ -0,0 +1,18 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Broadcast %t/input.m | diff - %t/output.m + +//--- input.m +0 0 42 + +//--- input.mlir +func.func @Broadcast(%arg0: !graphalg.mat<1 x 1 x i64>) + -> !graphalg.mat<2 x 2 x i64> { + %0 = graphalg.broadcast %arg0 : <1 x 1 x i64> -> <2 x 2 x i64> + return %0 : !graphalg.mat<2 x 2 x i64> +} + +//--- output.m +0 0 42 : i64 +0 1 42 : i64 +1 0 42 : i64 +1 1 42 : i64 diff --git a/compiler/test/exec/cast.mlir b/compiler/test/exec/cast.mlir new file mode 100644 index 0000000..27dfcac --- /dev/null +++ b/compiler/test/exec/cast.mlir @@ -0,0 +1,22 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Cast %t/input.m | diff - %t/output.m + +//--- input.m +0 0 11 +1 0 13 +1 1 14 + +//--- input.mlir +func.func @Cast(%arg0: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<2 x 2 x f64> { + %0 = graphalg.apply %arg0 : !graphalg.mat<2 x 2 x i64> -> <2 x 2 x f64> { + ^bb0(%arg1: i64): + %1 = graphalg.cast_scalar %arg1 : i64 -> f64 + graphalg.apply.return %1 : f64 + } + return 
%0 : !graphalg.mat<2 x 2 x f64> +} + +//--- output.m +0 0 1.100000e+01 : f64 +1 0 1.300000e+01 : f64 +1 1 1.400000e+01 : f64 diff --git a/compiler/test/exec/diag.mlir b/compiler/test/exec/diag.mlir new file mode 100644 index 0000000..340f2b7 --- /dev/null +++ b/compiler/test/exec/diag.mlir @@ -0,0 +1,16 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Diag %t/input.m | diff - %t/output.m + +//--- input.m +0 0 42 +1 0 43 + +//--- input.mlir +func.func @Diag(%arg0: !graphalg.mat<2 x 1 x i64>) -> !graphalg.mat<2 x 2 x i64> { + %0 = graphalg.diag %arg0 : !graphalg.mat<2 x 1 x i64> + return %0 : !graphalg.mat<2 x 2 x i64> +} + +//--- output.m +0 0 42 : i64 +1 1 43 : i64 diff --git a/compiler/test/exec/div.mlir b/compiler/test/exec/div.mlir new file mode 100644 index 0000000..30766fa --- /dev/null +++ b/compiler/test/exec/div.mlir @@ -0,0 +1,26 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Div %t/lhs.m %t/rhs.m | diff - %t/output.m + +//--- lhs.m +0 0 42.0 +0 1 0.0 +1 1 42.0 + +//--- rhs.m +0 0 2.0 +0 1 2.0 +1 0 0.0 +1 1 0.0 + +//--- input.mlir +func.func @Div(%arg0: !graphalg.mat<2 x 2 x f64>, %arg1: !graphalg.mat<2 x 2 x f64>) -> !graphalg.mat<2 x 2 x f64> { + %0 = graphalg.apply %arg0, %arg1 : !graphalg.mat<2 x 2 x f64>, !graphalg.mat<2 x 2 x f64> -> <2 x 2 x f64> { + ^bb0(%arg2: f64, %arg3: f64): + %1 = arith.divf %arg2, %arg3 : f64 + graphalg.apply.return %1 : f64 + } + return %0 : !graphalg.mat<2 x 2 x f64> +} + +//--- output.m +0 0 2.100000e+01 : f64 diff --git a/compiler/test/exec/eq.mlir b/compiler/test/exec/eq.mlir new file mode 100644 index 0000000..c978099 --- /dev/null +++ b/compiler/test/exec/eq.mlir @@ -0,0 +1,21 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Eq %t/input.m | diff - %t/output.m + +//--- input.m +0 0 41 +1 0 42 +1 1 43 + +//--- input.mlir +func.func @Eq(%arg0: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<2 x 2 x i1> { + %0 = graphalg.apply %arg0 : !graphalg.mat<2 x 2 x i64> -> <2 x 2 x 
i1> { + ^bb0(%arg1: i64): + %1 = graphalg.const 42 : i64 + %2 = graphalg.eq %arg1, %1 : i64 + graphalg.apply.return %2 : i1 + } + return %0 : !graphalg.mat<2 x 2 x i1> +} + +//--- output.m +1 0 true diff --git a/compiler/test/exec/for.mlir b/compiler/test/exec/for.mlir new file mode 100644 index 0000000..6516987 --- /dev/null +++ b/compiler/test/exec/for.mlir @@ -0,0 +1,33 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Reach %t/graph.m %t/source.m | diff - %t/output.m + +//--- graph.m +0 1 +1 2 + +//--- source.m +0 0 + +//--- input.mlir +func.func @Reach(%arg0: !graphalg.mat<3 x 3 x i1>, %arg1: !graphalg.mat<3 x 1 x i1>) -> !graphalg.mat<3 x 1 x i1> { + %0 = graphalg.const_mat 0 : i64 -> <1 x 1 x i64> + %1 = graphalg.const_mat 3 : i64 -> <1 x 1 x i64> + %2 = graphalg.for_const range(%0, %1) : <1 x 1 x i64> init(%arg1) : !graphalg.mat<3 x 1 x i1> -> !graphalg.mat<3 x 1 x i1> body { + ^bb0(%arg2: !graphalg.mat<1 x 1 x i64>, %arg3: !graphalg.mat<3 x 1 x i1>): + %3 = graphalg.transpose %arg0 : <3 x 3 x i1> + %4 = graphalg.mxm %3, %arg3 : <3 x 3 x i1>, <3 x 1 x i1> + %5 = graphalg.apply %arg3, %4 : !graphalg.mat<3 x 1 x i1>, !graphalg.mat<3 x 1 x i1> -> <3 x 1 x i1> { + ^bb0(%arg4: i1, %arg5: i1): + %6 = graphalg.add %arg4, %arg5 : i1 + graphalg.apply.return %6 : i1 + } + graphalg.yield %5 : !graphalg.mat<3 x 1 x i1> + } until { + } + return %2 : !graphalg.mat<3 x 1 x i1> +} + +//--- output.m +0 0 true +1 0 true +2 0 true diff --git a/compiler/test/exec/matmul.mlir b/compiler/test/exec/matmul.mlir new file mode 100644 index 0000000..0c86d38 --- /dev/null +++ b/compiler/test/exec/matmul.mlir @@ -0,0 +1,28 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir MatMul %t/lhs.m %t/rhs.m | diff - %t/output.m + +//--- lhs.m +0 0 3 +0 1 5 +1 0 7 +1 1 11 + +//--- rhs.m +0 0 13 +0 1 17 +1 0 19 +1 1 23 + +//--- input.mlir +func.func @MatMul( + %arg0: !graphalg.mat<2 x 2 x i64>, + %arg1: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<2 x 2 x i64> { + %0 
= graphalg.mxm %arg0, %arg1 : <2 x 2 x i64>, <2 x 2 x i64> + return %0 : !graphalg.mat<2 x 2 x i64> +} + +//--- output.m +0 0 134 : i64 +0 1 166 : i64 +1 0 300 : i64 +1 1 372 : i64 diff --git a/compiler/test/exec/mul.mlir b/compiler/test/exec/mul.mlir new file mode 100644 index 0000000..0e8cfc4 --- /dev/null +++ b/compiler/test/exec/mul.mlir @@ -0,0 +1,23 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Mul %t/input.m | diff - %t/output.m + +//--- input.m +0 0 10 +1 0 20 +1 1 30 + +//--- input.mlir +func.func @Mul(%arg0: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<2 x 2 x i64> { + %0 = graphalg.apply %arg0 : !graphalg.mat<2 x 2 x i64> -> <2 x 2 x i64> { + ^bb0(%arg1: i64): + %1 = graphalg.const 2 : i64 + %2 = graphalg.mul %arg1, %1 : i64 + graphalg.apply.return %2 : i64 + } + return %0 : !graphalg.mat<2 x 2 x i64> +} + +//--- output.m +0 0 20 : i64 +1 0 40 : i64 +1 1 60 : i64 diff --git a/compiler/test/exec/pick_any.mlir b/compiler/test/exec/pick_any.mlir new file mode 100644 index 0000000..cf60054 --- /dev/null +++ b/compiler/test/exec/pick_any.mlir @@ -0,0 +1,19 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir PickAny %t/input.m | diff - %t/output.m + +//--- input.m +0 0 11 +0 1 12 +0 2 13 +1 1 14 +1 2 15 + +//--- input.mlir +func.func @PickAny(%arg0: !graphalg.mat<3 x 3 x i64>) -> !graphalg.mat<3 x 3 x i64> { + %0 = graphalg.pick_any %arg0 : <3 x 3 x i64> + return %0 : !graphalg.mat<3 x 3 x i64> +} + +//--- output.m +0 0 11 : i64 +1 1 14 : i64 diff --git a/compiler/test/exec/reduce-cols.mlir b/compiler/test/exec/reduce-cols.mlir new file mode 100644 index 0000000..0c8cd0c --- /dev/null +++ b/compiler/test/exec/reduce-cols.mlir @@ -0,0 +1,18 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir ReduceCols %t/input.m | diff - %t/output.m + +//--- input.m +0 0 3 +0 1 5 +1 0 7 +1 1 11 + +//--- input.mlir +func.func @ReduceCols(%arg0: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<1 x 2 x i64> { + %0 = graphalg.reduce 
%arg0 : <2 x 2 x i64> -> <1 x 2 x i64> + return %0 : !graphalg.mat<1 x 2 x i64> +} + +//--- output.m +0 0 10 : i64 +0 1 16 : i64 diff --git a/compiler/test/exec/reduce-rows.mlir b/compiler/test/exec/reduce-rows.mlir new file mode 100644 index 0000000..a1b23f1 --- /dev/null +++ b/compiler/test/exec/reduce-rows.mlir @@ -0,0 +1,18 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir ReduceRows %t/input.m | diff - %t/output.m + +//--- input.m +0 0 3 +0 1 5 +1 0 7 +1 1 11 + +//--- input.mlir +func.func @ReduceRows(%arg0: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<2 x 1 x i64> { + %0 = graphalg.reduce %arg0 : <2 x 2 x i64> -> <2 x 1 x i64> + return %0 : !graphalg.mat<2 x 1 x i64> +} + +//--- output.m +0 0 8 : i64 +1 0 18 : i64 diff --git a/compiler/test/exec/reduce.mlir b/compiler/test/exec/reduce.mlir new file mode 100644 index 0000000..a87c2c2 --- /dev/null +++ b/compiler/test/exec/reduce.mlir @@ -0,0 +1,17 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Reduce %t/input.m | diff - %t/output.m + +//--- input.m +0 0 3 +0 1 5 +1 0 7 +1 1 11 + +//--- input.mlir +func.func @Reduce(%arg0: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<1 x 1 x i64> { + %0 = graphalg.reduce %arg0 : <2 x 2 x i64> -> <1 x 1 x i64> + return %0 : !graphalg.mat<1 x 1 x i64> +} + +//--- output.m +0 0 26 : i64 diff --git a/compiler/test/exec/sub-int.mlir b/compiler/test/exec/sub-int.mlir new file mode 100644 index 0000000..1523409 --- /dev/null +++ b/compiler/test/exec/sub-int.mlir @@ -0,0 +1,23 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Sub %t/input.m | diff - %t/output.m + +//--- input.m +0 0 41 +1 0 42 +1 1 43 + +//--- input.mlir +func.func @Sub(%arg0: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<2 x 2 x i64> { + %0 = graphalg.apply %arg0 : !graphalg.mat<2 x 2 x i64> -> <2 x 2 x i64> { + ^bb0(%arg1: i64): + %1 = graphalg.const 42 : i64 + %2 = arith.subi %arg1, %1 : i64 + graphalg.apply.return %2 : i64 + } + return %0 : !graphalg.mat<2 x 2 x 
i64> +} + +//--- output.m +0 0 -1 : i64 +0 1 -42 : i64 +1 1 1 : i64 diff --git a/compiler/test/exec/sub-real.mlir b/compiler/test/exec/sub-real.mlir new file mode 100644 index 0000000..f068b4e --- /dev/null +++ b/compiler/test/exec/sub-real.mlir @@ -0,0 +1,23 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Sub %t/input.m | diff - %t/output.m + +//--- input.m +0 0 41.0 +1 0 42.0 +1 1 43.0 + +//--- input.mlir +func.func @Sub(%arg0: !graphalg.mat<2 x 2 x f64>) -> !graphalg.mat<2 x 2 x f64> { + %0 = graphalg.apply %arg0 : !graphalg.mat<2 x 2 x f64> -> <2 x 2 x f64> { + ^bb0(%arg1: f64): + %1 = graphalg.const 42.0 : f64 + %2 = arith.subf %arg1, %1 : f64 + graphalg.apply.return %2 : f64 + } + return %0 : !graphalg.mat<2 x 2 x f64> +} + +//--- output.m +0 0 -1.000000e+00 : f64 +0 1 -4.200000e+01 : f64 +1 1 1.000000e+00 : f64 diff --git a/compiler/test/exec/transpose.mlir b/compiler/test/exec/transpose.mlir new file mode 100644 index 0000000..0592c2d --- /dev/null +++ b/compiler/test/exec/transpose.mlir @@ -0,0 +1,20 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Transpose %t/input.m | diff - %t/output.m + +//--- input.m +0 0 1 +0 1 2 +1 0 3 +1 1 4 + +//--- input.mlir +func.func @Transpose(%arg0: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<2 x 2 x i64> { + %0 = graphalg.transpose %arg0 : <2 x 2 x i64> + return %0 : !graphalg.mat<2 x 2 x i64> +} + +//--- output.m +0 0 1 : i64 +0 1 3 : i64 +1 0 2 : i64 +1 1 4 : i64 diff --git a/compiler/test/exec/tril.mlir b/compiler/test/exec/tril.mlir new file mode 100644 index 0000000..95ff8b4 --- /dev/null +++ b/compiler/test/exec/tril.mlir @@ -0,0 +1,17 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Tril %t/input.m | diff - %t/output.m + +//--- input.m +0 0 1 +0 1 2 +1 0 3 +1 1 4 + +//--- input.mlir +func.func @Tril(%arg0: !graphalg.mat<2 x 2 x i64>) -> !graphalg.mat<2 x 2 x i64> { + %0 = graphalg.tril %arg0 : <2 x 2 x i64> + return %0 : !graphalg.mat<2 x 2 x i64> +} + +//--- 
output.m +1 0 3 : i64 diff --git a/compiler/test/exec/until.mlir b/compiler/test/exec/until.mlir new file mode 100644 index 0000000..7ad1e26 --- /dev/null +++ b/compiler/test/exec/until.mlir @@ -0,0 +1,31 @@ +// RUN: split-file %s %t +// RUN: graphalg-exec %t/input.mlir Fib | diff - %t/output.m + +//--- input.mlir +func.func @Fib() -> !graphalg.mat<1 x 1 x i64> { + %0 = graphalg.const_mat 0 : i64 -> <1 x 1 x i64> + %1 = graphalg.const_mat 1 : i64 -> <1 x 1 x i64> + %2 = graphalg.const_mat 1000000 : i64 -> <1 x 1 x i64> + %3:2 = graphalg.for_const range(%0, %2) : <1 x 1 x i64> init(%0, %1) : !graphalg.mat<1 x 1 x i64>, !graphalg.mat<1 x 1 x i64> -> !graphalg.mat<1 x 1 x i64>, !graphalg.mat<1 x 1 x i64> body { + ^bb0(%arg0: !graphalg.mat<1 x 1 x i64>, %arg1: !graphalg.mat<1 x 1 x i64>, %arg2: !graphalg.mat<1 x 1 x i64>): + %4 = graphalg.apply %arg1, %arg2 : !graphalg.mat<1 x 1 x i64>, !graphalg.mat<1 x 1 x i64> -> <1 x 1 x i64> { + ^bb0(%arg3: i64, %arg4: i64): + %5 = graphalg.add %arg3, %arg4 : i64 + graphalg.apply.return %5 : i64 + } + graphalg.yield %arg2, %4 : !graphalg.mat<1 x 1 x i64>, !graphalg.mat<1 x 1 x i64> + } until { + ^bb0(%arg0: !graphalg.mat<1 x 1 x i64>, %arg1: !graphalg.mat<1 x 1 x i64>, %arg2: !graphalg.mat<1 x 1 x i64>): + %4 = graphalg.apply %arg2 : !graphalg.mat<1 x 1 x i64> -> <1 x 1 x i1> { + ^bb0(%arg3: i64): + %5 = graphalg.const 34 : i64 + %6 = graphalg.eq %arg3, %5 : i64 + graphalg.apply.return %6 : i1 + } + graphalg.yield %4 : !graphalg.mat<1 x 1 x i1> + } + return %3#1 : !graphalg.mat<1 x 1 x i64> +} + +//--- output.m +0 0 34 : i64 diff --git a/compiler/test/lit.cfg.py b/compiler/test/lit.cfg.py index 3371ca1..0ad7b62 100644 --- a/compiler/test/lit.cfg.py +++ b/compiler/test/lit.cfg.py @@ -14,9 +14,11 @@ # The tools we want to use in lit test (inside RUN) tools = [ + "graphalg-exec", "graphalg-opt", "graphalg-translate", "mlir-opt", + "split-file", ] # Where we look for the tools