From ab84213157416a99982116180fa31f4074377f95 Mon Sep 17 00:00:00 2001 From: Rahul Joshi Date: Fri, 31 Jan 2025 13:53:21 -0800 Subject: [PATCH] [TableGen] Emit OpName as an enum class instead of a namespace - Change InstrInfoEmitter to emit OpName as an enum class instead of an anonymous enum in the OpName namespace. - This will help clearly distinguish between values that are OpNames vs just operand indices and should help avoid bugs due to confusion between the two. - Also updated AMDGPU, RISCV, and WebAssembly backends to conform to the new definition of OpName (mostly mechanical changes). --- llvm/docs/WritingAnLLVMBackend.rst | 14 +- .../AMDGPU/AsmParser/AMDGPUAsmParser.cpp | 60 ++++----- .../Disassembler/AMDGPUDisassembler.cpp | 40 +++--- llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp | 6 +- .../lib/Target/AMDGPU/GCNHazardRecognizer.cpp | 2 +- .../AMDGPU/MCA/AMDGPUCustomBehaviour.cpp | 2 +- .../Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h | 3 +- .../AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp | 4 +- .../MCTargetDesc/AMDGPUMCCodeEmitter.cpp | 9 +- .../AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h | 1 - .../AMDGPU/MCTargetDesc/R600MCTargetDesc.h | 1 - .../Target/AMDGPU/R600ExpandSpecialInstrs.cpp | 5 +- llvm/lib/Target/AMDGPU/R600InstrInfo.cpp | 125 +++++++++--------- llvm/lib/Target/AMDGPU/R600InstrInfo.h | 17 +-- llvm/lib/Target/AMDGPU/R600Packetizer.cpp | 9 +- llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 12 +- llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp | 2 +- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 40 +++--- llvm/lib/Target/AMDGPU/SIInstrInfo.h | 23 ++-- .../Target/AMDGPU/SILoadStoreOptimizer.cpp | 20 +-- .../Target/AMDGPU/SIOptimizeExecMasking.cpp | 2 +- llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h | 9 +- llvm/lib/Target/RISCV/RISCVInstrInfo.h | 3 - .../Target/WebAssembly/WebAssemblyInstrInfo.h | 6 - llvm/test/TableGen/get-named-operand-idx.td | 85 ++++++++++++ llvm/utils/TableGen/InstrInfoEmitter.cpp | 18 ++- 26 files changed, 296 insertions(+), 222 deletions(-) create mode 100644 llvm/test/TableGen/get-named-operand-idx.td diff --git a/llvm/docs/WritingAnLLVMBackend.rst b/llvm/docs/WritingAnLLVMBackend.rst index 1b9173b1fe139..3c5d594cc605e 100644 --- a/llvm/docs/WritingAnLLVMBackend.rst +++ b/llvm/docs/WritingAnLLVMBackend.rst @@ -954,8 +954,8 @@ Instruction Operand Name Mapping TableGen will also generate a function called getNamedOperandIdx() which can be used to look up an operand's index in a MachineInstr based on its TableGen name. Setting the UseNamedOperandTable bit in an instruction's -TableGen definition will add all of its operands to an enumeration in the -llvm::XXX:OpName namespace and also add an entry for it into the OperandMap +TableGen definition will add all of its operands to an enumeration +llvm::XXX::OpName and also add an entry for it into the OperandMap table, which can be queried using getNamedOperandIdx() .. code-block:: text @@ -978,20 +978,18 @@ XXXInstrInfo.cpp: .. code-block:: c++ - #define GET_INSTRINFO_NAMED_OPS // For getNamedOperandIdx() function + // For getNamedOperandIdx() function definition. + #define GET_INSTRINFO_NAMED_OPS #include "XXXGenInstrInfo.inc" XXXInstrInfo.h: .. code-block:: c++ - #define GET_INSTRINFO_OPERAND_ENUM // For OpName enum + // For OpName enum and getNamedOperandIdx declaration.
+ #define GET_INSTRINFO_OPERAND_ENUM #include "XXXGenInstrInfo.inc" - namespace XXX { - int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex); - } // End namespace XXX - Instruction Operand Types ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp index 4ff9cff09f31d..54ed3789326cb 100644 --- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -1783,7 +1783,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser { bool validateMIMGMSAA(const MCInst &Inst); bool validateOpSel(const MCInst &Inst); bool validateTrue16OpSel(const MCInst &Inst); - bool validateNeg(const MCInst &Inst, int OpName); + bool validateNeg(const MCInst &Inst, AMDGPU::OpName OpName); bool validateDPP(const MCInst &Inst, const OperandVector &Operands); bool validateVccOperand(MCRegister Reg) const; bool validateVOPLiteral(const MCInst &Inst, const OperandVector &Operands); @@ -3959,8 +3959,9 @@ bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst, const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); - int RSrcOpName = (Desc.TSFlags & SIInstrFlags::MIMG) ? AMDGPU::OpName::srsrc - : AMDGPU::OpName::rsrc; + AMDGPU::OpName RSrcOpName = (Desc.TSFlags & SIInstrFlags::MIMG) + ? AMDGPU::OpName::srsrc + : AMDGPU::OpName::rsrc; int SrsrcIdx = AMDGPU::getNamedOperandIdx(Opc, RSrcOpName); int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim); int A16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::a16); @@ -4671,8 +4672,8 @@ bool AMDGPUAsmParser::validateTrue16OpSel(const MCInst &Inst) { if (OpSelOpValue == 0) return true; unsigned OpCount = 0; - for (int OpName : {AMDGPU::OpName::src0, AMDGPU::OpName::src1, - AMDGPU::OpName::src2, AMDGPU::OpName::vdst}) { + for (AMDGPU::OpName OpName : {AMDGPU::OpName::src0, AMDGPU::OpName::src1, + AMDGPU::OpName::src2, AMDGPU::OpName::vdst}) { int OpIdx = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), OpName); if (OpIdx == -1) continue; @@ -4690,7 +4691,7 @@ bool AMDGPUAsmParser::validateTrue16OpSel(const MCInst &Inst) { return true; } -bool AMDGPUAsmParser::validateNeg(const MCInst &Inst, int OpName) { +bool AMDGPUAsmParser::validateNeg(const MCInst &Inst, AMDGPU::OpName OpName) { assert(OpName == AMDGPU::OpName::neg_lo || OpName == AMDGPU::OpName::neg_hi); const unsigned Opc = Inst.getOpcode(); @@ -4715,9 +4716,9 @@ bool AMDGPUAsmParser::validateNeg(const MCInst &Inst, int OpName) { // It is convenient that such instructions don't have src_modifiers operand // for src operands that don't allow neg because they also don't allow opsel. - int SrcMods[3] = {AMDGPU::OpName::src0_modifiers, - AMDGPU::OpName::src1_modifiers, - AMDGPU::OpName::src2_modifiers}; + const AMDGPU::OpName SrcMods[3] = {AMDGPU::OpName::src0_modifiers, + AMDGPU::OpName::src1_modifiers, + AMDGPU::OpName::src2_modifiers}; for (unsigned i = 0; i < 3; ++i) { if (!AMDGPU::hasNamedOperand(Opc, SrcMods[i])) { @@ -4844,9 +4845,9 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst, } // Returns -1 if not a register, 0 if VGPR and 1 if AGPR. 
-static int IsAGPROperand(const MCInst &Inst, uint16_t NameIdx, +static int IsAGPROperand(const MCInst &Inst, AMDGPU::OpName Name, const MCRegisterInfo *MRI) { - int OpIdx = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), NameIdx); + int OpIdx = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), Name); if (OpIdx < 0) return -1; @@ -4867,12 +4868,13 @@ bool AMDGPUAsmParser::validateAGPRLdSt(const MCInst &Inst) const { SIInstrFlags::DS)) == 0) return true; - uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0 - : AMDGPU::OpName::vdata; + AMDGPU::OpName DataName = (TSFlags & SIInstrFlags::DS) + ? AMDGPU::OpName::data0 + : AMDGPU::OpName::vdata; const MCRegisterInfo *MRI = getMRI(); int DstAreg = IsAGPROperand(Inst, AMDGPU::OpName::vdst, MRI); - int DataAreg = IsAGPROperand(Inst, DataNameIdx, MRI); + int DataAreg = IsAGPROperand(Inst, DataName, MRI); if ((TSFlags & SIInstrFlags::DS) && DataAreg >= 0) { int Data2Areg = IsAGPROperand(Inst, AMDGPU::OpName::data1, MRI); @@ -8703,9 +8705,8 @@ static void cvtVOP3DstOpSelOnly(MCInst &Inst, const MCRegisterInfo &MRI) { return; int SrcNum; - const int Ops[] = { AMDGPU::OpName::src0, - AMDGPU::OpName::src1, - AMDGPU::OpName::src2 }; + const AMDGPU::OpName Ops[] = {AMDGPU::OpName::src0, AMDGPU::OpName::src1, + AMDGPU::OpName::src2}; for (SrcNum = 0; SrcNum < 3 && AMDGPU::hasNamedOperand(Opc, Ops[SrcNum]); ++SrcNum) ; @@ -8827,12 +8828,11 @@ void AMDGPUAsmParser::cvtVINTERP(MCInst &Inst, const OperandVector &Operands) if (OpSelIdx == -1) return; - const int Ops[] = { AMDGPU::OpName::src0, - AMDGPU::OpName::src1, - AMDGPU::OpName::src2 }; - const int ModOps[] = { AMDGPU::OpName::src0_modifiers, - AMDGPU::OpName::src1_modifiers, - AMDGPU::OpName::src2_modifiers }; + const AMDGPU::OpName Ops[] = {AMDGPU::OpName::src0, AMDGPU::OpName::src1, + AMDGPU::OpName::src2}; + const AMDGPU::OpName ModOps[] = {AMDGPU::OpName::src0_modifiers, + AMDGPU::OpName::src1_modifiers, + AMDGPU::OpName::src2_modifiers}; unsigned OpSel = Inst.getOperand(OpSelIdx).getImm(); @@ -8968,12 +8968,11 @@ void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands, if (NegHiIdx != -1) addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi); - const int Ops[] = { AMDGPU::OpName::src0, - AMDGPU::OpName::src1, - AMDGPU::OpName::src2 }; - const int ModOps[] = { AMDGPU::OpName::src0_modifiers, - AMDGPU::OpName::src1_modifiers, - AMDGPU::OpName::src2_modifiers }; + const AMDGPU::OpName Ops[] = {AMDGPU::OpName::src0, AMDGPU::OpName::src1, + AMDGPU::OpName::src2}; + const AMDGPU::OpName ModOps[] = {AMDGPU::OpName::src0_modifiers, + AMDGPU::OpName::src1_modifiers, + AMDGPU::OpName::src2_modifiers}; unsigned OpSel = 0; unsigned OpSelHi = 0; @@ -9036,7 +9035,8 @@ void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands) { } static void addSrcModifiersAndSrc(MCInst &Inst, const OperandVector &Operands, - unsigned i, unsigned Opc, unsigned OpName) { + unsigned i, unsigned Opc, + AMDGPU::OpName OpName) { if (AMDGPU::getNamedOperandIdx(Opc, OpName) != -1) ((AMDGPUOperand &)*Operands[i]).addRegOrImmWithFPInputModsOperands(Inst, 2); else diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp index 02ad08740049d..308ab8e3b82c4 100644 --- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -75,8 +75,8 @@ addOperand(MCInst &Inst, const MCOperand& Opnd) { } static int insertNamedMCOperand(MCInst 
&MI, const MCOperand &Op, - uint16_t NameIdx) { - int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx); + AMDGPU::OpName Name) { + int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), Name); if (OpIdx != -1) { auto *I = MI.begin(); std::advance(I, OpIdx); @@ -423,10 +423,11 @@ static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm, // are also tied. unsigned Opc = Inst.getOpcode(); uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags; - uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0 - : AMDGPU::OpName::vdata; + AMDGPU::OpName DataName = (TSFlags & SIInstrFlags::DS) + ? AMDGPU::OpName::data0 + : AMDGPU::OpName::vdata; const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo(); - int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataNameIdx); + int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataName); if ((int)Inst.getNumOperands() == DataIdx) { int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); if (IsAGPROperand(Inst, DstIdx, MRI)) @@ -922,9 +923,9 @@ static VOPModifiers collectVOPModifiers(const MCInst &MI, bool IsVOP3P = false) { VOPModifiers Modifiers; unsigned Opc = MI.getOpcode(); - const int ModOps[] = {AMDGPU::OpName::src0_modifiers, - AMDGPU::OpName::src1_modifiers, - AMDGPU::OpName::src2_modifiers}; + const AMDGPU::OpName ModOps[] = {AMDGPU::OpName::src0_modifiers, + AMDGPU::OpName::src1_modifiers, + AMDGPU::OpName::src2_modifiers}; for (int J = 0; J < 3; ++J) { int OpIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]); if (OpIdx == -1) @@ -951,15 +952,15 @@ void AMDGPUDisassembler::convertTrue16OpSel(MCInst &MI) const { const unsigned Opc = MI.getOpcode(); const MCRegisterClass &ConversionRC = MRI.getRegClass(AMDGPU::VGPR_16RegClassID); - constexpr std::array, 4> OpAndOpMods = { - {{AMDGPU::OpName::src0, AMDGPU::OpName::src0_modifiers, - SISrcMods::OP_SEL_0}, - {AMDGPU::OpName::src1, AMDGPU::OpName::src1_modifiers, - SISrcMods::OP_SEL_0}, - {AMDGPU::OpName::src2, AMDGPU::OpName::src2_modifiers, - SISrcMods::OP_SEL_0}, - {AMDGPU::OpName::vdst, AMDGPU::OpName::src0_modifiers, - SISrcMods::DST_OP_SEL}}}; + constexpr std::array, 4> + OpAndOpMods = {{{AMDGPU::OpName::src0, AMDGPU::OpName::src0_modifiers, + SISrcMods::OP_SEL_0}, + {AMDGPU::OpName::src1, AMDGPU::OpName::src1_modifiers, + SISrcMods::OP_SEL_0}, + {AMDGPU::OpName::src2, AMDGPU::OpName::src2_modifiers, + SISrcMods::OP_SEL_0}, + {AMDGPU::OpName::vdst, AMDGPU::OpName::src0_modifiers, + SISrcMods::DST_OP_SEL}}}; for (const auto &[OpName, OpModsName, OpSelMask] : OpAndOpMods) { int OpIdx = AMDGPU::getNamedOperandIdx(Opc, OpName); int OpModsIdx = AMDGPU::getNamedOperandIdx(Opc, OpModsName); @@ -1069,8 +1070,9 @@ void AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const { AMDGPU::OpName::vdata); int VAddr0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0); - int RsrcOpName = (TSFlags & SIInstrFlags::MIMG) ? AMDGPU::OpName::srsrc - : AMDGPU::OpName::rsrc; + AMDGPU::OpName RsrcOpName = (TSFlags & SIInstrFlags::MIMG) + ? 
AMDGPU::OpName::srsrc + : AMDGPU::OpName::rsrc; int RsrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), RsrcOpName); int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dmask); diff --git a/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp b/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp index cc802b5fbb67c..b22babb4a00d8 100644 --- a/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp +++ b/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp @@ -70,9 +70,7 @@ class GCNDPPCombine { RegSubRegPair CombOldVGPR, bool CombBCZ, bool IsShrinkable) const; - bool hasNoImmOrEqual(MachineInstr &MI, - unsigned OpndName, - int64_t Value, + bool hasNoImmOrEqual(MachineInstr &MI, AMDGPU::OpName OpndName, int64_t Value, int64_t Mask = -1) const; bool combineDPPMov(MachineInstr &MI) const; @@ -513,7 +511,7 @@ MachineInstr *GCNDPPCombine::createDPPInst( // returns true if MI doesn't have OpndName immediate operand or the // operand has Value -bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName, +bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, AMDGPU::OpName OpndName, int64_t Value, int64_t Mask) const { auto *Imm = TII->getNamedOperand(MI, OpndName); if (!Imm) diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp index b0f087737afa7..827598078af53 100644 --- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp +++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp @@ -1310,7 +1310,7 @@ bool GCNHazardRecognizer::fixSMEMtoVectorWriteHazards(MachineInstr *MI) { if (!SIInstrInfo::isVALU(*MI)) return false; - unsigned SDSTName; + AMDGPU::OpName SDSTName; switch (MI->getOpcode()) { case AMDGPU::V_READLANE_B32: case AMDGPU::V_READFIRSTLANE_B32: diff --git a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp index ed9c48ff9c4de..d0043bcc920b6 100644 --- a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp +++ b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp @@ -308,7 +308,7 @@ bool AMDGPUCustomBehaviour::isVMEM(const MCInstrDesc &MCID) { // taken from SIInstrInfo::hasModifiersSet() bool AMDGPUCustomBehaviour::hasModifiersSet( - const std::unique_ptr &Inst, unsigned OpName) const { + const std::unique_ptr &Inst, AMDGPU::OpName OpName) const { int Idx = AMDGPU::getNamedOperandIdx(Inst->getOpcode(), OpName); if (Idx == -1) return false; diff --git a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h index 3a231758887ba..85b9c188b5d1a 100644 --- a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h +++ b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h @@ -17,6 +17,7 @@ #ifndef LLVM_LIB_TARGET_AMDGPU_MCA_AMDGPUCUSTOMBEHAVIOUR_H #define LLVM_LIB_TARGET_AMDGPU_MCA_AMDGPUCUSTOMBEHAVIOUR_H +#include "Utils/AMDGPUBaseInfo.h" #include "llvm/ADT/SmallVector.h" #include "llvm/MCA/CustomBehaviour.h" #include "llvm/TargetParser/TargetParser.h" @@ -66,7 +67,7 @@ class AMDGPUCustomBehaviour : public CustomBehaviour { void generateWaitCntInfo(); /// Helper function used in generateWaitCntInfo() bool hasModifiersSet(const std::unique_ptr &Inst, - unsigned OpName) const; + AMDGPU::OpName OpName) const; /// Helper function used in generateWaitCntInfo() bool isGWS(uint16_t Opcode) const; /// Helper function used in generateWaitCntInfo() diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp index c389f3a13d952..381841f142855 100644 --- 
a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp @@ -1205,7 +1205,7 @@ void AMDGPUInstPrinter::printPackedModifier(const MCInst *MI, int NumOps = 0; int Ops[3]; - std::pair MOps[] = { + std::pair MOps[] = { {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src0}, {AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src1}, {AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::src2}}; @@ -1226,7 +1226,7 @@ void AMDGPUInstPrinter::printPackedModifier(const MCInst *MI, MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::IsWMMA) { NumOps = 0; int DefaultValue = Mod == SISrcMods::OP_SEL_1; - for (int OpName : + for (AMDGPU::OpName OpName : {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src2_modifiers}) { int Idx = AMDGPU::getNamedOperandIdx(Opc, OpName); diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp index b9a424bb1d059..1391ef6dd09e5 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp @@ -340,14 +340,13 @@ AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO, uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const { using namespace AMDGPU::VOP3PEncoding; - using namespace AMDGPU::OpName; - if (AMDGPU::hasNamedOperand(Opcode, op_sel_hi)) { - if (AMDGPU::hasNamedOperand(Opcode, src2)) + if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::op_sel_hi)) { + if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2)) return 0; - if (AMDGPU::hasNamedOperand(Opcode, src1)) + if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1)) return OP_SEL_HI_2; - if (AMDGPU::hasNamedOperand(Opcode, src0)) + if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src0)) return OP_SEL_HI_1 | OP_SEL_HI_2; } return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2; diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h index 879dbe1b279b1..9c0b2da0fcb0a 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h @@ -50,7 +50,6 @@ createAMDGPUELFObjectWriter(bool Is64Bit, uint8_t OSABI, #include "AMDGPUGenRegisterInfo.inc" #define GET_INSTRINFO_ENUM -#define GET_INSTRINFO_OPERAND_ENUM #define GET_INSTRINFO_MC_HELPER_DECLS #include "AMDGPUGenInstrInfo.inc" diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCTargetDesc.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCTargetDesc.h index cf40e7eccb5d2..20f2cb826ac4b 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCTargetDesc.h +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCTargetDesc.h @@ -32,7 +32,6 @@ MCInstrInfo *createR600MCInstrInfo(); #include "R600GenRegisterInfo.inc" #define GET_INSTRINFO_ENUM -#define GET_INSTRINFO_OPERAND_ENUM #define GET_INSTRINFO_SCHED_ENUM #define GET_INSTRINFO_MC_HELPER_DECLS #include "R600GenInstrInfo.inc" diff --git a/llvm/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp b/llvm/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp index ef2d049f91752..429ce0e0857ac 100644 --- a/llvm/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp +++ b/llvm/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp @@ -31,7 +31,7 @@ class R600ExpandSpecialInstrsPass : public MachineFunctionPass { const R600InstrInfo *TII = nullptr; void SetFlagInNewMI(MachineInstr *NewMI, const MachineInstr *OldMI, - unsigned Op); + R600::OpName Op); public: static char ID; 
@@ -61,7 +61,8 @@ FunctionPass *llvm::createR600ExpandSpecialInstrsPass() { } void R600ExpandSpecialInstrsPass::SetFlagInNewMI(MachineInstr *NewMI, - const MachineInstr *OldMI, unsigned Op) { + const MachineInstr *OldMI, + R600::OpName Op) { int OpIdx = TII->getOperandIdx(*OldMI, Op); if (OpIdx > -1) { uint64_t Val = OldMI->getOperand(OpIdx).getImm(); diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp index f95649db2942e..1c4a992c87271 100644 --- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp @@ -222,19 +222,18 @@ bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const { } int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const { - static const unsigned SrcSelTable[][2] = { - {R600::OpName::src0, R600::OpName::src0_sel}, - {R600::OpName::src1, R600::OpName::src1_sel}, - {R600::OpName::src2, R600::OpName::src2_sel}, - {R600::OpName::src0_X, R600::OpName::src0_sel_X}, - {R600::OpName::src0_Y, R600::OpName::src0_sel_Y}, - {R600::OpName::src0_Z, R600::OpName::src0_sel_Z}, - {R600::OpName::src0_W, R600::OpName::src0_sel_W}, - {R600::OpName::src1_X, R600::OpName::src1_sel_X}, - {R600::OpName::src1_Y, R600::OpName::src1_sel_Y}, - {R600::OpName::src1_Z, R600::OpName::src1_sel_Z}, - {R600::OpName::src1_W, R600::OpName::src1_sel_W} - }; + static const R600::OpName SrcSelTable[][2] = { + {R600::OpName::src0, R600::OpName::src0_sel}, + {R600::OpName::src1, R600::OpName::src1_sel}, + {R600::OpName::src2, R600::OpName::src2_sel}, + {R600::OpName::src0_X, R600::OpName::src0_sel_X}, + {R600::OpName::src0_Y, R600::OpName::src0_sel_Y}, + {R600::OpName::src0_Z, R600::OpName::src0_sel_Z}, + {R600::OpName::src0_W, R600::OpName::src0_sel_W}, + {R600::OpName::src1_X, R600::OpName::src1_sel_X}, + {R600::OpName::src1_Y, R600::OpName::src1_sel_Y}, + {R600::OpName::src1_Z, R600::OpName::src1_sel_Z}, + {R600::OpName::src1_W, R600::OpName::src1_sel_W}}; for (const auto &Row : SrcSelTable) { if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) { @@ -249,15 +248,15 @@ R600InstrInfo::getSrcs(MachineInstr &MI) const { SmallVector, 3> Result; if (MI.getOpcode() == R600::DOT_4) { - static const unsigned OpTable[8][2] = { - {R600::OpName::src0_X, R600::OpName::src0_sel_X}, - {R600::OpName::src0_Y, R600::OpName::src0_sel_Y}, - {R600::OpName::src0_Z, R600::OpName::src0_sel_Z}, - {R600::OpName::src0_W, R600::OpName::src0_sel_W}, - {R600::OpName::src1_X, R600::OpName::src1_sel_X}, - {R600::OpName::src1_Y, R600::OpName::src1_sel_Y}, - {R600::OpName::src1_Z, R600::OpName::src1_sel_Z}, - {R600::OpName::src1_W, R600::OpName::src1_sel_W}, + static const R600::OpName OpTable[8][2] = { + {R600::OpName::src0_X, R600::OpName::src0_sel_X}, + {R600::OpName::src0_Y, R600::OpName::src0_sel_Y}, + {R600::OpName::src0_Z, R600::OpName::src0_sel_Z}, + {R600::OpName::src0_W, R600::OpName::src0_sel_W}, + {R600::OpName::src1_X, R600::OpName::src1_sel_X}, + {R600::OpName::src1_Y, R600::OpName::src1_sel_Y}, + {R600::OpName::src1_Z, R600::OpName::src1_sel_Z}, + {R600::OpName::src1_W, R600::OpName::src1_sel_W}, }; for (const auto &Op : OpTable) { @@ -273,10 +272,10 @@ R600InstrInfo::getSrcs(MachineInstr &MI) const { return Result; } - static const unsigned OpTable[3][2] = { - {R600::OpName::src0, R600::OpName::src0_sel}, - {R600::OpName::src1, R600::OpName::src1_sel}, - {R600::OpName::src2, R600::OpName::src2_sel}, + static const R600::OpName OpTable[3][2] = { + {R600::OpName::src0, R600::OpName::src0_sel}, + {R600::OpName::src1, 
R600::OpName::src1_sel}, + {R600::OpName::src2, R600::OpName::src2_sel}, }; for (const auto &Op : OpTable) { @@ -1238,19 +1237,14 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB return MIB; } -#define OPERAND_CASE(Label) \ - case Label: { \ - static const unsigned Ops[] = \ - { \ - Label##_X, \ - Label##_Y, \ - Label##_Z, \ - Label##_W \ - }; \ - return Ops[Slot]; \ +#define OPERAND_CASE(Label) \ + case Label: { \ + static const R600::OpName Ops[] = {Label##_X, Label##_Y, Label##_Z, \ + Label##_W}; \ + return Ops[Slot]; \ } -static unsigned getSlotedOps(unsigned Op, unsigned Slot) { +static R600::OpName getSlotedOps(R600::OpName Op, unsigned Slot) { switch (Op) { OPERAND_CASE(R600::OpName::update_exec_mask) OPERAND_CASE(R600::OpName::update_pred) @@ -1292,21 +1286,21 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction( getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src1, Slot))); MachineInstr *MIB = buildDefaultInstruction( MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg()); - static const unsigned Operands[14] = { - R600::OpName::update_exec_mask, - R600::OpName::update_pred, - R600::OpName::write, - R600::OpName::omod, - R600::OpName::dst_rel, - R600::OpName::clamp, - R600::OpName::src0_neg, - R600::OpName::src0_rel, - R600::OpName::src0_abs, - R600::OpName::src0_sel, - R600::OpName::src1_neg, - R600::OpName::src1_rel, - R600::OpName::src1_abs, - R600::OpName::src1_sel, + static const R600::OpName Operands[14] = { + R600::OpName::update_exec_mask, + R600::OpName::update_pred, + R600::OpName::write, + R600::OpName::omod, + R600::OpName::dst_rel, + R600::OpName::clamp, + R600::OpName::src0_neg, + R600::OpName::src0_rel, + R600::OpName::src0_abs, + R600::OpName::src0_sel, + R600::OpName::src1_neg, + R600::OpName::src1_rel, + R600::OpName::src1_abs, + R600::OpName::src1_sel, }; MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(), @@ -1314,7 +1308,7 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction( MIB->getOperand(getOperandIdx(Opcode, R600::OpName::pred_sel)) .setReg(MO.getReg()); - for (unsigned Operand : Operands) { + for (R600::OpName Operand : Operands) { MachineOperand &MO = MI->getOperand( getOperandIdx(MI->getOpcode(), getSlotedOps(Operand, Slot))); assert (MO.isImm()); @@ -1340,15 +1334,16 @@ MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB, return buildDefaultInstruction(*MBB, I, R600::MOV, DstReg, SrcReg); } -int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const { +int R600InstrInfo::getOperandIdx(const MachineInstr &MI, + R600::OpName Op) const { return getOperandIdx(MI.getOpcode(), Op); } -int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const { +int R600InstrInfo::getOperandIdx(unsigned Opcode, R600::OpName Op) const { return R600::getNamedOperandIdx(Opcode, Op); } -void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op, +void R600InstrInfo::setImmOperand(MachineInstr &MI, R600::OpName Op, int64_t Imm) const { int Idx = getOperandIdx(MI, Op); assert(Idx != -1 && "Operand not supported for this instruction."); @@ -1425,37 +1420,37 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx, return FlagOp; } -void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand, +void R600InstrInfo::addFlag(MachineInstr &MI, unsigned SrcIdx, unsigned Flag) const { unsigned TargetFlags = get(MI.getOpcode()).TSFlags; if (Flag == 0) { return; } if (HAS_NATIVE_OPERANDS(TargetFlags)) { - MachineOperand &FlagOp = getFlagOp(MI, 
Operand, Flag); + MachineOperand &FlagOp = getFlagOp(MI, SrcIdx, Flag); if (Flag == MO_FLAG_NOT_LAST) { - clearFlag(MI, Operand, MO_FLAG_LAST); + clearFlag(MI, SrcIdx, MO_FLAG_LAST); } else if (Flag == MO_FLAG_MASK) { - clearFlag(MI, Operand, Flag); + clearFlag(MI, SrcIdx, Flag); } else { FlagOp.setImm(1); } } else { - MachineOperand &FlagOp = getFlagOp(MI, Operand); - FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand))); + MachineOperand &FlagOp = getFlagOp(MI, SrcIdx); + FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * SrcIdx))); } } -void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand, +void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned SrcIdx, unsigned Flag) const { unsigned TargetFlags = get(MI.getOpcode()).TSFlags; if (HAS_NATIVE_OPERANDS(TargetFlags)) { - MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag); + MachineOperand &FlagOp = getFlagOp(MI, SrcIdx, Flag); FlagOp.setImm(0); } else { MachineOperand &FlagOp = getFlagOp(MI); unsigned InstFlags = FlagOp.getImm(); - InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand)); + InstFlags &= ~(Flag << (NUM_MO_FLAGS * SrcIdx)); FlagOp.setImm(InstFlags); } } diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.h b/llvm/lib/Target/AMDGPU/R600InstrInfo.h index c767ecb24590b..a403c65141fed 100644 --- a/llvm/lib/Target/AMDGPU/R600InstrInfo.h +++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.h @@ -18,6 +18,7 @@ #include "llvm/CodeGen/TargetInstrInfo.h" #define GET_INSTRINFO_HEADER +#define GET_INSTRINFO_OPERAND_ENUM #include "R600GenInstrInfo.inc" namespace llvm { @@ -287,21 +288,21 @@ class R600InstrInfo final : public R600GenInstrInfo { /// Get the index of Op in the MachineInstr. /// /// \returns -1 if the Instruction does not contain the specified \p Op. - int getOperandIdx(const MachineInstr &MI, unsigned Op) const; + int getOperandIdx(const MachineInstr &MI, R600::OpName Op) const; /// Get the index of \p Op for the given Opcode. /// /// \returns -1 if the Instruction does not contain the specified \p Op. - int getOperandIdx(unsigned Opcode, unsigned Op) const; + int getOperandIdx(unsigned Opcode, R600::OpName Op) const; /// Helper function for setting instruction flag values. - void setImmOperand(MachineInstr &MI, unsigned Op, int64_t Imm) const; + void setImmOperand(MachineInstr &MI, R600::OpName Op, int64_t Imm) const; - ///Add one of the MO_FLAG* flags to the specified \p Operand. - void addFlag(MachineInstr &MI, unsigned Operand, unsigned Flag) const; + /// Add one of the MO_FLAG* flags to the operand at \p SrcIdx. + void addFlag(MachineInstr &MI, unsigned SrcIdx, unsigned Flag) const; - ///Determine if the specified \p Flag is set on this \p Operand. - bool isFlagSet(const MachineInstr &MI, unsigned Operand, unsigned Flag) const; + /// Determine if the specified \p Flag is set on operand at \p SrcIdx. + bool isFlagSet(const MachineInstr &MI, unsigned SrcIdx, unsigned Flag) const; /// \param SrcIdx The register source to set the flag on (e.g src0, src1, src2) /// \param Flag The flag being set. @@ -311,7 +312,7 @@ class R600InstrInfo final : public R600GenInstrInfo { unsigned Flag = 0) const; /// Clear the specified flag on the instruction. 
- void clearFlag(MachineInstr &MI, unsigned Operand, unsigned Flag) const; + void clearFlag(MachineInstr &MI, unsigned SrcIdx, unsigned Flag) const; // Helper functions that check the opcode for status information bool isRegisterStore(const MachineInstr &MI) const { diff --git a/llvm/lib/Target/AMDGPU/R600Packetizer.cpp b/llvm/lib/Target/AMDGPU/R600Packetizer.cpp index 28bf6e33384d2..c1ed176ed29d2 100644 --- a/llvm/lib/Target/AMDGPU/R600Packetizer.cpp +++ b/llvm/lib/Target/AMDGPU/R600Packetizer.cpp @@ -122,12 +122,9 @@ class R600PacketizerList : public VLIWPacketizerList { void substitutePV(MachineInstr &MI, const DenseMap &PVs) const { - unsigned Ops[] = { - R600::OpName::src0, - R600::OpName::src1, - R600::OpName::src2 - }; - for (unsigned Op : Ops) { + const R600::OpName Ops[] = {R600::OpName::src0, R600::OpName::src1, + R600::OpName::src2}; + for (R600::OpName Op : Ops) { int OperandIdx = TII->getOperandIdx(MI.getOpcode(), Op); if (OperandIdx < 0) continue; diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp index 2bc19137b1ca0..d8f3f9c54abc1 100644 --- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -369,20 +369,20 @@ bool SIFoldOperandsImpl::tryFoldImmWithOpSel(FoldCandidate &Fold) const { // Refer to op_sel/op_sel_hi and check if we can change the immediate and // op_sel in a way that allows an inline constant. - int ModIdx = -1; + AMDGPU::OpName ModName = AMDGPU::OpName::NUM_OPERAND_NAMES; unsigned SrcIdx = ~0; if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0)) { - ModIdx = AMDGPU::OpName::src0_modifiers; + ModName = AMDGPU::OpName::src0_modifiers; SrcIdx = 0; } else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1)) { - ModIdx = AMDGPU::OpName::src1_modifiers; + ModName = AMDGPU::OpName::src1_modifiers; SrcIdx = 1; } else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2)) { - ModIdx = AMDGPU::OpName::src2_modifiers; + ModName = AMDGPU::OpName::src2_modifiers; SrcIdx = 2; } - assert(ModIdx != -1); - ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx); + assert(ModName != AMDGPU::OpName::NUM_OPERAND_NAMES); + int ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModName); MachineOperand &Mod = MI->getOperand(ModIdx); unsigned ModVal = Mod.getImm(); diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp index 3d6419778f4b1..ee263f58bcaf2 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp @@ -1139,7 +1139,7 @@ FunctionPass *llvm::createSIInsertWaitcntsPass() { return new SIInsertWaitcnts(); } -static bool updateOperandIfDifferent(MachineInstr &MI, uint16_t OpName, +static bool updateOperandIfDifferent(MachineInstr &MI, AMDGPU::OpName OpName, unsigned NewEnc) { int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName); assert(OpIdx >= 0); diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 35667801c809d..baacb5d3d5455 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -79,7 +79,8 @@ static unsigned getNumOperandsNoGlue(SDNode *Node) { /// Returns true if both nodes have the same value for the given /// operand \p Op, or if both nodes do not have this operand. 
-static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) { +static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, + AMDGPU::OpName OpName) { unsigned Opc0 = N0->getMachineOpcode(); unsigned Opc1 = N1->getMachineOpcode(); @@ -2701,11 +2702,10 @@ SIInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { return std::nullopt; } -bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, - MachineOperand &Src0, - unsigned Src0OpName, +bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, MachineOperand &Src0, + AMDGPU::OpName Src0OpName, MachineOperand &Src1, - unsigned Src1OpName) const { + AMDGPU::OpName Src1OpName) const { MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); if (!Src0Mods) return false; @@ -3432,14 +3432,14 @@ bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) { } } -static constexpr unsigned ModifierOpNames[] = { +static constexpr AMDGPU::OpName ModifierOpNames[] = { AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::clamp, AMDGPU::OpName::omod, AMDGPU::OpName::op_sel}; void SIInstrInfo::removeModOperands(MachineInstr &MI) const { unsigned Opc = MI.getOpcode(); - for (unsigned Name : reverse(ModifierOpNames)) { + for (AMDGPU::OpName Name : reverse(ModifierOpNames)) { int Idx = AMDGPU::getNamedOperandIdx(Opc, Name); if (Idx >= 0) MI.removeOperand(Idx); @@ -4494,14 +4494,14 @@ bool SIInstrInfo::hasModifiers(unsigned Opcode) const { } bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, - unsigned OpName) const { + AMDGPU::OpName OpName) const { const MachineOperand *Mods = getNamedOperand(MI, OpName); return Mods && Mods->getImm(); } bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { return any_of(ModifierOpNames, - [&](unsigned Name) { return hasModifiersSet(MI, Name); }); + [&](AMDGPU::OpName Name) { return hasModifiersSet(MI, Name); }); } bool SIInstrInfo::canShrink(const MachineInstr &MI, @@ -5268,7 +5268,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, if (DimOp) { int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vaddr0); - int RSrcOpName = + AMDGPU::OpName RSrcOpName = isMIMG(MI) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc; int RsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, RSrcOpName); const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); @@ -5373,9 +5373,9 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) { const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); - uint16_t DataNameIdx = isDS(Opcode) ? AMDGPU::OpName::data0 - : AMDGPU::OpName::vdata; - const MachineOperand *Data = getNamedOperand(MI, DataNameIdx); + AMDGPU::OpName DataName = + isDS(Opcode) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata; + const MachineOperand *Data = getNamedOperand(MI, DataName); const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1); if (Data && !Data->isReg()) Data = nullptr; @@ -5405,7 +5405,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, } if (ST.needsAlignedVGPRs()) { - const auto isAlignedReg = [&MI, &MRI, this](unsigned OpName) -> bool { + const auto isAlignedReg = [&MI, &MRI, this](AMDGPU::OpName OpName) -> bool { const MachineOperand *Op = getNamedOperand(MI, OpName); if (!Op) return true; @@ -6890,13 +6890,15 @@ SIInstrInfo::legalizeOperands(MachineInstr &MI, // conversion to the addr64 form. 
if (isImage(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) && (isMUBUF(MI) || isMTBUF(MI)))) { - int RSrcOpName = (isVIMAGE(MI) || isVSAMPLE(MI)) ? AMDGPU::OpName::rsrc - : AMDGPU::OpName::srsrc; + AMDGPU::OpName RSrcOpName = (isVIMAGE(MI) || isVSAMPLE(MI)) + ? AMDGPU::OpName::rsrc + : AMDGPU::OpName::srsrc; MachineOperand *SRsrc = getNamedOperand(MI, RSrcOpName); if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) CreatedBB = loadMBUFScalarOperandsFromVGPR(*this, MI, {SRsrc}, MDT); - int SampOpName = isMIMG(MI) ? AMDGPU::OpName::ssamp : AMDGPU::OpName::samp; + AMDGPU::OpName SampOpName = + isMIMG(MI) ? AMDGPU::OpName::ssamp : AMDGPU::OpName::samp; MachineOperand *SSamp = getNamedOperand(MI, SampOpName); if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) CreatedBB = loadMBUFScalarOperandsFromVGPR(*this, MI, {SSamp}, MDT); @@ -8792,7 +8794,7 @@ Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI, } MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, - unsigned OperandName) const { + AMDGPU::OpName OperandName) const { int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); if (Idx == -1) return nullptr; @@ -10109,7 +10111,7 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, } void SIInstrInfo::enforceOperandRCAlignment(MachineInstr &MI, - unsigned OpName) const { + AMDGPU::OpName OpName) const { if (!ST.needsAlignedVGPRs()) return; diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h index 933935a86f9f9..6b0de138251ab 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h @@ -190,9 +190,9 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo { std::optional isCopyInstrImpl(const MachineInstr &MI) const override; - bool swapSourceModifiers(MachineInstr &MI, - MachineOperand &Src0, unsigned Src0OpName, - MachineOperand &Src1, unsigned Src1OpName) const; + bool swapSourceModifiers(MachineInstr &MI, MachineOperand &Src0, + AMDGPU::OpName Src0OpName, MachineOperand &Src1, + AMDGPU::OpName Src1OpName) const; bool isLegalToSwap(const MachineInstr &MI, unsigned fromIdx, const MachineOperand *fromMO, unsigned toIdx, const MachineOperand *toMO) const; @@ -1137,8 +1137,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo { /// e.g. src[012]_mod, omod, clamp. bool hasModifiers(unsigned Opcode) const; - bool hasModifiersSet(const MachineInstr &MI, - unsigned OpName) const; + bool hasModifiersSet(const MachineInstr &MI, AMDGPU::OpName OpName) const; bool hasAnyModifiersSet(const MachineInstr &MI) const; bool canShrink(const MachineInstr &MI, @@ -1294,17 +1293,19 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo { /// Returns the operand named \p Op. If \p MI does not have an /// operand named \c Op, this function returns nullptr. 
LLVM_READONLY - MachineOperand *getNamedOperand(MachineInstr &MI, unsigned OperandName) const; + MachineOperand *getNamedOperand(MachineInstr &MI, + AMDGPU::OpName OperandName) const; LLVM_READONLY const MachineOperand *getNamedOperand(const MachineInstr &MI, - unsigned OpName) const { - return getNamedOperand(const_cast(MI), OpName); + AMDGPU::OpName OperandName) const { + return getNamedOperand(const_cast(MI), OperandName); } /// Get required immediate operand - int64_t getNamedImmOperand(const MachineInstr &MI, unsigned OpName) const { - int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName); + int64_t getNamedImmOperand(const MachineInstr &MI, + AMDGPU::OpName OperandName) const { + int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); return MI.getOperand(Idx).getImm(); } @@ -1461,7 +1462,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo { // Enforce operand's \p OpName even alignment if required by target. // This is used if an operand is a 32 bit register but needs to be aligned // regardless. - void enforceOperandRCAlignment(MachineInstr &MI, unsigned OpName) const; + void enforceOperandRCAlignment(MachineInstr &MI, AMDGPU::OpName OpName) const; }; /// \brief Returns true if a reg:subreg pair P has a TRC class diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp index 708acc9f88445..39359d24cff0c 100644 --- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -228,11 +228,11 @@ class SILoadStoreOptimizer { CombineInfo *checkAndPrepareMerge(CombineInfo &CI, CombineInfo &Paired); void copyToDestRegs(CombineInfo &CI, CombineInfo &Paired, - MachineBasicBlock::iterator InsertBefore, int OpName, - Register DestReg) const; + MachineBasicBlock::iterator InsertBefore, + AMDGPU::OpName OpName, Register DestReg) const; Register copyFromSrcRegs(CombineInfo &CI, CombineInfo &Paired, MachineBasicBlock::iterator InsertBefore, - int OpName) const; + AMDGPU::OpName OpName) const; unsigned read2Opcode(unsigned EltSize) const; unsigned read2ST64Opcode(unsigned EltSize) const; @@ -699,7 +699,7 @@ static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) { if (TII.isImage(Opc)) { int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); if (VAddr0Idx >= 0) { - int RsrcName = + AMDGPU::OpName RsrcName = TII.isMIMG(Opc) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc; int RsrcIdx = AMDGPU::getNamedOperandIdx(Opc, RsrcName); Result.NumVAddrs = RsrcIdx - VAddr0Idx; @@ -968,11 +968,11 @@ bool SILoadStoreOptimizer::dmasksCanBeCombined(const CombineInfo &CI, return false; // Check other optional immediate operands for equality. - unsigned OperandsToMatch[] = {AMDGPU::OpName::cpol, AMDGPU::OpName::d16, - AMDGPU::OpName::unorm, AMDGPU::OpName::da, - AMDGPU::OpName::r128, AMDGPU::OpName::a16}; + AMDGPU::OpName OperandsToMatch[] = { + AMDGPU::OpName::cpol, AMDGPU::OpName::d16, AMDGPU::OpName::unorm, + AMDGPU::OpName::da, AMDGPU::OpName::r128, AMDGPU::OpName::a16}; - for (auto op : OperandsToMatch) { + for (AMDGPU::OpName op : OperandsToMatch) { int Idx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), op); if (AMDGPU::getNamedOperandIdx(Paired.I->getOpcode(), op) != Idx) return false; @@ -1256,7 +1256,7 @@ SILoadStoreOptimizer::checkAndPrepareMerge(CombineInfo &CI, // Paired. 
void SILoadStoreOptimizer::copyToDestRegs( CombineInfo &CI, CombineInfo &Paired, - MachineBasicBlock::iterator InsertBefore, int OpName, + MachineBasicBlock::iterator InsertBefore, AMDGPU::OpName OpName, Register DestReg) const { MachineBasicBlock *MBB = CI.I->getParent(); DebugLoc DL = CI.I->getDebugLoc(); @@ -1287,7 +1287,7 @@ void SILoadStoreOptimizer::copyToDestRegs( Register SILoadStoreOptimizer::copyFromSrcRegs(CombineInfo &CI, CombineInfo &Paired, MachineBasicBlock::iterator InsertBefore, - int OpName) const { + AMDGPU::OpName OpName) const { MachineBasicBlock *MBB = CI.I->getParent(); DebugLoc DL = CI.I->getDebugLoc(); diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp index 3fb8d5b560496..920c3e11e4718 100644 --- a/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp +++ b/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp @@ -619,7 +619,7 @@ bool SIOptimizeExecMasking::optimizeVCMPSaveExecSequence( VCmp.getDebugLoc(), TII->get(NewOpcode)); auto TryAddImmediateValueFromNamedOperand = - [&](unsigned OperandName) -> void { + [&](AMDGPU::OpName OperandName) -> void { if (auto *Mod = TII->getNamedOperand(VCmp, OperandName)) Builder.addImm(Mod->getImm()); }; diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h index 67bebfb3418d5..e458b6b9604b6 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h @@ -19,6 +19,10 @@ #include #include +// Pull in OpName enum definition and getNamedOperandIdx() declaration. +#define GET_INSTRINFO_OPERAND_ENUM +#include "AMDGPUGenInstrInfo.inc" + struct amd_kernel_code_t; namespace llvm { @@ -394,10 +398,7 @@ template struct EncodingFields { }; LLVM_READONLY -int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx); - -LLVM_READONLY -inline bool hasNamedOperand(uint64_t Opcode, uint64_t NamedIdx) { +inline bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx) { return getNamedOperandIdx(Opcode, NamedIdx) != -1; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h index 1c81719c767ec..ec628620d2982 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h @@ -330,9 +330,6 @@ isRVVSpillForZvlsseg(unsigned Opcode); bool isFaultFirstLoad(const MachineInstr &MI); -// Implemented in RISCVGenInstrInfo.inc -int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex); - // Return true if both input instructions have equal rounding mode. If at least // one of the instructions does not have rounding mode, false will be returned. 
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h index 8cb692f9bc0c4..474f04628066b 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h @@ -27,12 +27,6 @@ namespace llvm { -namespace WebAssembly { - -int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex); - -} - class WebAssemblySubtarget; class WebAssemblyInstrInfo final : public WebAssemblyGenInstrInfo { diff --git a/llvm/test/TableGen/get-named-operand-idx.td b/llvm/test/TableGen/get-named-operand-idx.td new file mode 100644 index 0000000000000..f5c5d93f9e522 --- /dev/null +++ b/llvm/test/TableGen/get-named-operand-idx.td @@ -0,0 +1,85 @@ +// RUN: llvm-tblgen -gen-instr-info -I %p/../../include %s | FileCheck %s + +// Check that OpName enum and getNamedOperandIdx are as expected. + +include "llvm/Target/Target.td" + +def archInstrInfo : InstrInfo { } + +def arch : Target { + let InstructionSet = archInstrInfo; +} + +class InstBase : Instruction { + let Namespace = "MyNamespace"; + let UseNamedOperandTable = 1; + let Size = 1; + field bits<8> Inst; +} + +def Reg : Register<"reg">; +def RegClass : RegisterClass<"foo", [i32], 0, (add Reg)>; + +def OpA : Operand; +def OpB : Operand; + +def RegOp : RegisterOperand; + +def InstA : InstBase { + let OutOperandList = (outs OpA:$a); + let InOperandList = (ins OpB:$b, i32imm:$c); +} + +def InstB : InstBase { + let OutOperandList = (outs i32imm:$d); + let InOperandList = (ins unknown:$x); +} + +def InstC : InstBase { + let OutOperandList = (outs RegClass:$d); + let InOperandList = (ins RegOp:$x); +} + +// InstD has UseNamedOperandTable = 0, so it won't be handled in +// getNamedOperandIdx(). 
+def InstD : InstBase { + let OutOperandList = (outs RegClass:$e); + let InOperandList = (ins RegOp:$f); + let UseNamedOperandTable = 0; +} + +// CHECK: #ifdef GET_INSTRINFO_OPERAND_ENUM +// CHECK: #undef GET_INSTRINFO_OPERAND_ENUM +// CHECK: namespace llvm::MyNamespace { +// CHECK: enum class OpName { +// CHECK: a = 0, +// CHECK: b = 1, +// CHECK: c = 2, +// CHECK: d = 3, +// CHECK: x = 4, +// CHECK: NUM_OPERAND_NAMES = 5, +// CHECK: }; // enum class OpName +// CHECK: } // end namespace llvm::MyNamespace +// CHECK: #endif //GET_INSTRINFO_OPERAND_ENUM + +// CHECK: #ifdef GET_INSTRINFO_NAMED_OPS +// CHECK: #undef GET_INSTRINFO_NAMED_OPS +// CHECK: namespace llvm::MyNamespace { +// CHECK: LLVM_READONLY +// CHECK: int16_t getNamedOperandIdx(uint16_t Opcode, OpName Name) { +// CHECK: assert(Name != OpName::NUM_OPERAND_NAMES); +// CHECK: static constexpr int8_t OperandMap[][5] = { +// CHECK: {0, 1, 2, -1, -1, }, +// CHECK: {-1, -1, -1, 0, 1, }, +// CHECK: }; +// CHECK: switch(Opcode) { +// CHECK: case MyNamespace::InstA: +// CHECK: return OperandMap[0][static_cast(Name)]; +// CHECK: case MyNamespace::InstB: +// CHECK: case MyNamespace::InstC: +// CHECK: return OperandMap[1][static_cast(Name)]; +// CHECK: default: return -1; +// CHECK: } +// CHECK: } +// CHECK: } // end namespace llvm::MyNamespace +// CHECK: #endif //GET_INSTRINFO_NAMED_OPS diff --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp index a46a83ea12284..3ea76ed414d91 100644 --- a/llvm/utils/TableGen/InstrInfoEmitter.cpp +++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp @@ -283,20 +283,23 @@ void InstrInfoEmitter::emitOperandNameMappings( OS << "#ifdef GET_INSTRINFO_OPERAND_ENUM\n"; OS << "#undef GET_INSTRINFO_OPERAND_ENUM\n"; - OS << "namespace llvm::" << Namespace << "::OpName {\n"; - OS << "enum {\n"; + OS << "namespace llvm::" << Namespace << " {\n"; + OS << "enum class OpName {\n"; for (const auto &[I, Op] : enumerate(OperandNameToID)) OS << " " << Op.first << " = " << I << ",\n"; - OS << " OPERAND_LAST = " << NumOperandNames << ",\n"; - OS << "};\n"; - OS << "} // end namespace llvm::" << Namespace << "::OpName\n"; + OS << " NUM_OPERAND_NAMES = " << NumOperandNames << ",\n"; + OS << "}; // enum class OpName\n\n"; + OS << "LLVM_READONLY\n"; + OS << "int16_t getNamedOperandIdx(uint16_t Opcode, OpName Name);\n"; + OS << "} // end namespace llvm::" << Namespace << '\n'; OS << "#endif //GET_INSTRINFO_OPERAND_ENUM\n\n"; OS << "#ifdef GET_INSTRINFO_NAMED_OPS\n"; OS << "#undef GET_INSTRINFO_NAMED_OPS\n"; OS << "namespace llvm::" << Namespace << " {\n"; OS << "LLVM_READONLY\n"; - OS << "int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx) {\n"; + OS << "int16_t getNamedOperandIdx(uint16_t Opcode, OpName Name) {\n"; + OS << " assert(Name != OpName::NUM_OPERAND_NAMES);\n"; if (NumOperandNames != 0) { assert(MaxOperandNo <= INT16_MAX && "Too many operands for the operand name -> index table"); @@ -320,7 +323,8 @@ void InstrInfoEmitter::emitOperandNameMappings( for (const auto &[TableIndex, Entry] : enumerate(OperandMap)) { for (StringRef Name : Entry.second) OS << " case " << Namespace << "::" << Name << ":\n"; - OS << " return OperandMap[" << TableIndex << "][NamedIdx];\n"; + OS << " return OperandMap[" << TableIndex + << "][static_cast(Name)];\n"; } OS << " default: return -1;\n"; OS << " }\n";
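
Below is a minimal sketch of how a backend consumes the generated interface after this change; it only restates what the updated WritingAnLLVMBackend.rst section and the new TableGen test above describe. The target name MYTGT and the operand name src0 are illustrative placeholders, not part of this patch.

    // MYTGTInstrInfo.h -- pulls in the generated `enum class OpName` and the
    // getNamedOperandIdx() declaration emitted under GET_INSTRINFO_OPERAND_ENUM.
    #define GET_INSTRINFO_OPERAND_ENUM
    #include "MYTGTGenInstrInfo.inc"

    // MYTGTInstrInfo.cpp -- pulls in the getNamedOperandIdx() definition emitted
    // under GET_INSTRINFO_NAMED_OPS.
    #define GET_INSTRINFO_NAMED_OPS
    #include "MYTGTGenInstrInfo.inc"

    // Callers now pass the strongly typed OpName value instead of a raw integer,
    // so an operand *index* can no longer be passed where an operand *name* is
    // expected.
    #include "llvm/CodeGen/MachineInstr.h"

    static bool hasNamedSrc0(const llvm::MachineInstr &MI) {
      int Idx = llvm::MYTGT::getNamedOperandIdx(MI.getOpcode(),
                                                llvm::MYTGT::OpName::src0);
      return Idx != -1; // -1 means the instruction has no operand named src0.
    }

Note that the NUM_OPERAND_NAMES enumerator, which replaces the old OPERAND_LAST value, also serves as an "invalid" sentinel; the SIFoldOperands.cpp change above uses it that way to initialize ModName before the matching source operand is known.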