[AArch64] Fix a multitude of AArch64 typos (NFC) #143370
Open
jthackray wants to merge 1 commit into main from users/jthackray/fix-aarch64-speelings
+125 −125
Conversation
Fix a multitude of typos in the AArch64 codebase using the https://github.com/crate-ci/typos Rust package.
@llvm/pr-subscribers-backend-aarch64

Author: Jonathan Thackray (jthackray)

Changes

Fix a multitude of typos in the AArch64 codebase using the https://github.com/crate-ci/typos Rust package.

Patch is 63.88 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/143370.diff

40 Files Affected:
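For reference, a minimal sketch of how a cleanup like this might be reproduced locally with the typos tool. The commands below are assumptions based on the typos-cli documentation, not part of this patch; the flag names and paths are illustrative only.

# Install the checker (assumes a Rust toolchain with cargo is available).
cargo install typos-cli

# Report suspected typos under the AArch64 backend without modifying any files.
typos llvm/lib/Target/AArch64/

# Apply the suggested corrections in place (--write-changes is the fix flag in typos-cli).
typos --write-changes llvm/lib/Target/AArch64/

After an automated pass like this, the resulting diff is still reviewed by hand, since spell checkers can flag intentional identifiers.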
diff --git a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
index 08a6fa2ea8db0..c85adcf85f8dc 100644
--- a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -73,11 +73,11 @@ class AArch64AdvSIMDScalar : public MachineFunctionPass {
bool isProfitableToTransform(const MachineInstr &MI) const;
// transformInstruction - Perform the transformation of an instruction
- // to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
+ // to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
void transformInstruction(MachineInstr &MI);
- // processMachineBasicBlock - Main optimzation loop.
+ // processMachineBasicBlock - Main optimization loop.
bool processMachineBasicBlock(MachineBasicBlock *MBB);
public:
@@ -231,7 +231,7 @@ bool AArch64AdvSIMDScalar::isProfitableToTransform(
// If any of the uses of the original instructions is a cross class copy,
// that's a copy that will be removable if we transform. Likewise, if
- // any of the uses is a transformable instruction, it's likely the tranforms
+ // any of the uses is a transformable instruction, it's likely the transforms
// will chain, enabling us to save a copy there, too. This is an aggressive
// heuristic that approximates the graph based cost analysis described above.
Register Dst = MI.getOperand(0).getReg();
@@ -280,7 +280,7 @@ static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
}
// transformInstruction - Perform the transformation of an instruction
-// to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
+// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
LLVM_DEBUG(dbgs() << "Scalar transform: " << MI);
@@ -372,7 +372,7 @@ void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
++NumScalarInsnsUsed;
}
-// processMachineBasicBlock - Main optimzation loop.
+// processMachineBasicBlock - Main optimization loop.
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
bool Changed = false;
for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {
diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 0d019bda36130..3f92c1dbfbf49 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -467,7 +467,7 @@ void AArch64AsmPrinter::emitAttributes(unsigned Flags,
PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;
if (PAuthABIPlatform || PAuthABIVersion) {
- TS->emitAtributesSubsection(
+ TS->emitAttributesSubsection(
AArch64BuildAttributes::getVendorName(
AArch64BuildAttributes::AEABI_PAUTHABI),
AArch64BuildAttributes::SubsectionOptional::REQUIRED,
@@ -490,7 +490,7 @@ void AArch64AsmPrinter::emitAttributes(unsigned Flags,
(Flags & AArch64BuildAttributes::Feature_GCS_Flag) ? 1 : 0;
if (BTIValue || PACValue || GCSValue) {
- TS->emitAtributesSubsection(
+ TS->emitAttributesSubsection(
AArch64BuildAttributes::getVendorName(
AArch64BuildAttributes::AEABI_FEATURE_AND_BITS),
AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
@@ -3531,7 +3531,7 @@ const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
char AArch64AsmPrinter::ID = 0;
INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
- "AArch64 Assmebly Printer", false, false)
+ "AArch64 Assembly Printer", false, false)
// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {
diff --git a/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp b/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
index 64f21c4cb2297..53e8e438c5e57 100644
--- a/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
@@ -232,7 +232,7 @@ static bool isCandidateLoad(const MachineInstr &MI) {
}
}
-/// Check whether the given instruction can load a litteral.
+/// Check whether the given instruction can load a literal.
static bool supportLoadFromLiteral(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
@@ -247,7 +247,7 @@ static bool supportLoadFromLiteral(const MachineInstr &MI) {
}
}
-/// Number of GPR registers traked by mapRegToGPRIndex()
+/// Number of GPR registers tracked by mapRegToGPRIndex()
static const unsigned N_GPR_REGS = 31;
/// Map register number to index from 0-30.
static int mapRegToGPRIndex(MCRegister Reg) {
diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index 9b59ee6483cd9..484bc2a4be8fa 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -573,7 +573,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
// Update the CFG first.
updateTailPHIs();
- // Save successor probabilties before removing CmpBB and Tail from their
+ // Save successor probabilities before removing CmpBB and Tail from their
// parents.
BranchProbability Head2CmpBB = MBPI->getEdgeProbability(Head, CmpBB);
BranchProbability CmpBB2Tail = MBPI->getEdgeProbability(CmpBB, Tail);
@@ -581,7 +581,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
Head->removeSuccessor(CmpBB);
CmpBB->removeSuccessor(Tail);
- // If Head and CmpBB had successor probabilties, udpate the probabilities to
+ // If Head and CmpBB had successor probabilities, update the probabilities to
// reflect the ccmp-conversion.
if (Head->hasSuccessorProbabilities() && CmpBB->hasSuccessorProbabilities()) {
@@ -596,7 +596,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
Head2Tail + Head2CmpBB * CmpBB2Tail);
// We will transfer successors of CmpBB to Head in a moment without
- // normalizing the successor probabilities. Set the successor probabilites
+ // normalizing the successor probabilities. Set the successor probabilities
// before doing so.
//
// Pr(I|Head) = Pr(CmpBB|Head) * Pr(I|CmpBB).
diff --git a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index 71284b0574e57..987dfbcdd53e9 100644
--- a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -64,10 +64,10 @@ static bool usesFrameIndex(const MachineInstr &MI) {
return false;
}
-// Instructions that lose their 'read' operation for a subesquent fence acquire
+// Instructions that lose their 'read' operation for a subsequent fence acquire
// (DMB LD) once the zero register is used.
//
-// WARNING: The aquire variants of the instructions are also affected, but they
+// WARNING: The acquire variants of the instructions are also affected, but they
// are split out into `atomicBarrierDroppedOnZero()` to support annotations on
// assembly.
static bool atomicReadDroppedOnZero(unsigned Opcode) {
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index bb7e6b662f80e..9d74bb5a8661d 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -508,7 +508,7 @@ Register AArch64FastISel::materializeGV(const GlobalValue *GV) {
// also uses BuildMI for making an ADRP (+ MOVK) + ADD, but the operands
// are not exactly 1:1 with FastISel so we cannot easily abstract this
// out. At some point, it would be nice to find a way to not have this
- // duplciate code.
+ // duplicate code.
Register DstReg = createResultReg(&AArch64::GPR64commonRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::MOVKXi),
DstReg)
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 22683237fa0a8..3335ee04bb0e0 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -399,7 +399,7 @@ static const unsigned DefaultSafeSPDisplacement = 255;
/// size limit beyond which some of these instructions will require a scratch
/// register during their expansion later.
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
- // FIXME: For now, just conservatively guestimate based on unscaled indexing
+ // FIXME: For now, just conservatively guesstimate based on unscaled indexing
// range. We'll end up allocating an unnecessary spill slot a lot, but
// realistically that's not a big deal at this stage of the game.
for (MachineBasicBlock &MBB : MF) {
@@ -647,7 +647,7 @@ void AArch64FrameLowering::emitCalleeSavedSVELocations(
continue;
// Not all unwinders may know about SVE registers, so assume the lowest
- // common demoninator.
+ // common denominator.
assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
MCRegister Reg = Info.getReg();
if (!static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg))
@@ -801,7 +801,7 @@ void AArch64FrameLowering::allocateStackSpace(
.addImm(InitialOffset.getFixed())
.addImm(InitialOffset.getScalable());
// The fixed allocation may leave unprobed bytes at the top of the
- // stack. If we have subsequent alocation (e.g. if we have variable-sized
+ // stack. If we have subsequent allocation (e.g. if we have variable-sized
// objects), we need to issue an extra probe, so these allocations start in
// a known state.
if (FollowupAllocs) {
@@ -2054,7 +2054,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
HasWinCFI = true;
// alloc_l can hold at most 256MB, so assume that NumBytes doesn't
// exceed this amount. We need to move at most 2^24 - 1 into x15.
- // This is at most two instructions, MOVZ follwed by MOVK.
+ // This is at most two instructions, MOVZ followed by MOVK.
// TODO: Fix to use multiple stack alloc unwind codes for stacks
// exceeding 256MB in size.
if (NumBytes >= (1 << 28))
@@ -2400,7 +2400,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineInstr::FrameDestroy, PrologueSaveSize);
} else {
// If not, make sure to emit an add after the last ldp.
- // We're doing this by transfering the size to be restored from the
+ // We're doing this by transferring the size to be restored from the
// adjustment *before* the CSR pops to the adjustment *after* the CSR
// pops.
AfterCSRPopSize += PrologueSaveSize;
@@ -2949,7 +2949,7 @@ static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
const TargetRegisterInfo *TRI) {
// If we are generating register pairs for a Windows function that requires
// EH support, then pair consecutive registers only. There are no unwind
- // opcodes for saves/restores of non-consectuve register pairs.
+ // opcodes for saves/restores of non-consecutive register pairs.
// The unwind opcodes are save_regp, save_regp_x, save_fregp, save_frepg_x,
// save_lrpair.
// https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
@@ -3187,7 +3187,7 @@ static void computeCalleeSaveRegisterPairs(
RPI.isPaired()) // RPI.FrameIdx must be the lower index of the pair
RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
- // Realign the scalable offset if necesary. This is relevant when
+ // Realign the scalable offset if necessary. This is relevant when
// spilling predicates on Windows.
if (RPI.isScalable() && ScalableByteOffset % Scale != 0) {
ScalableByteOffset = alignTo(ScalableByteOffset, Scale);
@@ -5022,7 +5022,7 @@ MachineBasicBlock::iterator tryMergeAdjacentSTG(MachineBasicBlock::iterator II,
}
// Find contiguous runs of tagged memory and emit shorter instruction
- // sequencies for them when possible.
+ // sequences for them when possible.
TagStoreEdit TSE(MBB, FirstZeroData);
std::optional<int64_t> EndOffset;
for (auto &Instr : Instrs) {
@@ -5591,7 +5591,7 @@ void AArch64FrameLowering::emitRemarks(
unsigned RegTy = StackAccess::AccessType::GPR;
if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
// SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
- // spill/fill the predicate as a data vector (so are an FPR acess).
+ // spill/fill the predicate as a data vector (so are an FPR access).
if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO &&
AArch64::PPRRegClass.contains(MI.getOperand(0).getReg())) {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 34f6db9374cb5..11cb91fbe02d4 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -991,7 +991,7 @@ bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
}
/// SelectArithUXTXRegister - Select a "UXTX register" operand. This
-/// operand is refered by the instructions have SP operand
+/// operand is referred by the instructions have SP operand
bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
SDValue &Shift) {
unsigned ShiftVal = 0;
@@ -2841,7 +2841,7 @@ static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
// After #1, x useful bits are 0x7, then the useful bits of x, live through
// y.
// After #2, the useful bits of x are 0x4.
-// However, if x is used on an unpredicatable instruction, then all its bits
+// However, if x is used on an unpredictable instruction, then all its bits
// are useful.
// E.g.
// 1. y = x & 0x7
@@ -3611,7 +3611,7 @@ static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
DstLSB = 0;
Width = ImmS - ImmR + 1;
// FIXME: This constraint is to catch bitfield insertion we may
- // want to widen the pattern if we want to grab general bitfied
+ // want to widen the pattern if we want to grab general bitfield
// move case
if (Width <= 0)
continue;
@@ -3999,7 +3999,7 @@ static int getIntOperandFromRegisterString(StringRef RegString) {
// Lower the read_register intrinsic to an MRS instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
-// form described in getIntOperandsFromRegsterString) or is a named register
+// form described in getIntOperandsFromRegisterString) or is a named register
// known by the MRS SysReg mapper.
bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
@@ -4060,7 +4060,7 @@ bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
-// form described in getIntOperandsFromRegsterString) or is a named register
+// form described in getIntOperandsFromRegisterString) or is a named register
// known by the MSR SysReg mapper.
bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
@@ -7278,7 +7278,7 @@ static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
}
/// Return the EVT of the data associated to a memory operation in \p
-/// Root. If such EVT cannot be retrived, it returns an invalid EVT.
+/// Root. If such EVT cannot be retrieved, it returns an invalid EVT.
static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(Root))
return MemIntr->getMemoryVT();
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 121720e7defd4..d30cfa257721f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5367,7 +5367,7 @@ static unsigned selectUmullSmull(SDValue &N0, SDValue &N1, SelectionDAG &DAG,
return AArch64ISD::UMULL;
} else if (VT == MVT::v2i64 && DAG.MaskedValueIsZero(N0, Mask) &&
DAG.MaskedValueIsZero(N1, Mask)) {
- // For v2i64 we look more aggresively at both operands being zero, to avoid
+ // For v2i64 we look more aggressively at both operands being zero, to avoid
// scalarization.
return AArch64ISD::UMULL;
}
@@ -5844,7 +5844,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
} else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
} else {
- report_fatal_error("Unexpected type for AArch64 NEON intrinic");
+ report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
}
}
case Intrinsic::aarch64_neon_pmull64: {
@@ -8630,9 +8630,9 @@ static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
if (SizeInBits < 8)
return false;
- APInt RequredZero(SizeInBits, 0xFE);
+ APInt RequiredZero(SizeInBits, 0xFE);
KnownBits Bits = DAG.computeKnownBits(Arg, 4);
- bool ZExtBool = (Bits.Zero & RequredZero) == RequredZero;
+ bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero;
return ZExtBool;
}
@@ -13536,7 +13536,7 @@ static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS);
} else {
assert(VT.getScalarSizeInBits() == 32 &&
- "Expected 16 or 32 bit shuffle elemements");
+ "Expected 16 or 32 bit shuffle elements");
Input = DAG.getBitcast(MVT::v2f64, Input);
OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS);
}
@@ -13941,7 +13941,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
unsigned NewEltCount = VT.getSizeInBits() / LaneSize;
MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount);
V1 = DAG.getBitcast(NewVecTy, V1);
- // Constuct the DUP instruction
+ // Construct the DUP instruction
V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG);
// Cast back to the original type
return DAG.getBitcast(VT, V1);
@@ -16900,12 +16900,12 @@ bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
}
bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
- Align &RequiredAligment) const {
+ Align &RequiredAlignment) const {
if (!LoadedType.isSimple() ||
(!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
return false;
// Cyclone supports unaligned accesses.
- RequiredAligment = Align(1);
+ RequiredAlignment = Align(1);
unsigned NumBits = LoadedType.getSizeInBits();
return NumBits == 32 || NumBits == 64;
}
@@ -18028,7 +18028,7 @@ static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
EXT1->getOperand(0)->getValueType(0) != MVT::v16i8)
return SDValue();
- // Pattern is dectected. Let's convert it to sequence of nodes.
+ // Pattern is detected. Let's convert it to sequence of nodes.
SDLoc DL(N);
// First, create the node pattern of UABD/SABD.
@@ -18246,10 +18246,10 @@ static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
DAG.getConstant(I * 16, DL, MVT::i64));
SDValue Dot =
DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros, Vec8Op0, Vec8Op1);
- SDValue VecReudceAdd8 =
+ SDValue VecReduceAdd8 =
DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot);
return DAG.getNode(ISD::ADD, DL, N->getValueType(0), VecReduceAdd16,
- ...
[truncated]
You can test this locally with the following command:

git-clang-format --diff HEAD~1 HEAD --extensions cpp,h -- llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp llvm/lib/Target/AArch64/AArch64CollectLOH.cpp llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp llvm/lib/Target/AArch64/AArch64FastISel.cpp llvm/lib/Target/AArch64/AArch64FrameLowering.cpp llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp llvm/lib/Target/AArch64/AArch64ISelLowering.cpp llvm/lib/Target/AArch64/AArch64ISelLowering.h llvm/lib/Target/AArch64/AArch64InstrInfo.cpp llvm/lib/Target/AArch64/AArch64InstrInfo.h llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp llvm/lib/Target/AArch64/AArch64SLSHardening.cpp llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp llvm/lib/Target/AArch64/AArch64Subtarget.h llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h llvm/lib/Target/AArch64/SMEPeepholeOpt.cpp

View the diff from clang-format here.

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 951cb93ea..2c9a836d2 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -10319,7 +10319,8 @@ unsigned llvm::getBLRCallOpcode(const MachineFunction &MF) {
MachineBasicBlock::iterator
AArch64InstrInfo::probedStackAlloc(MachineBasicBlock::iterator MBBI,
Register TargetReg, bool FrameSetup) const {
- assert(TargetReg != AArch64::SP && "New top of stack cannot already be in SP");
+ assert(TargetReg != AArch64::SP &&
+ "New top of stack cannot already be in SP");
MachineBasicBlock &MBB = *MBBI->getParent();
MachineFunction &MF = *MBB.getParent();
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 2f67ff55f..c3020faf6 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -8038,7 +8038,8 @@ bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
return true;
}
- getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional, Type);
+ getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional,
+ Type);
return false;
}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
index 3abbc1b68..4dc561583 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.h
@@ -123,9 +123,9 @@ class AArch64RegisterBankInfo final : public AArch64GenRegisterBankInfo {
/// \returns true if \p MI is a PHI that its def is used by
/// any instruction that onlyUsesFP.
bool isPHIWithFPConstraints(const MachineInstr &MI,
- const MachineRegisterInfo &MRI,
- const TargetRegisterInfo &TRI,
- unsigned Depth = 0) const;
+ const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI,
+ unsigned Depth = 0) const;
/// \returns true if \p MI only uses and defines FPRs.
bool hasFPConstraints(const MachineInstr &MI, const MachineRegisterInfo &MRI,
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
index f2144375f..174e17995 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
@@ -279,7 +279,7 @@ class AArch64TargetAsmStreamer : public AArch64TargetStreamer {
// Keep the data structure consistent with the case of ELF emission
// (important for llvm-mc asm parsing)
AArch64TargetStreamer::emitAttributesSubsection(SubsectionName, Optional,
- ParameterType);
+ ParameterType);
OS << "\n";
}
@@ -437,7 +437,7 @@ void AArch64TargetELFStreamer::emitAttributesSubsection(
StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional,
AArch64BuildAttributes::SubsectionType ParameterType) {
AArch64TargetStreamer::emitAttributesSubsection(VendorName, IsOptional,
- ParameterType);
+ ParameterType);
}
void AArch64TargetELFStreamer::emitAttribute(StringRef VendorName, unsigned Tag,
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
index 9d9e23e99..010aa2d05 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
@@ -216,7 +216,8 @@ void AArch64InstPrinter::printInst(const MCInst *MI, uint64_t Address,
if ((Op2.getReg() == AArch64::WZR || Op2.getReg() == AArch64::XZR) &&
(ImmR == 0 || ImmS < ImmR) && STI.hasFeature(AArch64::HasV8_2aOps)) {
- // BFC takes precedence over its entire range, slightly differently to BFI.
+ // BFC takes precedence over its entire range, slightly differently to
+ // BFI.
int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
int LSB = (BitWidth - ImmR) % BitWidth;
int Width = ImmS + 1;
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
index d878f1e04..4730c2a55 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
@@ -101,10 +101,10 @@ public:
virtual void emitARM64WinCFISavePReg(unsigned Reg, int Offset) {}
/// Build attributes implementation
- virtual void
- emitAttributesSubsection(StringRef VendorName,
- AArch64BuildAttributes::SubsectionOptional IsOptional,
- AArch64BuildAttributes::SubsectionType ParameterType);
+ virtual void emitAttributesSubsection(
+ StringRef VendorName,
+ AArch64BuildAttributes::SubsectionOptional IsOptional,
+ AArch64BuildAttributes::SubsectionType ParameterType);
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value,
std::string String);
void activateAttributesSubsection(StringRef VendorName);