From 42204c94ba9fcb0b4b1335e648ce140a3eef8a9d Mon Sep 17 00:00:00 2001 From: Bill Wendling <morbo@google.com> Date: Sun, 19 Nov 2023 01:00:46 -0800 Subject: [PATCH] Revert "[TargetInstrInfo] enable foldMemoryOperand for InlineAsm (#70743)" This reverts commit 99ee2db198d86f685bcb07a1495a7115ffc31d7e. It's causing ICEs in the ARM tests. See the comment here: https://github.com/llvm/llvm-project/commit/99ee2db198d86f685bcb07a1495a7115ffc31d7e --- llvm/include/llvm/CodeGen/TargetInstrInfo.h | 10 ---- llvm/lib/CodeGen/TargetInstrInfo.cpp | 62 --------------------- 2 files changed, 72 deletions(-) diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h index de065849eaa6eb..fe130d282ded15 100644 --- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h +++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h @@ -2188,16 +2188,6 @@ class TargetInstrInfo : public MCInstrInfo { // Get the call frame size just before MI. unsigned getCallFrameSizeAt(MachineInstr &MI) const; - /// Fills in the necessary MachineOperands to refer to a frame index. - /// The best way to understand this is to print `asm(""::"m"(x));` after - /// finalize-isel. Example: - /// INLINEASM ... 
262190 /* mem:m */, %stack.0.x.addr, 1, $noreg, 0, $noreg - /// we would add placeholders for: ^ ^ ^ ^ - virtual void - getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops) const { - llvm_unreachable("unknown number of operands necessary"); - } - private: mutable std::unique_ptr<MIRFormatter> Formatter; unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode; diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp index 5ede36505b5b00..3013a768bc4d56 100644 --- a/llvm/lib/CodeGen/TargetInstrInfo.cpp +++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp @@ -565,64 +565,6 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI, return NewMI; } -static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI, - const TargetInstrInfo &TII) { - MachineOperand &MO = MI->getOperand(OpNo); - const VirtRegInfo &RI = AnalyzeVirtRegInBundle(*MI, MO.getReg()); - - // If the machine operand is tied, untie it first. - if (MO.isTied()) { - unsigned TiedTo = MI->findTiedOperandIdx(OpNo); - MI->untieRegOperand(OpNo); - // Intentional recursion! - foldInlineAsmMemOperand(MI, TiedTo, FI, TII); - } - - // Change the operand from a register to a frame index. - MO.ChangeToFrameIndex(FI, MO.getTargetFlags()); - - SmallVector<MachineOperand> NewOps; - TII.getFrameIndexOperands(NewOps); - assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands"); - MI->insert(MI->operands_begin() + OpNo + 1, NewOps); - - // Change the previous operand to a MemKind InlineAsm::Flag. The second param - // is the per-target number of operands that represent the memory operand - // excluding this one (MD). This includes MO. - InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size() + 1); - F.setMemConstraint(InlineAsm::ConstraintCode::m); - MachineOperand &MD = MI->getOperand(OpNo - 1); - MD.setImm(F); - - // Update mayload/maystore metadata. 
- MachineOperand &ExtraMO = MI->getOperand(InlineAsm::MIOp_ExtraInfo); - if (RI.Reads) - ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad); - if (RI.Writes) - ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore); -} - -// Returns nullptr if not possible to fold. -static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI, - ArrayRef<unsigned> Ops, int FI, - const TargetInstrInfo &TII) { - assert(MI.isInlineAsm() && "wrong opcode"); - if (Ops.size() > 1) - return nullptr; - unsigned Op = Ops[0]; - assert(Op && "should never be first operand"); - assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands"); - - if (!MI.mayFoldInlineAsmRegOp(Op)) - return nullptr; - - MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI); - - foldInlineAsmMemOperand(&NewMI, Op, FI, TII); - - return &NewMI; -} - MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops, int FI, LiveIntervals *LIS, @@ -670,8 +612,6 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI, NewMI = foldPatchpoint(MF, MI, Ops, FI, *this); if (NewMI) MBB->insert(MI, NewMI); - } else if (MI.isInlineAsm()) { - NewMI = foldInlineAsmMemOperand(MI, Ops, FI, *this); } else { // Ask the target to do the actual folding. NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM); @@ -743,8 +683,6 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI, NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this); if (NewMI) NewMI = &*MBB.insert(MI, NewMI); - } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) { - NewMI = foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this); } else { // Ask the target to do the actual folding. NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);