From db782b44b3471c0ab41950c3f79d0ea7b916c135 Mon Sep 17 00:00:00 2001 From: Luke Lau Date: Fri, 5 Jul 2024 11:44:59 +0800 Subject: [PATCH] [RISCV] Don't forward AVL in VSETVLIInfo if it would clobber other definitions (#97264) This fixes a crash found when compiling OpenBLAS with -mllvm -verify-machineinstrs. When we "forward" the AVL from the output of a vsetvli, we might have to extend the LiveInterval of the AVL to where we insert the new vsetvli. Most of the time we are able to extend the LiveInterval because there's only one val num (definition) for the register. But PHI elimination can assign multiple values to the same register, in which case we end up clobbering a different val num when extending: %x = PseudoVSETVLI %avl, ... %avl = ADDI ... %v = PseudoVADD ..., avl=%x ; %avl is forwarded to PseudoVADD: %x = PseudoVSETVLI %avl, ... %avl = ADDI ... %v = PseudoVADD ..., avl=%avl Here there's no way to extend the %avl from the vsetvli since %avl is redefined, i.e. we have two val nums. This fixes it by only forwarding it when we have exactly one val num, where it should be safe to extend it. --- llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 6 +++ .../RISCV/rvv/vsetvli-insert-crossbb.ll | 36 +++++++++++++++ .../RISCV/rvv/vsetvli-insert-crossbb.mir | 44 +++++++++++++++++++ .../RISCV/lsr-drop-solution.ll | 1 + 4 files changed, 87 insertions(+) diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index a1ae8a1250813f..9c76bb15035ebb 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -956,6 +956,12 @@ void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const { VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI); if (!DefInstrInfo.hasSameVLMAX(Info)) return; + // If the AVL is a register with multiple definitions, don't forward it. We + // might not be able to extend its LiveInterval without clobbering other val + // nums. 
+ if (DefInstrInfo.hasAVLReg() && + !LIS->getInterval(DefInstrInfo.getAVLReg()).containsOneValue()) + return; Info.setAVL(DefInstrInfo); } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll index 701192839d6aa3..f93022c9d132de 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll @@ -1125,3 +1125,39 @@ exit: call void @llvm.riscv.vse.nxv8i8( %1, ptr %p, i64 1) ret void } + +; Check that we don't forward an AVL if we wouldn't be able to extend its +; LiveInterval without clobbering other val nos. +define @unforwardable_avl(i64 %n, %v, i1 %cmp) { +; CHECK-LABEL: unforwardable_avl: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a0, e32, m2, ta, ma +; CHECK-NEXT: andi a1, a1, 1 +; CHECK-NEXT: .LBB27_1: # %for.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: bnez a1, .LBB27_1 +; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vadd.vv v10, v8, v8 +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vadd.vv v8, v10, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 2, i64 1) + br label %for.body + +for.body: + ; Use %n in a PHI here so its virtual register is assigned to a second time here. + %1 = phi i64 [ %3, %for.body ], [ %n, %entry ] + %2 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %1, i64 0, i64 0) + %3 = add i64 %1, 1 + br i1 %cmp, label %for.body, label %for.cond.cleanup + +for.cond.cleanup: + %4 = tail call @llvm.riscv.vadd.nxv2f32.nxv2f32.i64( undef, %v, %v, i64 -1) + ; VL toggle needed here: If the %n AVL was forwarded here we wouldn't be able + ; to extend its LiveInterval because it would clobber the assignment at %1. 
+ %5 = tail call @llvm.riscv.vadd.nxv2f32.nxv2f32.i64( undef, %4, %v, i64 %0) + ret %5 +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir index 4091d1711b5841..ee10c9987be494 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir @@ -134,6 +134,10 @@ ret void } + define void @unforwardable_avl() { + ret void + } + declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , , i64) #1 @@ -990,3 +994,43 @@ body: | %x:gpr = PseudoVMV_X_S undef $noreg, 6 PseudoBR %bb.1 ... +--- +name: unforwardable_avl +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: unforwardable_avl + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $x10, $v8m2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %avl:gprnox0 = COPY $x10 + ; CHECK-NEXT: %outvl:gprnox0 = PseudoVSETVLI %avl, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.2(0x80000000) + ; CHECK-NEXT: liveins: $v8m2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: dead %avl:gprnox0 = ADDI %avl, 1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $v8m2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, renamable $v8m2, renamable $v8m2, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI %outvl, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, renamable $v8m2, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoRET implicit 
$v8m2 + bb.0: + liveins: $x10, $v8m2 + %avl:gprnox0 = COPY $x10 + %outvl:gprnox0 = PseudoVSETVLI %avl:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype + + bb.1: + liveins: $v8m2 + %avl:gprnox0 = ADDI %avl:gprnox0, 1 + + bb.2: + liveins: $v8m2 + renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, renamable $v8m2, renamable $v8m2, -1, 5, 0 + renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, killed renamable $v8m2, %outvl:gprnox0, 5, 0 + PseudoRET implicit $v8m2 diff --git a/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll b/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll index 7353acd7228cdc..1ab19081b7de59 100644 --- a/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll +++ b/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll @@ -20,6 +20,7 @@ define ptr @foo(ptr %a0, ptr %a1, i64 %a2) { ; CHECK-NEXT: mv a3, a0 ; CHECK-NEXT: .LBB0_3: # %do.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vse8.v v8, (a3) ; CHECK-NEXT: add a3, a3, a4