[AMDGPU] Enable unaligned scratch accesses
This allows us to emit wide generic and scratch memory accesses when we do not
have alignment information. In cases where accesses happen to be properly
aligned or where generic accesses do not go to scratch memory, this improves
performance of the generated code by a factor of up to 16x and reduces code
size, especially when lowering memcpy and memmove intrinsics.

Also: Make the use of the FeatureUnalignedScratchAccess feature more
consistent: in some places, the code already assumed that unaligned accesses
with the specialized flat scratch instructions are allowed regardless of
FeatureUnalignedScratchAccess. This patch applies that interpretation
consistently.

Part of SWDEV-455845.
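
To make the combined rule concrete, here is an illustrative sketch distilled from the GCNSubtarget.h and SIISelLowering.cpp changes below; the function name and signature are invented for the example and are not code from the patch:

```cpp
// Sketch: when is a misaligned scratch (private) or flat access legal after
// this patch? Any one of the following conditions suffices:
//   1. the access is at least 4-byte aligned;
//   2. flat scratch instructions are enabled (treated as tolerating
//      unaligned addresses, per the consistent interpretation above);
//   3. FeatureUnalignedScratchAccess is present AND the unaligned access
//      mode is on (the new hasUnalignedScratchAccessEnabled() query).
bool isMisalignedScratchAccessLegal(unsigned AlignInBytes, bool FlatScratch,
                                    bool HasUnalignedScratchAccess,
                                    bool UnalignedAccessMode) {
  return AlignInBytes >= 4 || FlatScratch ||
         (HasUnalignedScratchAccess && UnalignedAccessMode);
}
```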
ritter-x2a committed Oct 1, 2024
1 parent 16ba126 commit 49300b3
Showing 12 changed files with 2,971 additions and 19,422 deletions.
18 changes: 9 additions & 9 deletions llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -1171,9 +1171,9 @@ def FeatureGFX9 : GCNSubtargetFeatureGeneration<"GFX9",
    FeatureAddNoCarryInsts, FeatureGFX8Insts, FeatureGFX7GFX8GFX9Insts,
    FeatureScalarFlatScratchInsts, FeatureScalarAtomics, FeatureR128A16,
    FeatureA16, FeatureSMemTimeInst, FeatureFastDenormalF32, FeatureSupportsXNACK,
-   FeatureUnalignedBufferAccess, FeatureUnalignedDSAccess,
-   FeatureNegativeScratchOffsetBug, FeatureGWS, FeatureDefaultComponentZero,
-   FeatureVmemWriteVgprInOrder
+   FeatureUnalignedBufferAccess, FeatureUnalignedScratchAccess,
+   FeatureUnalignedDSAccess, FeatureNegativeScratchOffsetBug, FeatureGWS,
+   FeatureDefaultComponentZero, FeatureVmemWriteVgprInOrder
   ]
 >;

@@ -1192,9 +1192,9 @@ def FeatureGFX10 : GCNSubtargetFeatureGeneration<"GFX10",
    FeatureVOP3Literal, FeatureDPP8, FeatureExtendedImageInsts,
    FeatureNoDataDepHazard, FeaturePkFmacF16Inst,
    FeatureA16, FeatureSMemTimeInst, FeatureFastDenormalF32, FeatureG16,
-   FeatureUnalignedBufferAccess, FeatureUnalignedDSAccess, FeatureImageInsts,
-   FeatureGDS, FeatureGWS, FeatureDefaultComponentZero,
-   FeatureMaxHardClauseLength63,
+   FeatureUnalignedBufferAccess, FeatureUnalignedScratchAccess,
+   FeatureUnalignedDSAccess, FeatureImageInsts, FeatureGDS, FeatureGWS,
+   FeatureDefaultComponentZero, FeatureMaxHardClauseLength63,
    FeatureAtomicFMinFMaxF32GlobalInsts, FeatureAtomicFMinFMaxF64GlobalInsts,
    FeatureAtomicFMinFMaxF32FlatInsts, FeatureAtomicFMinFMaxF64FlatInsts,
    FeatureVmemWriteVgprInOrder
@@ -1216,9 +1216,9 @@ def FeatureGFX11 : GCNSubtargetFeatureGeneration<"GFX11",
    FeatureVOP3Literal, FeatureDPP8, FeatureExtendedImageInsts,
    FeatureNoDataDepHazard, FeaturePkFmacF16Inst,
    FeatureA16, FeatureFastDenormalF32, FeatureG16,
-   FeatureUnalignedBufferAccess, FeatureUnalignedDSAccess, FeatureGDS,
-   FeatureGWS, FeatureDefaultComponentZero,
-   FeatureMaxHardClauseLength32,
+   FeatureUnalignedBufferAccess, FeatureUnalignedScratchAccess,
+   FeatureUnalignedDSAccess, FeatureGDS, FeatureGWS,
+   FeatureDefaultComponentZero, FeatureMaxHardClauseLength32,
    FeatureAtomicFMinFMaxF32GlobalInsts, FeatureAtomicFMinFMaxF32FlatInsts,
    FeatureVmemWriteVgprInOrder
   ]
5 changes: 3 additions & 2 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -387,8 +387,9 @@ bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
   // them later if they may access private memory. We don't have enough context
   // here, and legalization can handle it.
   if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
-    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
-           ChainSizeInBytes <= ST->getMaxPrivateElementSize();
+    return (Alignment >= 4 || ST->hasUnalignedScratchAccessEnabled() ||
+            ST->enableFlatScratch()) &&
+           ChainSizeInBytes <= ST->getMaxPrivateElementSize();
   }
   return true;
 }
4 changes: 4 additions & 0 deletions llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -590,6 +590,10 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
     return UnalignedScratchAccess;
   }
 
+  bool hasUnalignedScratchAccessEnabled() const {
+    return UnalignedScratchAccess && UnalignedAccessMode;
+  }
+
   bool hasUnalignedAccessMode() const {
     return UnalignedAccessMode;
   }
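
Note the distinction: the existing hasUnalignedScratchAccess() reports only the raw feature bit, while the new query also requires UnalignedAccessMode. A hypothetical caller (the helper name is invented for this example) would gate wide unaligned scratch accesses like this:

```cpp
// Sketch, not patch code: legality checks should use the combined query, or
// enableFlatScratch(), which is treated as tolerating unaligned addresses;
// the bare feature bit alone is not sufficient.
static bool canUseWideUnalignedScratch(const GCNSubtarget &ST) {
  return ST.enableFlatScratch() || ST.hasUnalignedScratchAccessEnabled();
}
```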
17 changes: 4 additions & 13 deletions llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1832,26 +1832,17 @@ bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
            Subtarget->hasUnalignedDSAccessEnabled();
   }
 
-  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
-    bool AlignedBy4 = Alignment >= Align(4);
-    if (IsFast)
-      *IsFast = AlignedBy4;
-
-    return AlignedBy4 ||
-           Subtarget->enableFlatScratch() ||
-           Subtarget->hasUnalignedScratchAccess();
-  }
-
   // FIXME: We have to be conservative here and assume that flat operations
   // will access scratch. If we had access to the IR function, then we
   // could determine if any private memory was used in the function.
-  if (AddrSpace == AMDGPUAS::FLAT_ADDRESS &&
-      !Subtarget->hasUnalignedScratchAccess()) {
+  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
+      AddrSpace == AMDGPUAS::FLAT_ADDRESS) {
     bool AlignedBy4 = Alignment >= Align(4);
     if (IsFast)
       *IsFast = AlignedBy4;
 
-    return AlignedBy4;
+    return AlignedBy4 || Subtarget->enableFlatScratch() ||
+           Subtarget->hasUnalignedScratchAccessEnabled();
   }
 
   // So long as they are correct, wide global memory operations perform better
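
The FIXME in this hunk explains why flat (generic) accesses are now constrained by the same rule as private ones: without the IR function, lowering cannot prove that a generic pointer never points into scratch. A HIP-style sketch of the situation (function names invented for illustration):

```cpp
// Sketch: a generic pointer may be derived from a scratch (private) object,
// so a flat load/store can hit scratch memory at run time.
__device__ int read_generic(int *p) { // p is a flat (generic) pointer
  return *p; // nothing here rules out p pointing into scratch
}

__device__ int caller() {
  int local = 7;                // 'local' lives in scratch memory
  return read_generic(&local);  // generic pointer derived from scratch
}
```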
5,370 changes: 465 additions & 4,905 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir

Large diffs are not rendered by default.

2,696 changes: 490 additions & 2,206 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir

Large diffs are not rendered by default.

23 changes: 11 additions & 12 deletions llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
@@ -703,12 +703,12 @@ define <2 x i16> @chain_hi_to_lo_private_other_dep(ptr addrspace(5) %ptr) {
 ; FLATSCR-LABEL: chain_hi_to_lo_private_other_dep:
 ; FLATSCR: ; %bb.0: ; %bb
 ; FLATSCR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; FLATSCR-NEXT: scratch_load_short_d16_hi v1, v0, off
+; FLATSCR-NEXT: scratch_load_dword v0, v0, off
+; FLATSCR-NEXT: s_mov_b32 s0, 0x7060302
 ; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: v_lshlrev_b32_e32 v1, 16, v0
 ; FLATSCR-NEXT: v_pk_add_u16 v1, v1, 12 op_sel_hi:[1,0]
-; FLATSCR-NEXT: scratch_load_short_d16 v1, v0, off offset:2
-; FLATSCR-NEXT: s_waitcnt vmcnt(0)
-; FLATSCR-NEXT: v_mov_b32_e32 v0, v1
+; FLATSCR-NEXT: v_perm_b32 v0, v1, v0, s0
 ; FLATSCR-NEXT: s_setpc_b64 s[30:31]
 ;
 ; GFX10_DEFAULT-LABEL: chain_hi_to_lo_private_other_dep:
@@ -725,23 +725,22 @@ define <2 x i16> @chain_hi_to_lo_private_other_dep(ptr addrspace(5) %ptr) {
 ; FLATSCR_GFX10-LABEL: chain_hi_to_lo_private_other_dep:
 ; FLATSCR_GFX10: ; %bb.0: ; %bb
 ; FLATSCR_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; FLATSCR_GFX10-NEXT: scratch_load_short_d16_hi v1, v0, off
+; FLATSCR_GFX10-NEXT: scratch_load_dword v0, v0, off
 ; FLATSCR_GFX10-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR_GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v0
 ; FLATSCR_GFX10-NEXT: v_pk_add_u16 v1, v1, 12 op_sel_hi:[1,0]
-; FLATSCR_GFX10-NEXT: scratch_load_short_d16 v1, v0, off offset:2
-; FLATSCR_GFX10-NEXT: s_waitcnt vmcnt(0)
-; FLATSCR_GFX10-NEXT: v_mov_b32_e32 v0, v1
+; FLATSCR_GFX10-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
 ; FLATSCR_GFX10-NEXT: s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: chain_hi_to_lo_private_other_dep:
 ; GFX11: ; %bb.0: ; %bb
 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: scratch_load_d16_hi_b16 v1, v0, off
+; GFX11-NEXT: scratch_load_b32 v0, v0, off
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT: v_pk_add_u16 v1, v1, 12 op_sel_hi:[1,0]
-; GFX11-NEXT: scratch_load_d16_b16 v1, v0, off offset:2
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 bb:
   %gep_lo = getelementptr inbounds i16, ptr addrspace(5) %ptr, i64 1
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/AMDGPU/flat-address-space.ll
@@ -123,10 +123,8 @@ define amdgpu_kernel void @zextload_flat_i16(ptr addrspace(1) noalias %out, ptr
 }
 
 ; GCN-LABEL: flat_scratch_unaligned_load:
-; GCN: flat_load_{{ubyte|u8}}
-; GCN: flat_load_{{ubyte|u8}}
-; GCN: flat_load_{{ubyte|u8}}
-; GCN: flat_load_{{ubyte|u8}}
+; GFX9: flat_load_dword
+; GFX10PLUS: flat_load_{{dword|b32}}
 define amdgpu_kernel void @flat_scratch_unaligned_load() {
   %scratch = alloca i32, addrspace(5)
   %fptr = addrspacecast ptr addrspace(5) %scratch to ptr
@@ -136,10 +134,8 @@ define amdgpu_kernel void @flat_scratch_unaligned_load() {
 }
 
 ; GCN-LABEL: flat_scratch_unaligned_store:
-; GCN: flat_store_{{byte|b8}}
-; GCN: flat_store_{{byte|b8}}
-; GCN: flat_store_{{byte|b8}}
-; GCN: flat_store_{{byte|b8}}
+; GFX9: flat_store_dword
+; GFX10PLUS: flat_store_{{dword|b32}}
 define amdgpu_kernel void @flat_scratch_unaligned_store() {
   %scratch = alloca i32, addrspace(5)
   %fptr = addrspacecast ptr addrspace(5) %scratch to ptr
98 changes: 14 additions & 84 deletions llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
@@ -16,47 +16,18 @@ define void @issue63986(i64 %0, i64 %idxprom) {
 ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: v_mov_b32_e32 v9, s7
 ; CHECK-NEXT: v_mov_b32_e32 v8, s6
-; CHECK-NEXT: flat_load_ubyte v10, v[8:9] offset:5
-; CHECK-NEXT: flat_load_ubyte v11, v[8:9] offset:6
-; CHECK-NEXT: flat_load_ubyte v12, v[8:9] offset:7
-; CHECK-NEXT: flat_load_ubyte v13, v[8:9] offset:3
-; CHECK-NEXT: flat_load_ubyte v14, v[8:9] offset:2
-; CHECK-NEXT: flat_load_ubyte v15, v[8:9] offset:1
-; CHECK-NEXT: flat_load_ubyte v16, v[8:9]
-; CHECK-NEXT: flat_load_ubyte v17, v[8:9] offset:4
-; CHECK-NEXT: flat_load_ubyte v18, v[8:9] offset:13
-; CHECK-NEXT: flat_load_ubyte v19, v[8:9] offset:14
-; CHECK-NEXT: flat_load_ubyte v20, v[8:9] offset:15
-; CHECK-NEXT: flat_load_ubyte v21, v[8:9] offset:11
-; CHECK-NEXT: flat_load_ubyte v22, v[8:9] offset:10
-; CHECK-NEXT: flat_load_ubyte v23, v[8:9] offset:9
-; CHECK-NEXT: flat_load_ubyte v24, v[8:9] offset:8
-; CHECK-NEXT: flat_load_ubyte v25, v[8:9] offset:12
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[8:9]
 ; CHECK-NEXT: s_add_u32 s4, s4, 1
 ; CHECK-NEXT: s_addc_u32 s5, s5, 0
-; CHECK-NEXT: v_add_co_u32_e32 v8, vcc, s6, v6
+; CHECK-NEXT: v_mov_b32_e32 v13, s7
+; CHECK-NEXT: v_add_co_u32_e32 v12, vcc, s6, v6
 ; CHECK-NEXT: v_cmp_ge_u64_e64 s[8:9], s[4:5], 2
-; CHECK-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v7, vcc
+; CHECK-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v7, vcc
 ; CHECK-NEXT: s_add_u32 s6, s6, 16
 ; CHECK-NEXT: s_addc_u32 s7, s7, 0
 ; CHECK-NEXT: s_and_b64 vcc, exec, s[8:9]
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[8:9], v13 offset:3
-; CHECK-NEXT: flat_store_byte v[8:9], v14 offset:2
-; CHECK-NEXT: flat_store_byte v[8:9], v15 offset:1
-; CHECK-NEXT: flat_store_byte v[8:9], v16
-; CHECK-NEXT: flat_store_byte v[8:9], v12 offset:7
-; CHECK-NEXT: flat_store_byte v[8:9], v11 offset:6
-; CHECK-NEXT: flat_store_byte v[8:9], v10 offset:5
-; CHECK-NEXT: flat_store_byte v[8:9], v17 offset:4
-; CHECK-NEXT: flat_store_byte v[8:9], v21 offset:11
-; CHECK-NEXT: flat_store_byte v[8:9], v22 offset:10
-; CHECK-NEXT: flat_store_byte v[8:9], v23 offset:9
-; CHECK-NEXT: flat_store_byte v[8:9], v24 offset:8
-; CHECK-NEXT: flat_store_byte v[8:9], v20 offset:15
-; CHECK-NEXT: flat_store_byte v[8:9], v19 offset:14
-; CHECK-NEXT: flat_store_byte v[8:9], v18 offset:13
-; CHECK-NEXT: flat_store_byte v[8:9], v25 offset:12
+; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
 ; CHECK-NEXT: s_cbranch_vccz .LBB0_2
 ; CHECK-NEXT: ; %bb.3: ; %loop-memcpy-residual-header
 ; CHECK-NEXT: s_mov_b32 s4, 0
@@ -128,47 +99,18 @@ define void @issue63986(i64 %0, i64 %idxprom) {
 ; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
 ; CHECK-NEXT: v_mov_b32_e32 v10, s10
 ; CHECK-NEXT: v_mov_b32_e32 v11, s11
-; CHECK-NEXT: flat_load_ubyte v12, v[10:11] offset:5
-; CHECK-NEXT: flat_load_ubyte v13, v[10:11] offset:6
-; CHECK-NEXT: flat_load_ubyte v14, v[10:11] offset:7
-; CHECK-NEXT: flat_load_ubyte v15, v[10:11] offset:3
-; CHECK-NEXT: flat_load_ubyte v16, v[10:11] offset:2
-; CHECK-NEXT: flat_load_ubyte v17, v[10:11] offset:1
-; CHECK-NEXT: flat_load_ubyte v18, v[10:11]
-; CHECK-NEXT: flat_load_ubyte v19, v[10:11] offset:4
-; CHECK-NEXT: flat_load_ubyte v20, v[10:11] offset:13
-; CHECK-NEXT: flat_load_ubyte v21, v[10:11] offset:14
-; CHECK-NEXT: flat_load_ubyte v22, v[10:11] offset:15
-; CHECK-NEXT: flat_load_ubyte v23, v[10:11] offset:11
-; CHECK-NEXT: flat_load_ubyte v24, v[10:11] offset:10
-; CHECK-NEXT: flat_load_ubyte v25, v[10:11] offset:9
-; CHECK-NEXT: flat_load_ubyte v26, v[10:11] offset:8
-; CHECK-NEXT: flat_load_ubyte v27, v[10:11] offset:12
+; CHECK-NEXT: flat_load_dwordx4 v[10:13], v[10:11]
+; CHECK-NEXT: v_mov_b32_e32 v15, s11
 ; CHECK-NEXT: s_add_u32 s14, s14, 1
-; CHECK-NEXT: v_add_co_u32_e32 v10, vcc, s10, v2
-; CHECK-NEXT: v_addc_co_u32_e32 v11, vcc, v11, v3, vcc
+; CHECK-NEXT: v_add_co_u32_e32 v14, vcc, s10, v2
+; CHECK-NEXT: v_addc_co_u32_e32 v15, vcc, v15, v3, vcc
 ; CHECK-NEXT: s_addc_u32 s15, s15, 0
 ; CHECK-NEXT: s_add_u32 s10, s10, 16
 ; CHECK-NEXT: v_cmp_ge_u64_e32 vcc, s[14:15], v[4:5]
 ; CHECK-NEXT: s_addc_u32 s11, s11, 0
 ; CHECK-NEXT: s_or_b64 s[12:13], vcc, s[12:13]
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_byte v[10:11], v15 offset:3
-; CHECK-NEXT: flat_store_byte v[10:11], v16 offset:2
-; CHECK-NEXT: flat_store_byte v[10:11], v17 offset:1
-; CHECK-NEXT: flat_store_byte v[10:11], v18
-; CHECK-NEXT: flat_store_byte v[10:11], v14 offset:7
-; CHECK-NEXT: flat_store_byte v[10:11], v13 offset:6
-; CHECK-NEXT: flat_store_byte v[10:11], v12 offset:5
-; CHECK-NEXT: flat_store_byte v[10:11], v19 offset:4
-; CHECK-NEXT: flat_store_byte v[10:11], v23 offset:11
-; CHECK-NEXT: flat_store_byte v[10:11], v24 offset:10
-; CHECK-NEXT: flat_store_byte v[10:11], v25 offset:9
-; CHECK-NEXT: flat_store_byte v[10:11], v26 offset:8
-; CHECK-NEXT: flat_store_byte v[10:11], v22 offset:15
-; CHECK-NEXT: flat_store_byte v[10:11], v21 offset:14
-; CHECK-NEXT: flat_store_byte v[10:11], v20 offset:13
-; CHECK-NEXT: flat_store_byte v[10:11], v27 offset:12
+; CHECK-NEXT: flat_store_dwordx4 v[14:15], v[10:13]
 ; CHECK-NEXT: s_andn2_b64 exec, exec, s[12:13]
 ; CHECK-NEXT: s_cbranch_execnz .LBB0_14
 ; CHECK-NEXT: .LBB0_15: ; %Flow20
@@ -251,23 +193,11 @@ define void @issue63986_reduced_expanded(i64 %idxprom) {
 ; CHECK-NEXT: v_mov_b32_e32 v1, 0
 ; CHECK-NEXT: .LBB1_8: ; %post-loop-memcpy-expansion
 ; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v3, v2
+; CHECK-NEXT: v_mov_b32_e32 v4, v2
+; CHECK-NEXT: v_mov_b32_e32 v5, v2
 ; CHECK-NEXT: s_and_b64 vcc, exec, 0
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:3
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:2
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:1
-; CHECK-NEXT: flat_store_byte v[0:1], v2
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:7
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:6
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:5
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:4
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:11
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:10
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:9
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:8
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:15
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:14
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:13
-; CHECK-NEXT: flat_store_byte v[0:1], v2 offset:12
+; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
 ; CHECK-NEXT: .LBB1_9: ; %loop-memcpy-expansion2
 ; CHECK-NEXT: s_mov_b64 vcc, vcc
 ; CHECK-NEXT: s_cbranch_vccz .LBB1_9
Diffs for the remaining changed files are not rendered.
