Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[msan] Implement support for Arm NEON vst{2,3,4} instructions #99360

Merged
merged 28 commits into from
Jul 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
b098c85
[msan] Implement support for Arm NEON vst{2,3,4} instructions
thurstond Jul 3, 2024
508d6ee
Update code and test
thurstond Jul 16, 2024
f83255f
Update instrumentation and test
thurstond Jul 16, 2024
e400ab8
Simplify code and reformat
thurstond Jul 16, 2024
c4f0efb
Remove unnecessary braces
thurstond Jul 16, 2024
c925b0c
Skip last two operands
thurstond Jul 17, 2024
7d47057
Origin tracking
thurstond Jul 17, 2024
3bfaba7
Add preconditions for interleave functions
thurstond Jul 17, 2024
5fc60f0
Comment
thurstond Jul 17, 2024
6d1918a
Wording
thurstond Jul 17, 2024
7efd4bb
Remove track-origins from test
thurstond Jul 17, 2024
04b3180
Use arg_size() instead of (getNumOperands() - 1)
thurstond Jul 17, 2024
abe762b
Revert setOriginForNaryOp
thurstond Jul 17, 2024
5e3af71
Move pointer check earlier
thurstond Jul 17, 2024
0843c6f
Change from getOperand to getArgOperand
thurstond Jul 17, 2024
797dd3d
Formatting
thurstond Jul 17, 2024
f731c68
Clone instruction, as suggested by Vitaly
thurstond Jul 17, 2024
5ec1cd3
Formatting
thurstond Jul 17, 2024
53d5155
Fix origin painting (contains debugging output)
thurstond Jul 18, 2024
bab878c
Update origin
thurstond Jul 18, 2024
eb0939b
Formatting
thurstond Jul 18, 2024
ec56547
Formatting
thurstond Jul 18, 2024
64fb500
Add comment about origin tracking imprecision
thurstond Jul 18, 2024
2d0abae
Paint origin with kMinOriginAlignment
thurstond Jul 18, 2024
af86d86
Relax alignment back to 1 byte because it is not guaranteed to be more
thurstond Jul 18, 2024
6903e38
Update test with relaxed alignment
thurstond Jul 18, 2024
178527d
OriginPtr is aligned by getShadowOriginPtr
vitalybuka Jul 18, 2024
3c1b169
Update neon_vst_origins.ll
vitalybuka Jul 18, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
80 changes: 80 additions & 0 deletions llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
Expand Down Expand Up @@ -2498,6 +2499,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
MSV->setOrigin(I, Origin);
}
}

/// Persist the combined origin value to memory at the given origin
/// pointer, covering TS bytes of the shadowed store.
///
/// No-op unless origin tracking is enabled. When it is enabled, a
/// combined origin must already have been accumulated via Add().
void DoneAndStoreOrigin(TypeSize TS, Value *OriginPtr) {
  // Early-out when origin tracking is disabled; nothing to record.
  if (!MSV->MS.TrackOrigins)
    return;
  assert(Origin);
  MSV->paintOrigin(IRB, Origin, OriginPtr, TS, kMinOriginAlignment);
}
};

using ShadowAndOriginCombiner = Combiner<true>;
Expand Down Expand Up @@ -3865,6 +3875,69 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOriginForNaryOp(I);
}

/// Handle Arm NEON vector store intrinsics (vst{2,3,4}).
///
/// Arm NEON vector store intrinsics have the output address (pointer) as the
/// last argument, with the initial arguments being the input vectors. They
/// return void.
void handleNEONVectorStoreIntrinsic(IntrinsicInst &I) {
  IRBuilder<> IRB(&I);

  // Don't use getNumOperands() because it includes the callee.
  int numArgOperands = I.arg_size();
  assert(numArgOperands >= 1);

  // The last arg operand is the output address.
  Value *Addr = I.getArgOperand(numArgOperands - 1);
  assert(Addr->getType()->isPointerTy());

  if (ClCheckAccessAddress)
    insertShadowCheck(Addr, &I);

  // Every arg operand, other than the last one, is an input vector.
  // Clone the intrinsic and replace each input with its shadow so the
  // shadow values get interleaved/stored exactly as the originals are.
  IntrinsicInst *ShadowI = cast<IntrinsicInst>(I.clone());
  for (int i = 0; i < numArgOperands - 1; i++) {
    assert(isa<FixedVectorType>(I.getArgOperand(i)->getType()));
    ShadowI->setArgOperand(i, getShadow(&I, i));
  }

  // MSan's GetShadowTy assumes the LHS is the type we want the shadow for
  // e.g., for:
  //   [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
  // we know the type of the output (and its shadow) is <16 x i8>.
  //
  // Arm NEON VST is unusual because the last argument is the output address:
  //   define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) {
  //     call void @llvm.aarch64.neon.st2.v16i8.p0
  //         (<16 x i8> [[A]], <16 x i8> [[B]], ptr [[P]])
  // and we have no type information about P's operand. We must manually
  // compute the type (<16 x i8> x 2): one copy of the element vector type
  // per input vector, concatenated.
  FixedVectorType *OutputVectorTy = FixedVectorType::get(
      cast<FixedVectorType>(I.getArgOperand(0)->getType())->getElementType(),
      cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements() *
          (numArgOperands - 1));
  Type *ShadowTy = getShadowTy(OutputVectorTy);
  Value *ShadowPtr, *OriginPtr;
  // AArch64 NEON does not need alignment (unless OS requires it).
  std::tie(ShadowPtr, OriginPtr) =
      getShadowOriginPtr(Addr, IRB, ShadowTy, Align(1), /*isStore*/ true);
  // Redirect the cloned store to the shadow address and emit it right
  // after the original so the shadow memory mirrors the real store.
  ShadowI->setArgOperand(numArgOperands - 1, ShadowPtr);
  ShadowI->insertAfter(&I);

  if (MS.TrackOrigins) {
    // TODO: if we modelled the vst* instruction more precisely, we could
    // more accurately track the origins (e.g., if both inputs are
    // uninitialized for vst2, we currently blame the second input, even
    // though part of the output depends only on the first input).
    OriginCombiner OC(this, IRB);
    for (int i = 0; i < numArgOperands - 1; i++)
      OC.Add(I.getArgOperand(i));

    const DataLayout &DL = F.getDataLayout();
    OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy), OriginPtr);
  }
}

void visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case Intrinsic::uadd_with_overflow:
Expand Down Expand Up @@ -4204,6 +4277,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOrigin(&I, getCleanOrigin());
break;

case Intrinsic::aarch64_neon_st2:
case Intrinsic::aarch64_neon_st3:
case Intrinsic::aarch64_neon_st4: {
handleNEONVectorStoreIntrinsic(I);
break;
}

default:
if (!handleUnknownIntrinsic(I))
visitInstruction(I);
Expand Down
Loading