[scudo] Added LRU eviction policy to secondary cache.

The logic for emptying the cache now follows an LRU eviction policy.
When the cache is full on a given free operation, the least recently
used entry in the cache is evicted, and the memory associated with
that cache entry is unmapped.

Finding an empty cache entry is now a constant-time operation, using a
stack of available cache entries.
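
For illustration, here is a minimal, self-contained sketch of such an
availability stack, threaded through the entry array itself via the Next
indices. The names (sketch::EntryArraySize, popAvailable, pushAvailable)
are hypothetical stand-ins, not identifiers from this patch:

#include <cassert>
#include <cstdint>

namespace sketch {

using u16 = std::uint16_t;
constexpr u16 EntryArraySize = 32;       // stand-in for Config::getEntriesArraySize()
constexpr u16 InvalidEntry = UINT16_MAX; // sentinel, as in CachedBlock::InvalidEntry

struct Entry {
  u16 Next = 0; // doubles as the free-list link while the slot is unused
};

Entry Entries[EntryArraySize];
u16 AvailableHead = 0;

// Thread every slot onto the stack once, at init time.
void initAvailable() {
  for (u16 I = 0; I + 1 < EntryArraySize; I++)
    Entries[I].Next = static_cast<u16>(I + 1);
  Entries[EntryArraySize - 1].Next = InvalidEntry;
  AvailableHead = 0;
}

// O(1): pop a free slot off the stack.
u16 popAvailable() {
  assert(AvailableHead != InvalidEntry && "no free cache slots");
  u16 I = AvailableHead;
  AvailableHead = Entries[I].Next;
  return I;
}

// O(1): return a slot to the stack.
void pushAvailable(u16 I) {
  Entries[I].Next = AvailableHead;
  AvailableHead = I;
}

} // namespace sketch

int main() {
  sketch::initAvailable();
  auto A = sketch::popAvailable(); // slot 0
  auto B = sketch::popAvailable(); // slot 1
  sketch::pushAvailable(A);        // slot 0 is free again, now on top
  assert(sketch::popAvailable() == A);
  (void)B;
  return 0;
}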

Through the LRU structure, the cache retrieval algorithm now iterates
only over valid entries of the cache. Furthermore, the retrieval
algorithm searches first among cache entries that have not been
decommitted (i.e., madvise() has not been called on their corresponding
memory chunks), to reduce the likelihood of returning to the user a
memory chunk that would induce a page fault.
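
A compressed sketch of the intrusive, array-backed LRU list itself
follows: insertion at the head on store, O(1) unlink on retrieval, and
eviction always at the tail. This is a simplification under my own
naming (lru::N, insertHead, evictOldest), not the patch's exact code:

#include <cassert>
#include <cstdint>

namespace lru {

using u16 = std::uint16_t;
constexpr u16 N = 8;
constexpr u16 Invalid = UINT16_MAX;

struct Entry {
  u16 Next = Invalid;
  u16 Prev = Invalid;
};

Entry Entries[N];
u16 Head = Invalid; // most recently used
u16 Tail = Invalid; // least recently used

// O(1): link entry I in at the head (most-recent position).
void insertHead(u16 I) {
  Entries[I].Prev = Invalid;
  Entries[I].Next = Head;
  if (Head != Invalid)
    Entries[Head].Prev = I;
  else
    Tail = I; // first entry is both head and tail
  Head = I;
}

// O(1): unlink entry I, whether head, tail, or interior.
void remove(u16 I) {
  if (I == Head)
    Head = Entries[I].Next;
  else
    Entries[Entries[I].Prev].Next = Entries[I].Next;
  if (I == Tail)
    Tail = Entries[I].Prev;
  else
    Entries[Entries[I].Next].Prev = Entries[I].Prev;
}

// The LRU victim needs no search: it is always Tail. Because the list
// runs most- to least-recently used, walking from Head also tends to
// visit still-committed (recently touched) entries before decommitted
// ones.
u16 evictOldest() {
  assert(Tail != Invalid && "cache is empty");
  u16 Victim = Tail;
  remove(Victim);
  return Victim;
}

} // namespace lru

int main() {
  lru::insertHead(0);
  lru::insertHead(1);
  lru::insertHead(2);              // list: 2 -> 1 -> 0
  assert(lru::evictOldest() == 0); // tail is the LRU entry
  lru::remove(1);                  // retrieval unlinks an interior entry
  assert(lru::evictOldest() == 2);
  return 0;
}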
JoshuaMBa committed Jul 23, 2024
1 parent d08527e commit ad5619c
Showing 1 changed file with 141 additions and 46 deletions.
187 changes: 141 additions & 46 deletions compiler-rt/lib/scudo/standalone/secondary.h
@@ -19,6 +19,7 @@
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"
#include "vector.h"

namespace scudo {

@@ -73,12 +74,18 @@ static inline void unmap(LargeBlock::Header *H) {
}

namespace {

struct CachedBlock {
static constexpr u16 CacheIndexMax = UINT16_MAX;
static constexpr u16 InvalidEntry = CacheIndexMax;

uptr CommitBase = 0;
uptr CommitSize = 0;
uptr BlockBegin = 0;
MemMapT MemMap = {};
u64 Time = 0;
u16 Next = 0;
u16 Prev = 0;

bool isValid() { return CommitBase != 0; }

@@ -188,10 +195,11 @@ template <typename Config> class MapAllocatorCache {
Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
"(%zu.%02zu%%)\n",
SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
for (CachedBlock Entry : Entries) {
if (!Entry.isValid())
continue;
Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");

for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
CachedBlock &Entry = Entries[I];
Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
"BlockSize: %zu %s\n",
Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
@@ -202,6 +210,10 @@ template <typename Config> class MapAllocatorCache {
static_assert(Config::getDefaultMaxEntriesCount() <=
Config::getEntriesArraySize(),
"");
// Ensure the cache entry array size fits in the LRU list Next and Prev
// index fields
static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
"Cache entry array is too large to be indexed.");

void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_EQ(EntriesCount, 0U);
@@ -213,23 +225,33 @@ template <typename Config> class MapAllocatorCache {
if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

// The cache is initially empty
LRUHead = CachedBlock::InvalidEntry;
LRUTail = CachedBlock::InvalidEntry;

// Available entries will be retrieved starting from the beginning of the
// Entries array
AvailableHead = 0;
for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
Entries[I].Next = static_cast<u16>(I + 1);

Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
}

void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
if (!canCache(H->CommitSize))
return unmap(H);

bool EntryCached = false;
bool EmptyCache = false;
const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
const u64 Time = getMonotonicTimeFast();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
u64 Time;
CachedBlock Entry;

Entry.CommitBase = H->CommitBase;
Entry.CommitSize = H->CommitSize;
Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
Entry.MemMap = H->MemMap;
Entry.Time = Time;
Entry.Time = UINT64_MAX;
if (useMemoryTagging<Config>(Options)) {
if (Interval == 0 && !SCUDO_FUCHSIA) {
// Release the memory and make it inaccessible at the same time by
@@ -243,17 +265,32 @@ template <typename Config> class MapAllocatorCache {
Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
MAP_NOACCESS);
}
} else if (Interval == 0) {
Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
Entry.Time = 0;
}

// Usually only one entry will be evicted from the cache.
// Only in the rare event that the cache shrinks in real-time
// due to a decrease in the configurable value MaxEntriesCount
// will more than one cache entry be evicted.
// The vector is used to save the MemMaps of evicted entries so
// that the unmap call can be performed outside the lock
Vector<MemMapT, 1U> EvictionMemMaps;

do {
ScopedLock L(Mutex);

// Time must be computed under the lock to ensure
// that the LRU cache remains sorted with respect to
// time in a multithreaded environment
Time = getMonotonicTimeFast();
if (Entry.Time != 0)
Entry.Time = Time;

if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
// If we get here then memory tagging was disabled in between when we
// read Options and when we locked Mutex. We can't insert our entry into
// the quarantine or the cache because the permissions would be wrong so
// just unmap it.
Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
break;
}
if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -269,30 +306,27 @@ template <typename Config> class MapAllocatorCache {
OldestTime = Entry.Time;
Entry = PrevEntry;
}
if (EntriesCount >= MaxCount) {
if (IsFullEvents++ == 4U)
EmptyCache = true;
} else {
for (u32 I = 0; I < MaxCount; I++) {
if (Entries[I].isValid())
continue;
if (I != 0)
Entries[I] = Entries[0];
Entries[0] = Entry;
EntriesCount++;
if (OldestTime == 0)
OldestTime = Entry.Time;
EntryCached = true;
break;
}

// All excess entries are evicted from the cache
while (needToEvict()) {
// Save MemMaps of evicted entries to perform unmap outside of lock
EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
remove(LRUTail);
}

insert(Entry);

if (OldestTime == 0)
OldestTime = Entry.Time;
} while (0);
if (EmptyCache)
empty();
else if (Interval >= 0)

for (MemMapT &EvictMemMap : EvictionMemMaps)
EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());

if (Interval >= 0) {
// TODO: Add ReleaseToOS logic to LRU algorithm
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
if (!EntryCached)
Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
}
}

bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
@@ -312,9 +346,8 @@ template <typename Config> class MapAllocatorCache {
return false;
u32 OptimalFitIndex = 0;
uptr MinDiff = UINTPTR_MAX;
for (u32 I = 0; I < MaxCount; I++) {
if (!Entries[I].isValid())
continue;
for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
I = Entries[I].Next) {
const uptr CommitBase = Entries[I].CommitBase;
const uptr CommitSize = Entries[I].CommitSize;
const uptr AllocPos =
@@ -347,8 +380,7 @@ template <typename Config> class MapAllocatorCache {
}
if (Found) {
Entry = Entries[OptimalFitIndex];
Entries[OptimalFitIndex].invalidate();
EntriesCount--;
remove(OptimalFitIndex);
SuccessfulRetrieves++;
}
}
@@ -418,11 +450,9 @@ template <typename Config> class MapAllocatorCache {
}
}
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
for (u32 I = 0; I < MaxCount; I++) {
if (Entries[I].isValid()) {
Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
Entries[I].CommitSize, 0);
}
for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
Entries[I].CommitSize, 0);
}
QuarantinePos = -1U;
}
@@ -434,6 +464,66 @@ template <typename Config> class MapAllocatorCache {
void unmapTestOnly() { empty(); }

private:
bool needToEvict() REQUIRES(Mutex) {
return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
}

void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));

// Since EntriesCount < MaxEntriesCount, the availability stack cannot
// be empty here
DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);

u32 FreeIndex = AvailableHead;
AvailableHead = Entries[AvailableHead].Next;

if (EntriesCount == 0) {
LRUTail = static_cast<u16>(FreeIndex);
} else {
// Check list order
if (EntriesCount > 1)
DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
}

Entries[FreeIndex] = Entry;
Entries[FreeIndex].Next = LRUHead;
Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
LRUHead = static_cast<u16>(FreeIndex);
EntriesCount++;

// Availability stack should not have available entries when all entries
// are in use
if (EntriesCount == Config::getEntriesArraySize())
DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
}

void remove(uptr I) REQUIRES(Mutex) {
DCHECK(Entries[I].isValid());

Entries[I].invalidate();

if (I == LRUHead)
LRUHead = Entries[I].Next;
else
Entries[Entries[I].Prev].Next = Entries[I].Next;

if (I == LRUTail)
LRUTail = Entries[I].Prev;
else
Entries[Entries[I].Next].Prev = Entries[I].Prev;

Entries[I].Next = AvailableHead;
AvailableHead = static_cast<u16>(I);
EntriesCount--;

// Cache should not have valid entries when empty
if (EntriesCount == 0) {
DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
}
}

void empty() {
MemMapT MapInfo[Config::getEntriesArraySize()];
uptr N = 0;
@@ -443,11 +533,10 @@ template <typename Config> class MapAllocatorCache {
if (!Entries[I].isValid())
continue;
MapInfo[N] = Entries[I].MemMap;
Entries[I].invalidate();
remove(I);
N++;
}
EntriesCount = 0;
IsFullEvents = 0;
}
for (uptr I = 0; I < N; I++) {
MemMapT &MemMap = MapInfo[I];
@@ -484,14 +573,20 @@ template <typename Config> class MapAllocatorCache {
atomic_u32 MaxEntriesCount = {};
atomic_uptr MaxEntrySize = {};
u64 OldestTime GUARDED_BY(Mutex) = 0;
u32 IsFullEvents GUARDED_BY(Mutex) = 0;
atomic_s32 ReleaseToOsIntervalMs = {};
u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
Quarantine GUARDED_BY(Mutex) = {};

// The LRUHead of the cache is the most recently used cache entry
u16 LRUHead GUARDED_BY(Mutex) = 0;
// The LRUTail of the cache is the least recently used cache entry
u16 LRUTail GUARDED_BY(Mutex) = 0;
// The AvailableHead is the top of the stack of available entries
u16 AvailableHead GUARDED_BY(Mutex) = 0;
};

template <typename Config> class MapAllocator {
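
One design choice worth calling out from the store() path above: evicted
mappings are collected while the lock is held and only unmapped after it
is released. A stripped-down sketch of that pattern follows; FakeMemMap,
std::mutex, and std::vector are placeholders for the scudo-internal
MemMapT, HybridMutex, and Vector types:

#include <cstddef>
#include <mutex>
#include <vector>

namespace evict_sketch {

struct FakeMemMap {
  void unmap() { /* munmap() of the cached region would happen here */ }
};

std::mutex M;
std::vector<FakeMemMap> Cache; // front = LRU head, back = LRU tail
std::size_t MaxCount = 4;

void store(const FakeMemMap &NewEntry) {
  // Usually at most one entry is evicted; more only if MaxCount was
  // lowered at runtime while the cache was already full.
  std::vector<FakeMemMap> Evicted;
  {
    std::lock_guard<std::mutex> L(M);
    while (Cache.size() >= MaxCount) {
      Evicted.push_back(Cache.back()); // back() plays the role of LRUTail
      Cache.pop_back();
    }
    Cache.insert(Cache.begin(), NewEntry); // front plays the role of LRUHead
  }
  // The expensive unmap syscalls run without holding the lock.
  for (FakeMemMap &MM : Evicted)
    MM.unmap();
}

} // namespace evict_sketch

int main() {
  for (int I = 0; I < 6; I++)
    evict_sketch::store(evict_sketch::FakeMemMap{});
  return 0;
}

Deferring the unmap keeps the critical section short, which matters
because store() and retrieve() contend on the same mutex.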
