diff --git a/libc/src/__support/block.h b/libc/src/__support/block.h
index 6e7186f3224b9c2..96021b99587c87c 100644
--- a/libc/src/__support/block.h
+++ b/libc/src/__support/block.h
@@ -95,6 +95,26 @@ using cpp::optional;
 /// +----------+----------+--------------+
 /// @endcode
 ///
+/// As a space optimization, when a block is allocated, it consumes the prev
+/// field of the following block:
+///
+/// Block 1 (used):
+/// +---------------------+--------------+
+/// | Header              | Usable space |
+/// +----------+----------+--------------+
+/// | prev     | next     |              |
+/// | 0......3 | 4......7 | 8........230 |
+/// | 00000000 | 00000230 |              |
+/// +----------+----------+--------------+
+/// Block 2:
+/// +---------------------+--------------+
+/// | B1       | Header   | Usable space |
+/// +----------+----------+--------------+
+/// |          | next     |              |
+/// | 0......3 | 4......7 | 8........827 |
+/// | xxxxxxxx | 00000830 | f7f7....f7f7 |
+/// +----------+----------+--------------+
+///
 /// The next offset of a block matches the previous offset of its next block.
 /// The first block in a list is denoted by having a previous offset of `0`.
 ///
@@ -110,9 +130,9 @@
 template <typename OffsetType = uintptr_t, size_t kAlign = alignof(OffsetType)>
 class Block {
   // Masks for the contents of the next_ field.
-  static constexpr size_t USED_MASK = 1 << 0;
+  static constexpr size_t PREV_FREE_MASK = 1 << 0;
   static constexpr size_t LAST_MASK = 1 << 1;
-  static constexpr size_t SIZE_MASK = ~(USED_MASK | LAST_MASK);
+  static constexpr size_t SIZE_MASK = ~(PREV_FREE_MASK | LAST_MASK);
 
 public:
   using offset_type = OffsetType;
@@ -126,7 +146,8 @@ class Block {
   Block(const Block &other) = delete;
   Block &operator=(const Block &other) = delete;
 
-  /// Creates the first block for a given memory region.
+  /// Creates the first block for a given memory region, followed by a sentinel
+  /// last block. Returns the first block.
   static optional<Block *> init(ByteSpan region);
 
   /// @returns A pointer to a `Block`, given a pointer to the start of the
@@ -148,8 +169,22 @@ class Block {
   /// @returns The total size of the block in bytes, including the header.
   size_t outer_size() const { return next_ & SIZE_MASK; }
 
+  static size_t outer_size(size_t inner_size) {
+    // The usable region includes the prev_ field of the next block.
+    return inner_size - sizeof(prev_) + BLOCK_OVERHEAD;
+  }
+
   /// @returns The number of usable bytes inside the block.
-  size_t inner_size() const { return outer_size() - BLOCK_OVERHEAD; }
+  size_t inner_size() const {
+    if (!next())
+      return 0;
+    return inner_size(outer_size());
+  }
+
+  static size_t inner_size(size_t outer_size) {
+    // The usable region includes the prev_ field of the next block.
+    return outer_size - BLOCK_OVERHEAD + sizeof(prev_);
+  }
 
   /// @returns A pointer to the usable space inside this block.
   cpp::byte *usable_space() {
@@ -167,8 +202,9 @@ class Block {
   /// Attempts to split this block.
   ///
   /// If successful, the block will have an inner size of `new_inner_size`,
-  /// rounded up to a `ALIGNMENT` boundary. The remaining space will be
-  /// returned as a new block.
+  /// rounded to ensure that the split point is on an ALIGNMENT boundary. The
+  /// remaining space will be returned as a new block. Note that the prev_ field
+  /// of the next block counts as part of the inner size of the returned block.
   ///
   /// This method may fail if the remaining space is too small to hold a new
   /// block. If this method fails for any reason, the original block is
@@ -182,40 +218,39 @@ class Block {
   /// is the last block.
   Block *next() const;
 
-  /// @returns The block immediately before this one, or a null pointer if this
-  /// is the first block.
-  Block *prev() const;
+  /// @returns The free block immediately before this one, otherwise nullptr.
+  Block *prev_free() const;
 
-  /// Indicates whether the block is in use.
-  ///
-  /// @returns `true` if the block is in use or `false` if not.
-  bool used() const { return next_ & USED_MASK; }
+  /// @returns Whether the block is unavailable for allocation.
+  bool used() const { return !next() || !next()->prev_free(); }
 
   /// Marks this block as in use.
-  void mark_used() { next_ |= USED_MASK; }
+  void mark_used() {
+    LIBC_ASSERT(next() && "last block is always considered used");
+    next()->next_ &= ~PREV_FREE_MASK;
+  }
 
   /// Marks this block as free.
-  void mark_free() { next_ &= ~USED_MASK; }
+  void mark_free() {
+    LIBC_ASSERT(next() && "last block is always considered used");
+    next()->next_ |= PREV_FREE_MASK;
+    // The next block's prev_ field becomes alive, as it is no longer part of
+    // this block's used space.
+    *new (&next()->prev_) offset_type = outer_size();
+  }
 
   /// Marks this block as the last one in the chain. Makes next() return
   /// nullptr.
-  constexpr void mark_last() { next_ |= LAST_MASK; }
-
-  /// @brief Checks if a block is valid.
-  ///
-  /// @returns `true` if and only if the following conditions are met:
-  /// * The block is aligned.
-  /// * The prev/next fields match with the previous and next blocks.
-  bool is_valid() const {
-    return check_status() == internal::BlockStatus::VALID;
-  }
+  void mark_last() { next_ |= LAST_MASK; }
 
-  constexpr Block(size_t prev_outer_size, size_t outer_size);
+  constexpr Block(size_t outer_size);
 
   bool is_usable_space_aligned(size_t alignment) const {
     return reinterpret_cast<uintptr_t>(usable_space()) % alignment == 0;
   }
 
+  /// @returns The new inner size of this block that would give the usable
+  /// space of the next block the given alignment.
   size_t padding_for_alignment(size_t alignment) const {
     if (is_usable_space_aligned(alignment))
       return 0;
@@ -235,9 +270,11 @@ class Block {
     //               ^
    //               Alignment requirement
     //
-    uintptr_t start = reinterpret_cast<uintptr_t>(usable_space());
     alignment = cpp::max(alignment, ALIGNMENT);
-    return align_up(start + BLOCK_OVERHEAD, alignment) - start;
+    uintptr_t start = reinterpret_cast<uintptr_t>(usable_space());
+    uintptr_t next_usable_space = align_up(start + BLOCK_OVERHEAD, alignment);
+    uintptr_t next_block = next_usable_space - BLOCK_OVERHEAD;
+    return next_block - start + sizeof(prev_);
   }
 
   // Check that we can `allocate` a block with a given alignment and size from
@@ -272,21 +309,16 @@ class Block {
 
 private:
   /// Construct a block to represent a span of bytes. Overwrites only enough
   /// memory for the block header; the rest of the span is left alone.
-  static Block *as_block(size_t prev_outer_size, ByteSpan bytes);
-
-  /// Returns a `BlockStatus` that is either VALID or indicates the reason why
-  /// the block is invalid.
-  ///
-  /// If the block is invalid at multiple points, this function will only return
-  /// one of the reasons.
-  internal::BlockStatus check_status() const;
+  static Block *as_block(ByteSpan bytes);
 
   /// Like `split`, but assumes the caller has already checked the parameters
   /// to ensure the split will succeed.
   Block *split_impl(size_t new_inner_size);
 
   /// Offset from this block to the previous block. 0 if this is the first
-  /// block.
+  /// block. This field is only alive when the previous block is free;
+  /// otherwise, its memory is reused as part of the previous block's usable
+  /// space.
   offset_type prev_ = 0;
 
   /// Offset from this block to the next block. Valid even if this is the last
@@ -296,14 +328,12 @@ class Block {
   /// Information about the current state of the block is stored in the two low
   /// order bits of the next_ value. These are guaranteed free by a minimum
   /// alignment (and thus, alignment of the size) of 4. The lowest bit is the
-  /// `used` flag, and the other bit is the `last` flag.
+  /// `prev_free` flag, and the other bit is the `last` flag.
   ///
-  /// * If the `used` flag is set, the block's usable memory has been allocated
-  ///   and is being used.
-  /// * If the `last` flag is set, the block does not have a next block.
-  /// * If the `used` flag is set, the alignment represents the requested value
-  ///   when the memory was allocated, which may be less strict than the actual
-  ///   alignment.
+  /// * If the `prev_free` flag is set, the block isn't the first and the
+  ///   previous block is free.
+  /// * If the `last` flag is set, the block is the sentinel last block. It is
+  ///   summarily considered used and has no next block.
 } __attribute__((packed, aligned(cpp::max(kAlign, size_t{4}))));
 
 // Public template method implementations.
@@ -332,29 +362,35 @@ Block<OffsetType, kAlign>::init(ByteSpan region) {
     return {};
   region = result.value();
 
-  if (region.size() < BLOCK_OVERHEAD)
+  // Two blocks are allocated: a free block and a sentinel last block.
+  if (region.size() < 2 * BLOCK_OVERHEAD)
     return {};
 
   if (cpp::numeric_limits<OffsetType>::max() < region.size())
     return {};
 
-  Block *block = as_block(0, region);
-  block->mark_last();
+  Block *block = as_block(region.first(region.size() - BLOCK_OVERHEAD));
+  Block *last = as_block(region.last(BLOCK_OVERHEAD));
+  block->mark_free();
+  last->mark_last();
   return block;
 }
 
 template <typename OffsetType, size_t kAlign>
 bool Block<OffsetType, kAlign>::can_allocate(size_t alignment,
                                              size_t size) const {
-  if (is_usable_space_aligned(alignment) && inner_size() >= size)
-    return true; // Size and alignment constraints met.
-
-  // Either the alignment isn't met or we don't have enough size.
-  // If we don't meet alignment, we can always adjust such that we do meet the
-  // alignment. If we meet the alignment but just don't have enough size. This
-  // check will fail anyway.
-  size_t adjustment = padding_for_alignment(alignment);
-  return inner_size() >= size + adjustment;
+  if (inner_size() < size)
+    return false;
+  if (is_usable_space_aligned(alignment))
+    return true;
+
+  // Alignment isn't met, so a padding block is needed. Determine amount of
+  // inner_size() consumed by the padding block.
+  size_t padding_size = padding_for_alignment(alignment) - sizeof(prev_);
+
+  // Check that there is room for the allocation in the following aligned block.
+  size_t aligned_inner_size = inner_size() - padding_size - BLOCK_OVERHEAD;
+  return size <= aligned_inner_size;
 }
 
@@ -369,26 +405,19 @@ Block<OffsetType, kAlign>::allocate(Block *block, size_t alignment,
 
   BlockInfo info{block, /*prev=*/nullptr, /*next=*/nullptr};
 
   if (!info.block->is_usable_space_aligned(alignment)) {
-    size_t adjustment = info.block->padding_for_alignment(alignment);
-    LIBC_ASSERT((adjustment - BLOCK_OVERHEAD) % ALIGNMENT == 0 &&
-                "The adjustment calculation should always return a new size "
-                "that's a multiple of ALIGNMENT");
-
     Block *original = info.block;
     optional<Block *> maybe_aligned_block =
-        original->split(adjustment - BLOCK_OVERHEAD);
+        original->split(info.block->padding_for_alignment(alignment));
     LIBC_ASSERT(maybe_aligned_block.has_value() &&
                 "This split should always result in a new block. The check in "
                 "`can_allocate` ensures that we have enough space here to make "
                 "two blocks.");
 
-    if (Block *prev = original->prev()) {
-      // If there is a block before this, we can merge the current one with the
-      // newly created one.
+    if (Block *prev = original->prev_free()) {
+      // If there is a free block before this, we can merge the current one with
+      // the newly created one.
       prev->merge_next();
     } else {
-      // Otherwise, this was the very first block in the chain. Now we can make
-      // it the new first block.
       info.prev = original;
     }
 
@@ -410,9 +439,14 @@
 optional<Block<OffsetType, kAlign> *>
 Block<OffsetType, kAlign>::split(size_t new_inner_size) {
   if (used())
     return {};
+  // The prev_ field of the next block is always available, so there is a
+  // minimum size to a block created through splitting.
+  if (new_inner_size < sizeof(prev_))
+    return {};
 
   size_t old_inner_size = inner_size();
-  new_inner_size = align_up(new_inner_size, ALIGNMENT);
+  new_inner_size =
+      align_up(new_inner_size - sizeof(prev_), ALIGNMENT) + sizeof(prev_);
 
   if (old_inner_size < new_inner_size)
     return {};
 
@@ -425,41 +459,26 @@
 template <typename OffsetType, size_t kAlign>
 Block<OffsetType, kAlign> *
 Block<OffsetType, kAlign>::split_impl(size_t new_inner_size) {
-  size_t outer_size1 = new_inner_size + BLOCK_OVERHEAD;
-  bool has_next = next();
+  size_t outer_size1 = outer_size(new_inner_size);
+  LIBC_ASSERT(outer_size1 % ALIGNMENT == 0 && "new size must be aligned");
   ByteSpan new_region = region().subspan(outer_size1);
-  LIBC_ASSERT(!used() && "used blocks cannot be split");
-  // The low order bits of outer_size1 should both be zero, and is the correct
-  // value for the flags is false.
-  next_ = outer_size1;
-  LIBC_ASSERT(!used() && next() && "incorrect first split flags");
-  Block *new_block = as_block(outer_size1, new_region);
-
-  if (has_next) {
-    // The two flags are both false, so next_ is a plain size.
-    LIBC_ASSERT(!new_block->used() && next() && "flags disrupt use of size");
-    new_block->next()->prev_ = new_block->next_;
-  } else {
-    new_block->mark_last();
-  }
+  next_ &= ~SIZE_MASK;
+  next_ |= outer_size1;
+
+  Block *new_block = as_block(new_region);
+  mark_free(); // Free status for this block is now stored in new_block.
+  new_block->next()->prev_ = new_region.size();
   return new_block;
 }
 
 template <typename OffsetType, size_t kAlign>
 bool Block<OffsetType, kAlign>::merge_next() {
-  if (used() || !next() || next()->used())
+  if (used() || next()->used())
     return false;
-
-  // Extend the size and copy the last() flag from the next block to this one.
-  next_ &= SIZE_MASK;
-  next_ += next()->next_;
-
-  if (next()) {
-    // The two flags are both false, so next_ is a plain size.
-    LIBC_ASSERT(!used() && next() && "flags disrupt use of size");
-    next()->prev_ = next_;
-  }
-
+  size_t new_size = outer_size() + next()->outer_size();
+  next_ &= ~SIZE_MASK;
+  next_ |= new_size;
+  next()->prev_ = new_size;
   return true;
 }
 
@@ -472,39 +491,23 @@ Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::next() const {
 }
 
 template <typename OffsetType, size_t kAlign>
-Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::prev() const {
-  uintptr_t addr = (prev_ == 0) ? 0 : reinterpret_cast<uintptr_t>(this) - prev_;
-  return reinterpret_cast<Block *>(addr);
+Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::prev_free() const {
+  if (!(next_ & PREV_FREE_MASK))
+    return nullptr;
+  return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) - prev_);
 }
 
 // Private template method implementations.
 
 template <typename OffsetType, size_t kAlign>
-constexpr Block<OffsetType, kAlign>::Block(size_t prev_outer_size,
-                                           size_t outer_size) {
-  prev_ = prev_outer_size;
+constexpr Block<OffsetType, kAlign>::Block(size_t outer_size)
+    : next_(outer_size) {
   LIBC_ASSERT(outer_size % ALIGNMENT == 0 && "block sizes must be aligned");
-  next_ = outer_size;
-}
-
-template <typename OffsetType, size_t kAlign>
-Block<OffsetType, kAlign> *
-Block<OffsetType, kAlign>::as_block(size_t prev_outer_size, ByteSpan bytes) {
-  return ::new (bytes.data()) Block(prev_outer_size, bytes.size());
 }
 
 template <typename OffsetType, size_t kAlign>
-internal::BlockStatus Block<OffsetType, kAlign>::check_status() const {
-  if (reinterpret_cast<uintptr_t>(this) % ALIGNMENT != 0)
-    return internal::BlockStatus::MISALIGNED;
-
-  if (next() && (this >= next() || this != next()->prev()))
-    return internal::BlockStatus::NEXT_MISMATCHED;
-
-  if (prev() && (this <= prev() || this != prev()->next()))
-    return internal::BlockStatus::PREV_MISMATCHED;
-
-  return internal::BlockStatus::VALID;
+Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::as_block(ByteSpan bytes) {
+  return ::new (bytes.data()) Block(bytes.size());
 }
 
 } // namespace LIBC_NAMESPACE_DECL
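
The inner/outer size conversions introduced above are easiest to sanity-check
with concrete numbers. The following standalone sketch models the arithmetic;
the 4-byte offset type (so sizeof(prev_) == 4) and the 8-byte BLOCK_OVERHEAD
are illustrative assumptions, not values taken from the patch:

#include <cassert>
#include <cstddef>

// Assumed illustrative constants; in the real header these derive from
// OffsetType and the packed Block layout.
constexpr size_t PREV_FIELD_SIZE = 4; // stands in for sizeof(prev_)
constexpr size_t BLOCK_OVERHEAD = 8;  // stands in for the full header size

// Mirrors static Block::outer_size(inner_size): the usable region extends
// into the next block's prev_ field.
constexpr size_t outer_size(size_t inner) {
  return inner - PREV_FIELD_SIZE + BLOCK_OVERHEAD;
}

// Mirrors static Block::inner_size(outer_size).
constexpr size_t inner_size(size_t outer) {
  return outer - BLOCK_OVERHEAD + PREV_FIELD_SIZE;
}

int main() {
  // The two conversions are inverses, and usable space exceeds the naive
  // outer - BLOCK_OVERHEAD value by exactly the borrowed prev_ field.
  for (size_t outer = 16; outer <= 1024; outer += 8) {
    assert(inner_size(outer) == outer - BLOCK_OVERHEAD + PREV_FIELD_SIZE);
    assert(outer_size(inner_size(outer)) == outer);
  }
  return 0;
}
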
diff --git a/libc/src/__support/freelist_heap.h b/libc/src/__support/freelist_heap.h
index fed00d06716cfb0..6c860d039553abf 100644
--- a/libc/src/__support/freelist_heap.h
+++ b/libc/src/__support/freelist_heap.h
@@ -41,15 +41,6 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS> class FreeListHeap {
   static constexpr size_t MIN_ALIGNMENT =
       cpp::max(BlockType::ALIGNMENT, alignof(max_align_t));
 
-  struct HeapStats {
-    size_t total_bytes;
-    size_t bytes_allocated;
-    size_t cumulative_allocated;
-    size_t cumulative_freed;
-    size_t total_allocate_calls;
-    size_t total_free_calls;
-  };
-
   constexpr FreeListHeap() : begin_(&_end), end_(&__llvm_libc_heap_limit) {}
 
   constexpr FreeListHeap(span<cpp::byte> region)
@@ -63,8 +54,6 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS> class FreeListHeap {
   void *realloc(void *ptr, size_t size);
   void *calloc(size_t num, size_t size);
 
-  const HeapStats &heap_stats() const { return heap_stats_; }
-
   cpp::span<cpp::byte> region() const { return {begin_, end_}; }
 
 private:
@@ -82,7 +71,6 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS> class FreeListHeap {
   cpp::byte *begin_;
   cpp::byte *end_;
   FreeListType freelist_{DEFAULT_BUCKETS};
-  HeapStats heap_stats_{};
 };
 
 template <size_t BUFF_SIZE, size_t NUM_BUCKETS = DEFAULT_BUCKETS>
@@ -100,7 +88,6 @@ class FreeListHeapBuffer : public FreeListHeap<NUM_BUCKETS> {
 template <size_t NUM_BUCKETS> void FreeListHeap<NUM_BUCKETS>::init() {
   LIBC_ASSERT(!is_initialized_ && "duplicate initialization");
-  heap_stats_.total_bytes = region().size();
   auto result = BlockType::init(region());
   BlockType *block = *result;
   freelist_.add_chunk(block_to_span(block));
@@ -139,10 +126,6 @@ void *FreeListHeap<NUM_BUCKETS>::allocate_impl(size_t alignment, size_t size) {
 
   chunk_block->mark_used();
 
-  heap_stats_.bytes_allocated += size;
-  heap_stats_.cumulative_allocated += size;
-  heap_stats_.total_allocate_calls += 1;
-
   return chunk_block->usable_space();
 }
 
@@ -171,35 +154,26 @@ template <size_t NUM_BUCKETS> void FreeListHeap<NUM_BUCKETS>::free(void *ptr) {
   LIBC_ASSERT(is_valid_ptr(bytes) && "Invalid pointer");
 
   BlockType *chunk_block = BlockType::from_usable_space(bytes);
-
-  size_t size_freed = chunk_block->inner_size();
+  LIBC_ASSERT(chunk_block->next() && "sentinel last block cannot be freed");
   LIBC_ASSERT(chunk_block->used() && "The block is not in-use");
   chunk_block->mark_free();
 
   // Can we combine with the left or right blocks?
-  BlockType *prev = chunk_block->prev();
-  BlockType *next = nullptr;
+  BlockType *prev_free = chunk_block->prev_free();
+  BlockType *next = chunk_block->next();
 
-  if (chunk_block->next())
-    next = chunk_block->next();
-
-  if (prev != nullptr && !prev->used()) {
+  if (prev_free != nullptr) {
     // Remove from freelist and merge
-    freelist_.remove_chunk(block_to_span(prev));
-    chunk_block = chunk_block->prev();
+    freelist_.remove_chunk(block_to_span(prev_free));
+    chunk_block = prev_free;
     chunk_block->merge_next();
   }
-
-  if (next != nullptr && !next->used()) {
+  if (!next->used()) {
     freelist_.remove_chunk(block_to_span(next));
     chunk_block->merge_next();
   }
 
   // Add back to the freelist
   freelist_.add_chunk(block_to_span(chunk_block));
-
-  heap_stats_.bytes_allocated -= size_freed;
-  heap_stats_.cumulative_freed += size_freed;
-  heap_stats_.total_free_calls += 1;
 }
 
 // Follows the contract of the C standard realloc() function
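
With HeapStats removed, the heap no longer keeps a running byte counter; the
tests below recover equivalent information by inspecting blocks directly. The
same idea generalizes to a small walk over the block chain. The helper below
is hypothetical (not part of the patch) and uses only the Block API shown
above:

// Hypothetical helper: sums the usable bytes of used blocks by walking from
// the first block (as returned by BlockType::init) up to the sentinel last
// block, whose next() is nullptr.
template <typename BlockType>
size_t bytes_in_use(BlockType *first) {
  size_t total = 0;
  for (BlockType *b = first; b->next() != nullptr; b = b->next())
    if (b->used())
      total += b->inner_size();
  return total;
}
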
diff --git a/libc/test/src/__support/block_test.cpp b/libc/test/src/__support/block_test.cpp
index ecce00b7926f9eb..62d7fae67bdc3d9 100644
--- a/libc/test/src/__support/block_test.cpp
+++ b/libc/test/src/__support/block_test.cpp
@@ -49,10 +49,20 @@ TEST_FOR_EACH_BLOCK_TYPE(CanCreateSingleAlignedBlock) {
   ASSERT_TRUE(result.has_value());
   BlockType *block = *result;
 
-  EXPECT_EQ(block->outer_size(), kN);
-  EXPECT_EQ(block->inner_size(), kN - BlockType::BLOCK_OVERHEAD);
-  EXPECT_EQ(block->prev(), static_cast<BlockType *>(nullptr));
-  EXPECT_EQ(block->next(), static_cast<BlockType *>(nullptr));
+  BlockType *last = block->next();
+  ASSERT_NE(last, static_cast<BlockType *>(nullptr));
+  constexpr size_t last_outer_size = BlockType::BLOCK_OVERHEAD;
+  EXPECT_EQ(last->outer_size(), last_outer_size);
+  EXPECT_EQ(last->prev_free(), block);
+  EXPECT_TRUE(last->used());
+
+  EXPECT_EQ(block->outer_size(), kN - last_outer_size);
+  constexpr size_t last_prev_field_size =
+      sizeof(typename BlockType::offset_type);
+  EXPECT_EQ(block->inner_size(), kN - last_outer_size -
+                                     BlockType::BLOCK_OVERHEAD +
+                                     last_prev_field_size);
+  EXPECT_EQ(block->prev_free(), static_cast<BlockType *>(nullptr));
   EXPECT_FALSE(block->used());
 }
 
@@ -88,26 +98,29 @@ TEST(LlvmLibcBlockTest, CannotCreateTooLargeBlock) {
 
 TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlock) {
   constexpr size_t kN = 1024;
-  constexpr size_t kSplitN = 512;
+  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
+  // Give the split position a large alignment.
+  constexpr size_t kSplitN = 512 + prev_field_size;
 
   alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
   auto result = BlockType::init(bytes);
   ASSERT_TRUE(result.has_value());
   auto *block1 = *result;
+  size_t orig_size = block1->outer_size();
 
   result = block1->split(kSplitN);
   ASSERT_TRUE(result.has_value());
-
   auto *block2 = *result;
 
   EXPECT_EQ(block1->inner_size(), kSplitN);
-  EXPECT_EQ(block1->outer_size(), kSplitN + BlockType::BLOCK_OVERHEAD);
+  EXPECT_EQ(block1->outer_size(),
+            kSplitN - prev_field_size + BlockType::BLOCK_OVERHEAD);
 
-  EXPECT_EQ(block2->outer_size(), kN - kSplitN - BlockType::BLOCK_OVERHEAD);
+  EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size());
   EXPECT_FALSE(block2->used());
 
   EXPECT_EQ(block1->next(), block2);
-  EXPECT_EQ(block2->prev(), block1);
+  EXPECT_EQ(block2->prev_free(), block1);
 }
 
 TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlockUnaligned) {
@@ -117,26 +130,27 @@ TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlockUnaligned) {
   auto result = BlockType::init(bytes);
   ASSERT_TRUE(result.has_value());
   BlockType *block1 = *result;
+  size_t orig_size = block1->outer_size();
 
-  // We should split at sizeof(BlockType) + kSplitN bytes. Then
-  // we need to round that up to an alignof(BlockType) boundary.
   constexpr size_t kSplitN = 513;
-  uintptr_t split_addr = reinterpret_cast<uintptr_t>(block1) + kSplitN;
+  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
+  uintptr_t split_addr =
+      reinterpret_cast<uintptr_t>(block1) + (kSplitN - prev_field_size);
+  // Round split_addr up to a multiple of the alignment.
   split_addr += alignof(BlockType) - (split_addr % alignof(BlockType));
-  uintptr_t split_len = split_addr - (uintptr_t)&bytes;
+  uintptr_t split_len = split_addr - (uintptr_t)&bytes + prev_field_size;
 
   result = block1->split(kSplitN);
   ASSERT_TRUE(result.has_value());
   BlockType *block2 = *result;
 
   EXPECT_EQ(block1->inner_size(), split_len);
-  EXPECT_EQ(block1->outer_size(), split_len + BlockType::BLOCK_OVERHEAD);
 
-  EXPECT_EQ(block2->outer_size(), kN - block1->outer_size());
+  EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size());
   EXPECT_FALSE(block2->used());
 
   EXPECT_EQ(block1->next(), block2);
-  EXPECT_EQ(block2->prev(), block1);
+  EXPECT_EQ(block2->prev_free(), block1);
 }
 
 TEST_FOR_EACH_BLOCK_TYPE(CanSplitMidBlock) {
@@ -167,9 +181,9 @@ TEST_FOR_EACH_BLOCK_TYPE(CanSplitMidBlock) {
   BlockType *block3 = *result;
 
   EXPECT_EQ(block1->next(), block3);
-  EXPECT_EQ(block3->prev(), block1);
+  EXPECT_EQ(block3->prev_free(), block1);
   EXPECT_EQ(block3->next(), block2);
-  EXPECT_EQ(block2->prev(), block3);
+  EXPECT_EQ(block2->prev_free(), block3);
 }
 
 TEST_FOR_EACH_BLOCK_TYPE(CannotSplitTooSmallBlock) {
@@ -187,7 +201,7 @@ TEST_FOR_EACH_BLOCK_TYPE(CannotSplitTooSmallBlock) {
 
 TEST_FOR_EACH_BLOCK_TYPE(CannotSplitBlockWithoutHeaderSpace) {
   constexpr size_t kN = 1024;
-  constexpr size_t kSplitN = kN - BlockType::BLOCK_OVERHEAD - 1;
+  constexpr size_t kSplitN = kN - 2 * BlockType::BLOCK_OVERHEAD - 1;
 
   alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
   auto result = BlockType::init(bytes);
@@ -224,8 +238,9 @@ TEST_FOR_EACH_BLOCK_TYPE(CannotMakeSecondBlockLargerInSplit) {
   ASSERT_FALSE(result.has_value());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanMakeZeroSizeFirstBlock) {
-  // This block does support splitting with zero payload size.
+TEST_FOR_EACH_BLOCK_TYPE(CannotMakeZeroSizeFirstBlock) {
+  // This block doesn't support splitting with zero payload size, since the
+  // prev_ field of the next block is always available.
   constexpr size_t kN = 1024;
 
   alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
@@ -234,13 +249,28 @@ TEST_FOR_EACH_BLOCK_TYPE(CanMakeZeroSizeFirstBlock) {
   BlockType *block = *result;
 
   result = block->split(0);
+  EXPECT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanMakeMinimalSizeFirstBlock) {
+  // This block does support splitting with minimal payload size.
+  constexpr size_t kN = 1024;
+  constexpr size_t minimal_size = sizeof(typename BlockType::offset_type);
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = block->split(minimal_size);
   ASSERT_TRUE(result.has_value());
-  EXPECT_EQ(block->inner_size(), static_cast<size_t>(0));
+  EXPECT_EQ(block->inner_size(), minimal_size);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanMakeZeroSizeSecondBlock) {
-  // Likewise, the split block can be zero-width.
+TEST_FOR_EACH_BLOCK_TYPE(CanMakeMinimalSizeSecondBlock) {
+  // Likewise, the split block can be minimal-width.
   constexpr size_t kN = 1024;
+  constexpr size_t minimal_size = sizeof(typename BlockType::offset_type);
 
   alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
   auto result = BlockType::init(bytes);
@@ -251,7 +281,7 @@ TEST_FOR_EACH_BLOCK_TYPE(CanMakeMinimalSizeSecondBlock) {
   ASSERT_TRUE(result.has_value());
   BlockType *block2 = *result;
 
-  EXPECT_EQ(block2->inner_size(), static_cast<size_t>(0));
+  EXPECT_EQ(block2->inner_size(), minimal_size);
 }
 
 TEST_FOR_EACH_BLOCK_TYPE(CanMarkBlockUsed) {
@@ -261,12 +291,11 @@
   auto result = BlockType::init(bytes);
   ASSERT_TRUE(result.has_value());
   BlockType *block = *result;
+  size_t orig_size = block->outer_size();
 
   block->mark_used();
   EXPECT_TRUE(block->used());
-
-  // Size should be unaffected.
-  EXPECT_EQ(block->outer_size(), kN);
+  EXPECT_EQ(block->outer_size(), orig_size);
 
   block->mark_free();
   EXPECT_FALSE(block->used());
@@ -290,13 +319,16 @@ TEST_FOR_EACH_BLOCK_TYPE(CanMergeWithNextBlock) {
   // Do the three way merge from "CanSplitMidBlock", and let's
   // merge block 3 and 2
   constexpr size_t kN = 1024;
-  constexpr size_t kSplit1 = 512;
-  constexpr size_t kSplit2 = 256;
+  // Give the split positions large alignments.
+  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
+  constexpr size_t kSplit1 = 512 + prev_field_size;
+  constexpr size_t kSplit2 = 256 + prev_field_size;
 
   alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
   auto result = BlockType::init(bytes);
   ASSERT_TRUE(result.has_value());
   BlockType *block1 = *result;
+  size_t orig_size = block1->outer_size();
 
   result = block1->split(kSplit1);
   ASSERT_TRUE(result.has_value());
@@ -308,9 +340,9 @@ TEST_FOR_EACH_BLOCK_TYPE(CanMergeWithNextBlock) {
   EXPECT_TRUE(block3->merge_next());
 
   EXPECT_EQ(block1->next(), block3);
-  EXPECT_EQ(block3->prev(), block1);
+  EXPECT_EQ(block3->prev_free(), block1);
   EXPECT_EQ(block1->inner_size(), kSplit2);
-  EXPECT_EQ(block3->outer_size(), kN - block1->outer_size());
+  EXPECT_EQ(block3->outer_size(), orig_size - block1->outer_size());
 }
 
 TEST_FOR_EACH_BLOCK_TYPE(CannotMergeWithFirstOrLastBlock) {
@@ -347,67 +379,6 @@ TEST_FOR_EACH_BLOCK_TYPE(CannotMergeUsedBlock) {
   EXPECT_FALSE(block->merge_next());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanCheckValidBlock) {
-  constexpr size_t kN = 1024;
-  constexpr size_t kSplit1 = 512;
-  constexpr size_t kSplit2 = 256;
-
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
-  ASSERT_TRUE(result.has_value());
-  BlockType *block1 = *result;
-
-  result = block1->split(kSplit1);
-  ASSERT_TRUE(result.has_value());
-  BlockType *block2 = *result;
-
-  result = block2->split(kSplit2);
-  ASSERT_TRUE(result.has_value());
-  BlockType *block3 = *result;
-
-  EXPECT_TRUE(block1->is_valid());
-  EXPECT_TRUE(block2->is_valid());
-  EXPECT_TRUE(block3->is_valid());
-}
-
-TEST_FOR_EACH_BLOCK_TYPE(CanCheckInvalidBlock) {
-  constexpr size_t kN = 1024;
-  constexpr size_t kSplit1 = 128;
-  constexpr size_t kSplit2 = 384;
-  constexpr size_t kSplit3 = 256;
-
-  array<byte, kN> bytes{};
-  auto result = BlockType::init(bytes);
-  ASSERT_TRUE(result.has_value());
-  BlockType *block1 = *result;
-
-  result = block1->split(kSplit1);
-  ASSERT_TRUE(result.has_value());
-  BlockType *block2 = *result;
-
-  result = block2->split(kSplit2);
-  ASSERT_TRUE(result.has_value());
-  BlockType *block3 = *result;
-
-  result = block3->split(kSplit3);
-  ASSERT_TRUE(result.has_value());
-
-  // Corrupt a Block header.
-  // This must not touch memory outside the original region, or the test may
-  // (correctly) abort when run with address sanitizer.
-  // To remain as agostic to the internals of `Block` as possible, the test
-  // copies a smaller block's header to a larger block.
-  EXPECT_TRUE(block1->is_valid());
-  EXPECT_TRUE(block2->is_valid());
-  EXPECT_TRUE(block3->is_valid());
-  auto *src = reinterpret_cast<cpp::byte *>(block1);
-  auto *dst = reinterpret_cast<cpp::byte *>(block2);
-  LIBC_NAMESPACE::memcpy(dst, src, sizeof(BlockType));
-  EXPECT_FALSE(block1->is_valid());
-  EXPECT_FALSE(block2->is_valid());
-  EXPECT_FALSE(block3->is_valid());
-}
-
 TEST_FOR_EACH_BLOCK_TYPE(CanGetBlockFromUsableSpace) {
   constexpr size_t kN = 1024;
 
@@ -435,10 +406,10 @@ TEST_FOR_EACH_BLOCK_TYPE(CanGetConstBlockFromUsableSpace) {
 }
 
 TEST_FOR_EACH_BLOCK_TYPE(CanAllocate) {
-  constexpr size_t kN = 1024;
+  constexpr size_t kN = 1024 + BlockType::BLOCK_OVERHEAD;
 
   // Ensure we can allocate everything up to the block size within this block.
-  for (size_t i = 0; i < kN - BlockType::BLOCK_OVERHEAD; ++i) {
+  for (size_t i = 0; i < kN - 2 * BlockType::BLOCK_OVERHEAD; ++i) {
     alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
     auto result = BlockType::init(bytes);
     ASSERT_TRUE(result.has_value());
@@ -458,12 +429,12 @@ TEST_FOR_EACH_BLOCK_TYPE(CanAllocate) {
   ASSERT_TRUE(result.has_value());
   BlockType *block = *result;
 
-  // Given a block of size kN (assuming it's also a power of two), we should be
-  // able to allocate a block within it that's aligned to half its size. This is
+  // Given a block of size N (assuming it's also a power of two), we should be
+  // able to allocate a block within it that's aligned to N/2. This is
   // because regardless of where the buffer is located, we can always find a
   // starting location within it that meets this alignment.
-  EXPECT_TRUE(block->can_allocate(kN / 2, 1));
-  auto info = BlockType::allocate(block, kN / 2, 1);
+  EXPECT_TRUE(block->can_allocate(block->outer_size() / 2, 1));
+  auto info = BlockType::allocate(block, block->outer_size() / 2, 1);
   EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
 }
 
@@ -477,7 +448,8 @@ TEST_FOR_EACH_BLOCK_TYPE(AllocateAlreadyAligned) {
 
   // This should result in no new blocks.
   constexpr size_t kAlignment = BlockType::ALIGNMENT;
-  constexpr size_t kExpectedSize = BlockType::ALIGNMENT;
+  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
+  constexpr size_t kExpectedSize = BlockType::ALIGNMENT + prev_field_size;
   EXPECT_TRUE(block->can_allocate(kAlignment, kExpectedSize));
 
   auto [aligned_block, prev, next] =
@@ -495,7 +467,7 @@ TEST_FOR_EACH_BLOCK_TYPE(AllocateAlreadyAligned) {
   EXPECT_NE(next, static_cast<BlockType *>(nullptr));
   EXPECT_EQ(aligned_block->next(), next);
   EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
-            bytes.data() + bytes.size());
+            bytes.data() + bytes.size() - BlockType::BLOCK_OVERHEAD);
 }
 
 TEST_FOR_EACH_BLOCK_TYPE(AllocateNeedsAlignment) {
@@ -508,7 +480,7 @@ TEST_FOR_EACH_BLOCK_TYPE(AllocateNeedsAlignment) {
 
   // Ensure first the usable_data is only aligned to the block alignment.
   ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
-  ASSERT_EQ(block->prev(), static_cast<BlockType *>(nullptr));
+  ASSERT_EQ(block->prev_free(), static_cast<BlockType *>(nullptr));
 
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
@@ -525,7 +497,7 @@ TEST_FOR_EACH_BLOCK_TYPE(AllocateNeedsAlignment) {
 
   // Check the previous block was created appropriately. Since this block is the
   // first block, a new one should be made before this.
   EXPECT_NE(prev, static_cast<BlockType *>(nullptr));
-  EXPECT_EQ(aligned_block->prev(), prev);
+  EXPECT_EQ(aligned_block->prev_free(), prev);
   EXPECT_EQ(prev->next(), aligned_block);
   EXPECT_EQ(prev->outer_size(), reinterpret_cast<uintptr_t>(aligned_block) -
                                     reinterpret_cast<uintptr_t>(prev));
@@ -537,7 +509,8 @@ TEST_FOR_EACH_BLOCK_TYPE(AllocateNeedsAlignment) {
 
   // Check the next block.
   EXPECT_NE(next, static_cast<BlockType *>(nullptr));
   EXPECT_EQ(aligned_block->next(), next);
-  EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(), &*bytes.end());
+  EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
+            bytes.data() + bytes.size() - BlockType::BLOCK_OVERHEAD);
 }
 
 TEST_FOR_EACH_BLOCK_TYPE(PreviousBlockMergedIfNotFirst) {
@@ -552,7 +525,7 @@ TEST_FOR_EACH_BLOCK_TYPE(PreviousBlockMergedIfNotFirst) {
   auto result2 = block->split(kN / 2);
   ASSERT_TRUE(result2.has_value());
   BlockType *newblock = *result2;
-  ASSERT_EQ(newblock->prev(), block);
+  ASSERT_EQ(newblock->prev_free(), block);
   size_t old_prev_size = block->outer_size();
 
   // Now pick an alignment such that the usable space is not already aligned to
@@ -571,7 +544,7 @@ TEST_FOR_EACH_BLOCK_TYPE(PreviousBlockMergedIfNotFirst) {
   // Now there should be no new previous block. Instead, the padding we did
   // create should be merged into the original previous block.
   EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
-  EXPECT_EQ(aligned_block->prev(), block);
+  EXPECT_EQ(aligned_block->prev_free(), block);
   EXPECT_EQ(block->next(), aligned_block);
   EXPECT_GT(block->outer_size(), old_prev_size);
 }
@@ -587,10 +560,11 @@ TEST_FOR_EACH_BLOCK_TYPE(CanRemergeBlockAllocations) {
   auto result = BlockType::init(bytes);
   ASSERT_TRUE(result.has_value());
   BlockType *block = *result;
+  BlockType *last = block->next();
 
   // Ensure first the usable_data is only aligned to the block alignment.
   ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
-  ASSERT_EQ(block->prev(), static_cast<BlockType *>(nullptr));
+  ASSERT_EQ(block->prev_free(), static_cast<BlockType *>(nullptr));
 
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
@@ -606,20 +580,20 @@ TEST_FOR_EACH_BLOCK_TYPE(CanRemergeBlockAllocations) {
 
   // Check we have the appropriate blocks.
   ASSERT_NE(prev, static_cast<BlockType *>(nullptr));
-  ASSERT_EQ(aligned_block->prev(), prev);
-  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+  ASSERT_EQ(aligned_block->prev_free(), prev);
   EXPECT_NE(next, static_cast<BlockType *>(nullptr));
   EXPECT_EQ(aligned_block->next(), next);
-  EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(next->next(), last);
 
   // Now check for successful merges.
   EXPECT_TRUE(prev->merge_next());
   EXPECT_EQ(prev->next(), next);
   EXPECT_TRUE(prev->merge_next());
-  EXPECT_EQ(prev->next(), static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(prev->next(), last);
 
   // We should have the original buffer.
   EXPECT_EQ(reinterpret_cast<byte *>(prev), &*bytes.begin());
-  EXPECT_EQ(prev->outer_size(), bytes.size());
-  EXPECT_EQ(reinterpret_cast<byte *>(prev) + prev->outer_size(), &*bytes.end());
+  EXPECT_EQ(prev->outer_size(), bytes.size() - BlockType::BLOCK_OVERHEAD);
+  EXPECT_EQ(reinterpret_cast<byte *>(prev) + prev->outer_size(),
+            &*bytes.end() - BlockType::BLOCK_OVERHEAD);
 }
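
The alignment tests above rely on the updated padding_for_alignment()
contract: the returned inner size positions the next block's usable space,
not the split point itself, on the requested boundary. A standalone model of
the math (the 8-byte header, 4-byte prev_ field, and example address are
assumptions for illustration):

#include <cassert>
#include <cstdint>

constexpr uintptr_t align_up(uintptr_t v, uintptr_t a) {
  return (v + a - 1) & ~(a - 1);
}

int main() {
  constexpr uintptr_t BLOCK_OVERHEAD = 8; // assumed header size
  constexpr uintptr_t PREV_FIELD = 4;     // assumed sizeof(prev_)
  uintptr_t start = 0x1008;               // hypothetical usable_space()
  uintptr_t alignment = 64;

  // Same steps as padding_for_alignment(): find the next aligned usable
  // space, back up to where that block's header must sit, then count the
  // borrowed prev_ field into this block's inner size.
  uintptr_t next_usable_space = align_up(start + BLOCK_OVERHEAD, alignment);
  uintptr_t next_block = next_usable_space - BLOCK_OVERHEAD;
  uintptr_t padding = next_block - start + PREV_FIELD;

  // Splitting at this inner size puts the new block's usable space exactly
  // on the alignment boundary.
  assert((start + padding - PREV_FIELD + BLOCK_OVERHEAD) % alignment == 0);
  return 0;
}
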
diff --git a/libc/test/src/__support/freelist_heap_test.cpp b/libc/test/src/__support/freelist_heap_test.cpp
index fc4348aed6b56a3..973900dfdf56eaa 100644
--- a/libc/test/src/__support/freelist_heap_test.cpp
+++ b/libc/test/src/__support/freelist_heap_test.cpp
@@ -99,7 +99,7 @@ TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) {
   // Use aligned_allocate so we don't need to worry about ensuring the `buf`
   // being aligned to max_align_t.
   EXPECT_NE(allocator.aligned_allocate(
-                1, N - FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
+                1, N - 2 * FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
             static_cast<void *>(nullptr));
   EXPECT_EQ(allocator.allocate(1), static_cast<void *>(nullptr));
 }
diff --git a/libc/test/src/__support/freelist_malloc_test.cpp b/libc/test/src/__support/freelist_malloc_test.cpp
index 66a923f77821834..9cbdec89f6576e9 100644
--- a/libc/test/src/__support/freelist_malloc_test.cpp
+++ b/libc/test/src/__support/freelist_malloc_test.cpp
@@ -14,59 +14,42 @@
 #include "test/UnitTest/Test.h"
 
 using LIBC_NAMESPACE::freelist_heap;
+using LIBC_NAMESPACE::FreeListHeap;
 using LIBC_NAMESPACE::FreeListHeapBuffer;
 
-TEST(LlvmLibcFreeListMalloc, MallocStats) {
+TEST(LlvmLibcFreeListMalloc, Malloc) {
   constexpr size_t kAllocSize = 256;
   constexpr size_t kCallocNum = 4;
   constexpr size_t kCallocSize = 64;
 
-  void *ptr1 = LIBC_NAMESPACE::malloc(kAllocSize);
-
-  const auto &freelist_heap_stats = freelist_heap->heap_stats();
+  typedef FreeListHeap<>::BlockType Block;
 
-  ASSERT_NE(ptr1, static_cast<void *>(nullptr));
-  EXPECT_EQ(freelist_heap_stats.bytes_allocated, kAllocSize);
-  EXPECT_EQ(freelist_heap_stats.cumulative_allocated, kAllocSize);
-  EXPECT_EQ(freelist_heap_stats.cumulative_freed, size_t(0));
+  void *ptr1 = LIBC_NAMESPACE::malloc(kAllocSize);
+  auto *block = Block::from_usable_space(ptr1);
+  EXPECT_GE(block->inner_size(), kAllocSize);
 
   LIBC_NAMESPACE::free(ptr1);
-  EXPECT_EQ(freelist_heap_stats.bytes_allocated, size_t(0));
-  EXPECT_EQ(freelist_heap_stats.cumulative_allocated, kAllocSize);
-  EXPECT_EQ(freelist_heap_stats.cumulative_freed, kAllocSize);
+  ASSERT_NE(block->next(), static_cast<Block *>(nullptr));
+  ASSERT_EQ(block->next()->next(), static_cast<Block *>(nullptr));
+  size_t heap_size = block->inner_size();
 
   void *ptr2 = LIBC_NAMESPACE::calloc(kCallocNum, kCallocSize);
-  ASSERT_NE(ptr2, static_cast<void *>(nullptr));
-  EXPECT_EQ(freelist_heap_stats.bytes_allocated, kCallocNum * kCallocSize);
-  EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
-            kAllocSize + kCallocNum * kCallocSize);
-  EXPECT_EQ(freelist_heap_stats.cumulative_freed, kAllocSize);
+  ASSERT_EQ(ptr2, ptr1);
+  EXPECT_GE(block->inner_size(), kCallocNum * kCallocSize);
 
-  for (size_t i = 0; i < kCallocNum * kCallocSize; ++i) {
+  for (size_t i = 0; i < kCallocNum * kCallocSize; ++i)
     EXPECT_EQ(reinterpret_cast<uint8_t *>(ptr2)[i], uint8_t(0));
-  }
 
   LIBC_NAMESPACE::free(ptr2);
-  EXPECT_EQ(freelist_heap_stats.bytes_allocated, size_t(0));
-  EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
-            kAllocSize + kCallocNum * kCallocSize);
-  EXPECT_EQ(freelist_heap_stats.cumulative_freed,
-            kAllocSize + kCallocNum * kCallocSize);
+  EXPECT_EQ(block->inner_size(), heap_size);
 
   constexpr size_t ALIGN = kAllocSize;
   void *ptr3 = LIBC_NAMESPACE::aligned_alloc(ALIGN, kAllocSize);
   EXPECT_NE(ptr3, static_cast<void *>(nullptr));
   EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % ALIGN, size_t(0));
-  EXPECT_EQ(freelist_heap_stats.bytes_allocated, kAllocSize);
-  EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
-            kAllocSize + kCallocNum * kCallocSize + kAllocSize);
-  EXPECT_EQ(freelist_heap_stats.cumulative_freed,
-            kAllocSize + kCallocNum * kCallocSize);
+  auto *aligned_block = reinterpret_cast<Block *>(ptr3);
+  EXPECT_GE(aligned_block->inner_size(), kAllocSize);
 
   LIBC_NAMESPACE::free(ptr3);
-  EXPECT_EQ(freelist_heap_stats.bytes_allocated, size_t(0));
-  EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
-            kAllocSize + kCallocNum * kCallocSize + kAllocSize);
-  EXPECT_EQ(freelist_heap_stats.cumulative_freed,
-            kAllocSize + kCallocNum * kCallocSize + kAllocSize);
+  EXPECT_EQ(block->inner_size(), heap_size);
 }
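
Finally, the split() rounding that several of the updated tests encode can be
modeled in isolation. The 4-byte offset type and an ALIGNMENT of 4 are
assumptions chosen to keep the numbers small; the real ALIGNMENT is
max(kAlign, 4):

#include <cassert>
#include <cstddef>

constexpr size_t PREV_FIELD = 4; // assumed sizeof(prev_)
constexpr size_t ALIGNMENT = 4;  // assumed block alignment

constexpr size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

// Models Block::split() rounding: the request is rounded so that the split
// point (inner size minus the borrowed prev_ field) lands on an ALIGNMENT
// boundary; requests below sizeof(prev_) are rejected by split() itself.
size_t rounded_inner_size(size_t requested) {
  assert(requested >= PREV_FIELD);
  return align_up(requested - PREV_FIELD, ALIGNMENT) + PREV_FIELD;
}

int main() {
  assert(rounded_inner_size(4) == 4);     // the new minimal block size
  assert(rounded_inner_size(5) == 8);     // one usable byte rounds up to 4
  assert(rounded_inner_size(513) == 516); // cf. the unaligned-split test
  return 0;
}
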