Skip to content

Commit

Permalink
[GR-18214] Compacting garbage collection (non-default).
Browse files Browse the repository at this point in the history
PullRequest: graal/15068
  • Loading branch information
peter-hofer committed May 23, 2024
2 parents c6302a0 + 42cd023 commit 1db13d4
Show file tree
Hide file tree
Showing 36 changed files with 2,257 additions and 374 deletions.
1 change: 1 addition & 0 deletions substratevm/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ This changelog summarizes major changes to GraalVM Native Image.
* (GR-43837) `--report-unsupported-elements-at-runtime` is now enabled by default and the option is deprecated.
* (GR-53359) Provide the `.debug_gdb_scripts` section that triggers auto-loading of `svmhelpers.py` in GDB. Remove single and double quotes from `ClassLoader.nameAndId` in the debuginfo.
* (GR-47365) Include dynamic proxy metadata in the reflection metadata with the syntax `"type": { "proxy": [<interface list>] }`. This allows members of proxy classes to be accessed reflectively. `proxy-config.json` is now deprecated but will still be honored.
* (GR-18214) In-place compacting garbage collection for the Serial GC old generation with `-H:+CompactingOldGen`.

## GraalVM for JDK 22 (Internal Version 24.0.0)
* (GR-48304) Red Hat added support for the JFR event ThreadAllocationStatistics.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -155,11 +155,12 @@ public String getName() {
public boolean shouldCollectCompletely(boolean followingIncrementalCollection) { // should_{attempt_scavenge,full_GC}
guaranteeSizeParametersInitialized();

if (!followingIncrementalCollection && shouldCollectYoungGenSeparately(true)) {
if (!followingIncrementalCollection && shouldCollectYoungGenSeparately(!SerialGCOptions.useCompactingOldGen())) {
/*
* Default to always doing an incremental collection first because we expect most of the
* objects in the young generation to be garbage, and we can reuse their leftover chunks
* for copying the live objects in the old generation with fewer allocations.
* With a copying collector, default to always doing an incremental collection first
* because we expect most of the objects in the young generation to be garbage, and we
* can reuse their leftover chunks for copying the live objects in the old generation
* with fewer allocations. With a compacting collector, there is no benefit.
*/
return false;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
*/
package com.oracle.svm.core.genscavenge;

import org.graalvm.nativeimage.c.struct.RawField;
import org.graalvm.nativeimage.c.struct.RawStructure;
import org.graalvm.word.Pointer;
import org.graalvm.word.UnsignedWord;
Expand All @@ -33,6 +34,7 @@
import com.oracle.svm.core.Uninterruptible;
import com.oracle.svm.core.genscavenge.remset.RememberedSet;
import com.oracle.svm.core.heap.ObjectVisitor;
import com.oracle.svm.core.heap.RestrictHeapAccess;
import com.oracle.svm.core.util.PointerUtils;

import jdk.graal.compiler.api.directives.GraalDirectives;
Expand All @@ -50,17 +52,14 @@
* Most allocation within a AlignedHeapChunk is via fast-path allocation snippets, but a slow-path
* allocation method is available.
* <p>
* Objects in a AlignedHeapChunk have to be promoted by copying from their current HeapChunk to a
* destination HeapChunk.
* <p>
* An AlignedHeapChunk is laid out:
* An AlignedHeapChunk is laid out as follows:
*
* <pre>
* +===============+-------+--------+----------------------+
* | AlignedHeader | Card | First | Object ... |
* | Fields | Table | Object | |
* | | | Table | |
* +===============+-------+--------+----------------------+
* +===============+-------+--------+-----------------+-----------------+
* | AlignedHeader | Card | First | Initial Object | Object ... |
* | Fields | Table | Object | Move Info (only | |
* | | | Table | Compacting GC) | |
* +===============+-------+--------+-----------------+-----------------+
* </pre>
*
* The size of both the CardTable and the FirstObjectTable depends on the used {@link RememberedSet}
Expand All @@ -78,15 +77,22 @@ private AlignedHeapChunk() { // all static
*/
@RawStructure
public interface AlignedHeader extends HeapChunk.Header<AlignedHeader> {
// Raw-struct flag consulted by the compacting old-gen collector: when set, this
// chunk is swept in place rather than having its live objects moved.
// NOTE(review): semantics inferred from the accessor name and the surrounding
// compacting-GC change — confirm against the collector's planning phase.
@RawField
boolean getShouldSweepInsteadOfCompact();

@RawField
void setShouldSweepInsteadOfCompact(boolean value);
}

/**
 * Initializes {@code chunk} as an empty aligned chunk of size {@code chunkSize}: sets up the
 * common {@link HeapChunk} header fields (top starts at the objects area) and clears the
 * sweep-instead-of-compact flag.
 *
 * @param chunk the chunk header to initialize
 * @param chunkSize must equal {@link HeapParameters#getAlignedHeapChunkSize()} — all aligned
 *            chunks have the same size (asserted below).
 */
@Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
public static void initialize(AlignedHeader chunk, UnsignedWord chunkSize) {
assert chunkSize.equal(HeapParameters.getAlignedHeapChunkSize()) : "expecting all aligned chunks to be the same size";
HeapChunk.initialize(chunk, AlignedHeapChunk.getObjectsStart(chunk), chunkSize);
// Fresh chunks default to being compacted; the collector may set this flag later.
chunk.setShouldSweepInsteadOfCompact(false);
}

/**
 * Resets {@code chunk} to its freshly-initialized, empty state, reusing the chunk's recorded
 * end offset as its size.
 */
public static void reset(AlignedHeader chunk) {
    /*
     * Delegate to initialize() so the reset also clears the sweep-instead-of-compact flag;
     * calling HeapChunk.initialize() directly (the previous code) would leave it stale.
     */
    initialize(chunk, HeapChunk.getEndOffset(chunk));
}

@Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
Expand All @@ -98,6 +104,10 @@ public static Pointer getObjectsEnd(AlignedHeader that) {
return HeapChunk.getEndPointer(that);
}

/** Returns {@code true} if no objects have been allocated in {@code that} chunk. */
public static boolean isEmpty(AlignedHeader that) {
    // The chunk holds no objects while its allocation top is still at the objects-start offset.
    UnsignedWord top = HeapChunk.getTopOffset(that);
    return top.equal(getObjectsStartOffset());
}

/** Allocate uninitialized memory within this AlignedHeapChunk. */
@Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
static Pointer allocateMemory(AlignedHeader that, UnsignedWord size) {
Expand Down Expand Up @@ -146,4 +156,20 @@ static boolean walkObjectsFromInline(AlignedHeader that, Pointer start, ObjectVi
/**
 * Returns the offset, from the chunk start, at which objects begin. This equals the header
 * size reported by the active {@link RememberedSet} implementation (which accounts for the
 * card table and first-object table, per the class comment's layout diagram).
 */
public static UnsignedWord getObjectsStartOffset() {
return RememberedSet.get().getHeaderSizeOfAlignedChunk();
}

/**
 * Returns the number of bytes in an aligned chunk that are usable for object storage, i.e.
 * the chunk size minus the header area preceding the objects start.
 * {@code @Fold} makes this a compile-time constant in generated code.
 */
@Fold
public static UnsignedWord getUsableSizeForObjects() {
return HeapParameters.getAlignedHeapChunkSize().subtract(getObjectsStartOffset());
}

/** Callback interface for iterating over {@link AlignedHeapChunk}s, one call per chunk. */
public interface Visitor {
/**
 * Visit an {@link AlignedHeapChunk}.
 *
 * @param chunk The {@link AlignedHeapChunk} to be visited.
 * @return {@code true} if visiting should continue, {@code false} if visiting should stop.
 */
@RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate while visiting the heap.")
boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ public interface CollectionPolicy {
static String getInitialPolicyName() {
if (SubstrateOptions.UseEpsilonGC.getValue()) {
return "NeverCollect";
} else if (!SubstrateOptions.useRememberedSet()) {
} else if (!SerialGCOptions.useRememberedSet()) {
return "OnlyCompletely";
}
String name = SerialGCOptions.InitialCollectionPolicy.getValue();
Expand Down
Loading

0 comments on commit 1db13d4

Please sign in to comment.