
Commit

[GR-55542] [GR-55588] [GR-34673] [GR-38054] Improve serial GC heap verification and fix minor GC issues.

PullRequest: graal/18336
christianhaeubl committed Aug 1, 2024
2 parents a560360 + 0555a4d commit 89e5586
Showing 21 changed files with 317 additions and 250 deletions.
@@ -92,7 +92,9 @@ public static void initialize(AlignedHeader chunk, UnsignedWord chunkSize) {
}

public static void reset(AlignedHeader chunk) {
initialize(chunk, HeapChunk.getEndOffset(chunk));
long alignedChunkSize = SerialAndEpsilonGCOptions.AlignedHeapChunkSize.getValue();
assert HeapChunk.getEndOffset(chunk).rawValue() == alignedChunkSize;
initialize(chunk, WordFactory.unsigned(alignedChunkSize));
}

@Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
@@ -119,7 +119,7 @@ public boolean hasLastIncrementalCollectionOverflowedSurvivors() {
return lastIncrementalCollectionOverflowedSurvivors;
}

void beforeCollection(boolean completeCollection) {
void beforeCollectOnce(boolean completeCollection) {
/* Gather some space statistics. */
HeapImpl heap = HeapImpl.getHeapImpl();
YoungGeneration youngGen = heap.getYoungGeneration();
@@ -149,33 +149,7 @@ void onSurvivorOverflowed() {
lastIncrementalCollectionOverflowedSurvivors = true;
}

void afterCollection(boolean completeCollection, Timer collectionTimer) {
if (completeCollection) {
afterCompleteCollection(collectionTimer);
} else {
afterIncrementalCollection(collectionTimer);
}
}

private void afterIncrementalCollection(Timer collectionTimer) {
/*
* Aggregating collection information is needed because any given collection policy may not
* be called for all collections, but may want to make decisions based on the aggregate
* values.
*/
incrementalCollectionCount += 1;
afterCollectionCommon();
lastIncrementalCollectionPromotedChunkBytes = oldChunkBytesAfter.subtract(oldChunkBytesBefore);
incrementalCollectionTotalNanos += collectionTimer.getMeasuredNanos();
}

private void afterCompleteCollection(Timer collectionTimer) {
completeCollectionCount += 1;
afterCollectionCommon();
completeCollectionTotalNanos += collectionTimer.getMeasuredNanos();
}

private void afterCollectionCommon() {
void afterCollectOnce(boolean completeCollection) {
HeapImpl heap = HeapImpl.getHeapImpl();
YoungGeneration youngGen = heap.getYoungGeneration();
OldGeneration oldGen = heap.getOldGeneration();
@@ -207,5 +181,24 @@ private void afterCollectionCommon() {
totalCollectedObjectBytes = totalCollectedObjectBytes.add(collectedObjectBytes);
}
}

if (!completeCollection) {
/*
* Aggregating collection information is needed because a collection policy might not be
* called for all collections, but may want to make decisions based on the aggregate
* values.
*/
lastIncrementalCollectionPromotedChunkBytes = oldChunkBytesAfter.subtract(oldChunkBytesBefore);
}
}

void updateCollectionCountAndTime(boolean completeCollection, long collectionTime) {
if (completeCollection) {
completeCollectionCount += 1;
completeCollectionTotalNanos += collectionTime;
} else {
incrementalCollectionCount += 1;
incrementalCollectionTotalNanos += collectionTime;
}
}
}
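
For orientation, a small, self-contained sketch (not part of this commit) of how the reworked accounting hooks appear to be driven: beforeCollectOnce and afterCollectOnce run around each collection pass, while updateCollectionCountAndTime is called once per GC request with the time measured by the collection timer (see the GCImpl hunks below). Every other name in the sketch is illustrative.

/**
 * Standalone sketch of the new accounting flow; only the three hook method names are
 * taken from the diff above, everything else is made up for illustration.
 */
class AccountingFlowSketch {
    long incrementalCount, completeCount;
    long incrementalNanos, completeNanos;

    void beforeCollectOnce(boolean complete) { /* snapshot chunk/object sizes before one pass */ }

    void afterCollectOnce(boolean complete) { /* update aggregates after one pass */ }

    void updateCollectionCountAndTime(boolean complete, long nanos) {
        if (complete) {
            completeCount++;
            completeNanos += nanos;
        } else {
            incrementalCount++;
            incrementalNanos += nanos;
        }
    }

    /** One GC request: an incremental pass, possibly followed by a complete pass. */
    void collect(boolean needsFullGC) {
        long start = System.nanoTime();
        boolean complete = false;
        collectOnce(false);                 // incremental pass
        if (needsFullGC) {
            complete = true;
            collectOnce(true);              // complete pass
        }
        // Counts and times are updated once per request with the overall measured time.
        updateCollectionCountAndTime(complete, System.nanoTime() - start);
    }

    private void collectOnce(boolean complete) {
        beforeCollectOnce(complete);
        // ... the actual copying/promotion work happens here ...
        afterCollectOnce(complete);
    }
}
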
@@ -25,11 +25,12 @@
package com.oracle.svm.core.genscavenge;

import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE;
import static com.oracle.svm.core.genscavenge.HeapVerifier.Occasion.After;
import static com.oracle.svm.core.genscavenge.HeapVerifier.Occasion.Before;
import static com.oracle.svm.core.genscavenge.HeapVerifier.Occasion.During;

import java.lang.ref.Reference;

import com.oracle.svm.core.deopt.DeoptimizationSlotPacking;
import com.oracle.svm.core.interpreter.InterpreterSupport;
import org.graalvm.nativeimage.CurrentIsolate;
import org.graalvm.nativeimage.IsolateThread;
import org.graalvm.nativeimage.Platform;
@@ -56,6 +57,7 @@
import com.oracle.svm.core.code.CodeInfoTable;
import com.oracle.svm.core.code.RuntimeCodeInfoAccess;
import com.oracle.svm.core.code.RuntimeCodeInfoMemory;
import com.oracle.svm.core.deopt.DeoptimizationSlotPacking;
import com.oracle.svm.core.deopt.DeoptimizedFrame;
import com.oracle.svm.core.deopt.Deoptimizer;
import com.oracle.svm.core.genscavenge.AlignedHeapChunk.AlignedHeader;
@@ -79,12 +81,13 @@
import com.oracle.svm.core.heap.RestrictHeapAccess;
import com.oracle.svm.core.heap.RuntimeCodeCacheCleaner;
import com.oracle.svm.core.heap.VMOperationInfos;
import com.oracle.svm.core.interpreter.InterpreterSupport;
import com.oracle.svm.core.jdk.RuntimeSupport;
import com.oracle.svm.core.jfr.JfrGCWhen;
import com.oracle.svm.core.jfr.JfrTicks;
import com.oracle.svm.core.jfr.events.AllocationRequiringGCEvent;
import com.oracle.svm.core.log.Log;
import com.oracle.svm.core.os.CommittedMemoryProvider;
import com.oracle.svm.core.os.ChunkBasedCommittedMemoryProvider;
import com.oracle.svm.core.snippets.ImplicitExceptions;
import com.oracle.svm.core.snippets.KnownIntrinsics;
import com.oracle.svm.core.stack.JavaFrame;
@@ -226,20 +229,33 @@ assert getCollectionEpoch().equal(data.getRequestingEpoch()) ||

timers.mutator.closeAt(data.getRequestingNanoTime());
timers.resetAllExceptMutator();
/* The type of collection will be determined later on. */
completeCollection = false;

JfrGCHeapSummaryEvent.emit(JfrGCWhen.BEFORE_GC);
GCCause cause = GCCause.fromId(data.getCauseId());
printGCBefore(cause);

ThreadLocalAllocation.disableAndFlushForAllThreads();
GenScavengeMemoryPoolMXBeans.singleton().notifyBeforeCollection();
HeapImpl.getAccounting().notifyBeforeCollection();
Timer collectionTimer = timers.collection.open();
try {
ThreadLocalAllocation.disableAndFlushForAllThreads();
GenScavengeMemoryPoolMXBeans.singleton().notifyBeforeCollection();
HeapImpl.getAccounting().notifyBeforeCollection();

verifyHeap(Before);

boolean outOfMemory = collectImpl(cause, data.getRequestingNanoTime(), data.getForceFullGC());
data.setOutOfMemory(outOfMemory);
boolean outOfMemory = collectImpl(cause, data.getRequestingNanoTime(), data.getForceFullGC());
data.setOutOfMemory(outOfMemory);

verifyHeap(After);
} finally {
collectionTimer.close();
}

accounting.updateCollectionCountAndTime(completeCollection, collectionTimer.getMeasuredNanos());
HeapImpl.getAccounting().notifyAfterCollection();
GenScavengeMemoryPoolMXBeans.singleton().notifyAfterCollection();
ChunkBasedCommittedMemoryProvider.get().afterGarbageCollection();

printGCAfter(cause);
JfrGCHeapSummaryEvent.emit(JfrGCWhen.AFTER_GC);
@@ -258,6 +274,7 @@ private boolean collectImpl(GCCause cause, long requestingNanoTime, boolean forc
// objects
ReferenceObjectProcessing.setSoftReferencesAreWeak(true);
try {
verifyHeap(During);
outOfMemory = doCollectImpl(cause, requestingNanoTime, true, true);
} finally {
ReferenceObjectProcessing.setSoftReferencesAreWeak(false);
@@ -272,7 +289,7 @@ private boolean collectImpl(GCCause cause, long requestingNanoTime, boolean forc
private boolean doCollectImpl(GCCause cause, long requestingNanoTime, boolean forceFullGC, boolean forceNoIncremental) {
checkSanityBeforeCollection();

CommittedMemoryProvider.get().beforeGarbageCollection();
ChunkBasedCommittedMemoryProvider.get().beforeGarbageCollection();

boolean incremental = !forceNoIncremental && !policy.shouldCollectCompletely(false);
boolean outOfMemory = false;
@@ -287,7 +304,8 @@ private boolean doCollectImpl(GCCause cause, long requestingNanoTime, boolean fo
}
if (!incremental || outOfMemory || forceFullGC || policy.shouldCollectCompletely(incremental)) {
if (incremental) { // uncommit unaligned chunks
CommittedMemoryProvider.get().afterGarbageCollection();
ChunkBasedCommittedMemoryProvider.get().uncommitUnusedMemory();
verifyHeap(During);
}
long startTicks = JfrGCEvents.startGCPhasePause();
try {
@@ -298,34 +316,26 @@ private boolean doCollectImpl(GCCause cause, long requestingNanoTime, boolean fo
}

HeapImpl.getChunkProvider().freeExcessAlignedChunks();
CommittedMemoryProvider.get().afterGarbageCollection();
ChunkBasedCommittedMemoryProvider.get().uncommitUnusedMemory();

checkSanityAfterCollection();
return outOfMemory;
}

private boolean doCollectOnce(GCCause cause, long requestingNanoTime, boolean complete, boolean followsIncremental) {
assert !followsIncremental || complete : "An incremental collection cannot be followed by another incremental collection";
assert !completeCollection || complete : "After a complete collection, no further incremental collections may happen";
completeCollection = complete;

accounting.beforeCollection(completeCollection);
accounting.beforeCollectOnce(completeCollection);
policy.onCollectionBegin(completeCollection, requestingNanoTime);

Timer collectionTimer = timers.collection.open();
try {
if (!followsIncremental) { // we would have verified the heap after the incremental GC
verifyBeforeGC();
}
doCollectCore(!complete);
verifyAfterGC();
if (complete) {
lastWholeHeapExaminedTimeMillis = System.currentTimeMillis();
}
} finally {
collectionTimer.close();
doCollectCore(!complete);
if (complete) {
lastWholeHeapExaminedTimeMillis = System.currentTimeMillis();
}

accounting.afterCollection(completeCollection, collectionTimer);
accounting.afterCollectOnce(completeCollection);
policy.onCollectionEnd(completeCollection, cause);

UnsignedWord usedBytes = getChunkBytes();
@@ -335,44 +345,40 @@ private boolean doCollectOnce(GCCause cause, long requestingNanoTime, boolean co
return usedBytes.aboveThan(policy.getMaximumHeapSize()); // out of memory?
}

private void verifyBeforeGC() {
if (SubstrateGCOptions.VerifyHeap.getValue() && SerialGCOptions.VerifyBeforeGC.getValue()) {
Timer verifyBeforeTimer = timers.verifyBefore.open();
try {
boolean success = true;
success &= HeapVerifier.singleton().verify(HeapVerifier.Occasion.BEFORE_COLLECTION);
success &= StackVerifier.verifyAllThreads();

if (!success) {
String kind = getGCKind();
Log.log().string("Heap verification failed before ").string(kind).string(" garbage collection.").newline();
VMError.shouldNotReachHere("Heap verification failed");
}
} finally {
verifyBeforeTimer.close();
private void verifyHeap(HeapVerifier.Occasion occasion) {
if (SubstrateGCOptions.VerifyHeap.getValue() && shouldVerify(occasion)) {
if (SubstrateGCOptions.VerboseGC.getValue()) {
printGCPrefixAndTime().string("Verifying ").string(occasion.name()).string(" GC ").newline();
}
}
}

private void verifyAfterGC() {
if (SubstrateGCOptions.VerifyHeap.getValue() && SerialGCOptions.VerifyAfterGC.getValue()) {
Timer verifyAfterTime = timers.verifyAfter.open();
try {
boolean success = true;
success &= HeapVerifier.singleton().verify(HeapVerifier.Occasion.AFTER_COLLECTION);
success &= StackVerifier.verifyAllThreads();

if (!success) {
String kind = getGCKind();
Log.log().string("Heap verification failed after ").string(kind).string(" garbage collection.").newline();
VMError.shouldNotReachHere("Heap verification failed");
}
} finally {
verifyAfterTime.close();
long start = System.nanoTime();

boolean success = true;
success &= HeapVerifier.singleton().verify(occasion);
success &= StackVerifier.verifyAllThreads();

if (!success) {
String kind = getGCKind();
Log.log().string("Heap verification ").string(occasion.name()).string(" GC failed (").string(kind).string(" garbage collection)").newline();
throw VMError.shouldNotReachHere("Heap verification failed");
}

if (SubstrateGCOptions.VerboseGC.getValue()) {
printGCPrefixAndTime().string("Verifying ").string(occasion.name()).string(" GC ")
.rational(TimeUtils.nanoSecondsSince(start), TimeUtils.nanosPerMilli, 3).string("ms").newline();
}
}
}

private static boolean shouldVerify(HeapVerifier.Occasion occasion) {
return switch (occasion) {
case Before -> SerialGCOptions.VerifyBeforeGC.getValue();
case During -> SerialGCOptions.VerifyDuringGC.getValue();
case After -> SerialGCOptions.VerifyAfterGC.getValue();
default -> throw VMError.shouldNotReachHere("Unexpected heap verification occasion.");
};
}

private String getGCKind() {
return isCompleteCollection() ? "complete" : "incremental";
}
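
In short, the separate verifyBeforeGC and verifyAfterGC methods above are folded into a single verifyHeap(Occasion) that is gated per occasion and can now also run during a collection. A self-contained sketch (not part of this commit) of the resulting flow; only the Occasion values and the verifyHeap/shouldVerify names come from the hunks above, the rest is illustrative.

/**
 * Standalone sketch of where the three verification occasions fire during one GC request.
 */
class HeapVerificationSketch {
    enum Occasion { Before, During, After }

    void gcRequest() {
        verifyHeap(Occasion.Before);    // at the start of collect(), before any collection work
        // ... incremental and/or complete collection passes run here; Occasion.During fires
        // between an incremental pass and a following complete pass (after unused memory has
        // been uncommitted) and again before a retry collection that treats soft references
        // as weak ...
        verifyHeap(Occasion.After);     // after all collection work, still inside the collection timer
    }

    private void verifyHeap(Occasion occasion) {
        // Runs only if the global VerifyHeap option is enabled and the matching per-occasion
        // option (VerifyBeforeGC, VerifyDuringGC, VerifyAfterGC) is set; a verification
        // failure aborts the VM via VMError.shouldNotReachHere.
    }
}
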
@@ -40,7 +40,7 @@
import com.oracle.svm.core.jdk.UninterruptibleUtils;
import com.oracle.svm.core.jdk.UninterruptibleUtils.AtomicUnsigned;
import com.oracle.svm.core.log.Log;
import com.oracle.svm.core.os.CommittedMemoryProvider;
import com.oracle.svm.core.os.ChunkBasedCommittedMemoryProvider;
import com.oracle.svm.core.thread.VMOperation;
import com.oracle.svm.core.thread.VMThreads;
import com.oracle.svm.core.util.UnsignedUtils;
@@ -89,7 +89,7 @@ AlignedHeader produceAlignedChunk() {
AlignedHeader result = popUnusedAlignedChunk();
if (result.isNull()) {
/* Unused list was empty, need to allocate memory. */
result = (AlignedHeader) CommittedMemoryProvider.get().allocateAlignedChunk(chunkSize, HeapParameters.getAlignedHeapChunkAlignment());
result = (AlignedHeader) ChunkBasedCommittedMemoryProvider.get().allocateAlignedChunk(chunkSize, HeapParameters.getAlignedHeapChunkAlignment());
if (result.isNull()) {
throw OutOfMemoryUtil.reportOutOfMemoryError(ALIGNED_OUT_OF_MEMORY_ERROR);
}
@@ -240,7 +240,7 @@ private void freeUnusedAlignedChunksAtSafepoint(UnsignedWord count) {
UnalignedHeader produceUnalignedChunk(UnsignedWord objectSize) {
UnsignedWord chunkSize = UnalignedHeapChunk.getChunkSizeForObject(objectSize);

UnalignedHeader result = (UnalignedHeader) CommittedMemoryProvider.get().allocateUnalignedChunk(chunkSize);
UnalignedHeader result = (UnalignedHeader) ChunkBasedCommittedMemoryProvider.get().allocateUnalignedChunk(chunkSize);
if (result.isNull()) {
throw OutOfMemoryUtil.reportOutOfMemoryError(UNALIGNED_OUT_OF_MEMORY_ERROR);
}
@@ -249,14 +249,14 @@ UnalignedHeader produceUnalignedChunk(UnsignedWord objectSize) {
assert objectSize.belowOrEqual(HeapChunk.availableObjectMemory(result)) : "UnalignedHeapChunk insufficient for requested object";

/* Avoid zapping if unaligned chunks are pre-zeroed. */
if (!CommittedMemoryProvider.get().areUnalignedChunksZeroed() && HeapParameters.getZapProducedHeapChunks()) {
if (!ChunkBasedCommittedMemoryProvider.get().areUnalignedChunksZeroed() && HeapParameters.getZapProducedHeapChunks()) {
zap(result, HeapParameters.getProducedHeapChunkZapWord());
}
return result;
}

public static boolean areUnalignedChunksZeroed() {
return CommittedMemoryProvider.get().areUnalignedChunksZeroed();
return ChunkBasedCommittedMemoryProvider.get().areUnalignedChunksZeroed();
}

/**
@@ -306,12 +306,12 @@ static void freeUnalignedChunkList(UnalignedHeader first) {

@Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
private static void freeAlignedChunk(AlignedHeader chunk) {
CommittedMemoryProvider.get().freeAlignedChunk(chunk, HeapParameters.getAlignedHeapChunkSize(), HeapParameters.getAlignedHeapChunkAlignment());
ChunkBasedCommittedMemoryProvider.get().freeAlignedChunk(chunk, HeapParameters.getAlignedHeapChunkSize(), HeapParameters.getAlignedHeapChunkAlignment());
}

@Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
private static void freeUnalignedChunk(UnalignedHeader chunk) {
CommittedMemoryProvider.get().freeUnalignedChunk(chunk, unalignedChunkSize(chunk));
ChunkBasedCommittedMemoryProvider.get().freeUnalignedChunk(chunk, unalignedChunkSize(chunk));
}

@Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
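
The call sites in this file (and in GCImpl above) now go through ChunkBasedCommittedMemoryProvider instead of CommittedMemoryProvider, which suggests that the chunk-oriented operations live on a more specific provider type in com.oracle.svm.core.os. A hedged, self-contained sketch of the shape those call sites rely on; only the method names come from the diff, the simplified long/boolean signatures stand in for the SVM word and chunk-header types and are assumptions.

/**
 * Standalone sketch, not the real class: the chunk-oriented operations used by the
 * call sites above, gathered on a dedicated provider type.
 */
interface ChunkBasedCommittedMemoryProviderSketch {
    long allocateAlignedChunk(long size, long alignment);
    long allocateUnalignedChunk(long size);
    void freeAlignedChunk(long chunk, long size, long alignment);
    void freeUnalignedChunk(long chunk, long size);
    boolean areUnalignedChunksZeroed();

    void beforeGarbageCollection();   // before the collection work starts
    void uncommitUnusedMemory();      // between passes and after the last pass
    void afterGarbageCollection();    // once the whole GC request is finished
}
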

