Skip to content

Commit

Permalink
Relocate calculating the rates at the end of First PGC After GMP
Browse files Browse the repository at this point in the history
 - recalculate PGCCompactionRate, HeapOccupancyTrend, and
  ScannableBytesRatio after the PGC and before estimating
  defragmentReclaimableRegions, so that GMPIntermission
  and desiredCompactWork are calculated accurately.
 - update the calculatePGCCompactionRate tracepoint to provide
  more detailed information.

Signed-off-by: Lin Hu <linhu@ca.ibm.com>
  • Loading branch information
LinHu2016 committed Mar 1, 2018
1 parent 58537f3 commit e0b26f1
Show file tree
Hide file tree
Showing 3 changed files with 79 additions and 22 deletions.
14 changes: 8 additions & 6 deletions runtime/gc_vlhgc/IncrementalGenerationalGC.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@

/*******************************************************************************
* Copyright (c) 1991, 2017 IBM Corp. and others
* Copyright (c) 1991, 2018 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
Expand Down Expand Up @@ -1216,6 +1216,7 @@ MM_IncrementalGenerationalGC::partialGarbageCollect(MM_EnvironmentVLHGC *env, MM
MM_CompactGroupPersistentStats::updateStatsBeforeCollect(env, persistentStats);
if (_schedulingDelegate.isGlobalSweepRequired()) {
Assert_MM_true(NULL == env->_cycleState->_externalCycleState);

_reclaimDelegate.runGlobalSweepBeforePGC(env, allocDescription, env->_cycleState->_activeSubSpace, env->_cycleState->_gcCode);

/* TODO: lpnguyen make another statisticsDelegate or something that both schedulingDelegate and reclaimDelegate can see
Expand All @@ -1228,11 +1229,6 @@ MM_IncrementalGenerationalGC::partialGarbageCollect(MM_EnvironmentVLHGC *env, MM

double optimalEmptinessRegionThreshold = _reclaimDelegate.calculateOptimalEmptinessRegionThreshold(env, regionConsumptionRate, avgSurvivorRegions, avgCopyForwardRate, scanTimeCostPerGMP);
_schedulingDelegate.setAutomaticDefragmentEmptinessThreshold(optimalEmptinessRegionThreshold);

/* recalculate ratios due to sweep */
_schedulingDelegate.calculatePGCCompactionRate(env, _schedulingDelegate.getCurrentEdenSizeInRegions(env) * _regionManager->getRegionSize());
_schedulingDelegate.calculateHeapOccupancyTrend(env);
_schedulingDelegate.calculateScannableBytesRatio(env);
}

if (env->_cycleState->_shouldRunCopyForward) {
Expand Down Expand Up @@ -1373,6 +1369,10 @@ MM_IncrementalGenerationalGC::partialGarbageCollectUsingCopyForward(MM_Environme
_reclaimDelegate.performAtomicSweep(env, allocDescription, env->_cycleState->_activeSubSpace, env->_cycleState->_gcCode);
}

/* calculatePGCCompactionRate() has to run after the PGC because half of the Eden regions have not been marked after the final GMP (the sweep could not collect those regions) */
/* calculatePGCCompactionRate() has to run before estimateReclaimableRegions(), which needs the result of calculatePGCCompactionRate() - region->_defragmentationTarget */
_schedulingDelegate.recalculateRatesOnFirstPGCAfterGMP(env);

/* Need to understand how to do the estimates here found within the following two calls */
UDATA defragmentReclaimableRegions = 0;
UDATA reclaimableRegions = 0;
Expand Down Expand Up @@ -1445,6 +1445,8 @@ MM_IncrementalGenerationalGC::partialGarbageCollectUsingMarkCompact(MM_Environme
Trc_MM_ReclaimDelegate_runReclaimComplete_Exit(env->getLanguageVMThread(), 0);
}

_schedulingDelegate.recalculateRatesOnFirstPGCAfterGMP(env);

UDATA defragmentReclaimableRegions = 0;
UDATA reclaimableRegions = 0;
_reclaimDelegate.estimateReclaimableRegions(env, 0.0 /* copy-forward loss */, &reclaimableRegions, &defragmentReclaimableRegions);
Expand Down
68 changes: 53 additions & 15 deletions runtime/gc_vlhgc/SchedulingDelegate.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 1991, 2017 IBM Corp. and others
* Copyright (c) 1991, 2018 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
Expand Down Expand Up @@ -284,10 +284,11 @@ MM_SchedulingDelegate::partialGarbageCollectCompleted(MM_EnvironmentVLHGC *env,
/* measure scan rate in PGC, only if we did M/S/C collect */
measureScanRate(env, measureScanRateHistoricWeightForPGC);
}

measureConsumptionForPartialGC(env, reclaimableRegions, defragmentReclaimableRegions);
calculateAutomaticGMPIntermission(env);
calculateEdenSize(env);

estimateMacroDefragmentationWork(env);

/* Calculate the time spent in the current Partial GC */
Expand Down Expand Up @@ -669,6 +670,18 @@ MM_SchedulingDelegate::calculateScannableBytesRatio(MM_EnvironmentVLHGC *env)
}
}

void
MM_SchedulingDelegate::recalculateRatesOnFirstPGCAfterGMP(MM_EnvironmentVLHGC *env)
{
	/* The rates only need refreshing on the first PGC following a completed GMP;
	 * on every other PGC this is a no-op.
	 */
	if (!isFirstPGCAfterGMP()) {
		return;
	}

	const UDATA edenSizeInBytes = getCurrentEdenSizeInRegions(env) * _regionManager->getRegionSize();
	calculatePGCCompactionRate(env, edenSizeInBytes);
	calculateHeapOccupancyTrend(env);
	calculateScannableBytesRatio(env);

	/* Clear the first-PGC-after-GMP flag so subsequent PGCs skip the recalculation until the next GMP completes */
	firstPGCAfterGMPCompleted();
}

double
MM_SchedulingDelegate::getAverageEmptinessOfCopyForwardedRegions()
{
Expand Down Expand Up @@ -708,62 +721,89 @@ MM_SchedulingDelegate::calculatePGCCompactionRate(MM_EnvironmentVLHGC *env, UDAT
const double defragmentEmptinessThreshold = getDefragmentEmptinessThreshold(env);
Assert_MM_true( (defragmentEmptinessThreshold >= 0.0) && (defragmentEmptinessThreshold <= 1.0) );
const UDATA regionSize = _regionManager->getRegionSize();
UDATA totalFreeMemory = 0;
UDATA totalLiveData = 0;

UDATA totalLiveDataInCollectableRegions = 0;
UDATA totalLiveDataInNonCollectibleRegions = 0;
UDATA fullyCompactedData = 0;

UDATA freeMemoryInCollectibleRegions = 0;
UDATA freeMemoryInNonCollectibleRegions = 0;
UDATA freeMemoryInFullyCompactedRegions = 0;
UDATA freeRegionMemory = 0;

UDATA collectibleRegions = 0;
UDATA nonCollectibleRegions = 0;
UDATA freeRegions = 0;
UDATA fullyCompactedRegions = 0;

UDATA estimatedFreeMemory = 0;
UDATA defragmentedMemory = 0;

GC_HeapRegionIteratorVLHGC regionIterator(_regionManager, MM_HeapRegionDescriptor::MANAGED);
MM_HeapRegionDescriptorVLHGC *region = NULL;

while (NULL != (region = regionIterator.nextRegion())) {
region->_defragmentationTarget = false;
MM_MemoryPoolBumpPointer *memoryPool = (MM_MemoryPoolBumpPointer *)region->getMemoryPool();
if (region->containsObjects()) {
Assert_MM_true(region->_sweepData._alreadySwept);
UDATA freeMemory = memoryPool->getFreeMemoryAndDarkMatterBytes();
if (!region->getRememberedSetCardList()->isAccurate()) {
/* Overflowed regions, or those whose RSCL is being rebuilt, will not be compacted */
nonCollectibleRegions += 1;
freeMemoryInNonCollectibleRegions += freeMemory;
totalLiveDataInNonCollectibleRegions += (regionSize - freeMemory);
} else {
MM_MemoryPoolBumpPointer *memoryPool = (MM_MemoryPoolBumpPointer *)region->getMemoryPool();
UDATA freeMemory = memoryPool->getFreeMemoryAndDarkMatterBytes();
double emptiness = (double)freeMemory / (double)regionSize;
Assert_MM_true( (emptiness >= 0.0) && (emptiness <= 1.0) );

/* Only consider regions which are likely to become more dense if we copy-and-forward them */
if (emptiness > defragmentEmptinessThreshold) {
collectibleRegions += 1;
freeMemoryInCollectibleRegions += freeMemory;
/* see ReclaimDelegate::deriveCompactScore() for an explanation of potentialWastedWork */
UDATA compactGroup = MM_CompactGroupManager::getCompactGroupNumber(env, region);
double weightedSurvivalRate = MM_GCExtensions::getExtensions(env)->compactGroupPersistentStats[compactGroup]._weightedSurvivalRate;
double potentialWastedWork = (1.0 - weightedSurvivalRate) * (1.0 - emptiness);

/* the probability that we'll recover the free memory is determined by the potential gainful work, so use that to determine how much memory we're likely to actually compact */
defragmentedMemory += (UDATA)((double)freeMemory * (1.0 - potentialWastedWork));
totalLiveData += (UDATA)((double)(regionSize - freeMemory) * (1.0 - potentialWastedWork));
totalLiveDataInCollectableRegions += (UDATA)((double)(regionSize - freeMemory) * (1.0 - potentialWastedWork));
region->_defragmentationTarget = true;

} else {
/* if calculatePGCCompactionRate() is called right after the sweep before the PGC (the first PGC after GMP), half of the Eden regions were allocated after the final GMP; those Eden regions have not been marked, so they would show up as fully compacted regions */
fullyCompactedRegions += 1;
freeMemoryInFullyCompactedRegions += freeMemory;
fullyCompactedData += (regionSize - freeMemory);
}
}
} else if (region->isFreeOrIdle()) {
freeRegions += 1;
freeRegionMemory += regionSize;
}
}

/* Adjust totalFreeMemory - we are only interested in area that shortfall can be fed from.
/* Adjust estimatedFreeMemory - we are only interested in the area that the shortfall can be fed from.
 * Thus exclude Eden and Survivor size. Survivor space needs to accommodate the Nursery set, Dynamic collection set and Compaction set
 */
UDATA surivivorSize = (UDATA)(regionSize * (_averageSurvivorSetRegionCount + _extensions->tarokKickoffHeadroomRegionCount));
UDATA reservedFreeMemory = edenSizeInBytes + surivivorSize;
totalFreeMemory = MM_Math::saturatingSubtract(defragmentedMemory + freeRegionMemory, reservedFreeMemory);
estimatedFreeMemory = MM_Math::saturatingSubtract(defragmentedMemory + freeRegionMemory, reservedFreeMemory);
double bytesDiscardedPerByteCopied = (_averageCopyForwardBytesCopied > 0.0) ? (_averageCopyForwardBytesDiscarded / _averageCopyForwardBytesCopied) : 0.0;
double estimatedFreeMemoryDiscarded = (double)totalLiveData * bytesDiscardedPerByteCopied;
double recoverableFreeMemory = (double)totalFreeMemory - estimatedFreeMemoryDiscarded;
double estimatedFreeMemoryDiscarded = (double)totalLiveDataInCollectableRegions * bytesDiscardedPerByteCopied;
double recoverableFreeMemory = (double)estimatedFreeMemory - estimatedFreeMemoryDiscarded;

if (0.0 < recoverableFreeMemory) {
_bytesCompactedToFreeBytesRatio = ((double)totalLiveData)/recoverableFreeMemory;
_bytesCompactedToFreeBytesRatio = ((double)totalLiveDataInCollectableRegions)/recoverableFreeMemory;
} else {
_bytesCompactedToFreeBytesRatio = (double)(_regionManager->getTableRegionCount() + 1);
}
Trc_MM_SchedulingDelegate_calculatePGCCompactionRate_liveToFreeRatio(env->getLanguageVMThread(), _bytesCompactedToFreeBytesRatio, totalLiveData, totalFreeMemory, fullyCompactedData, reservedFreeMemory, defragmentEmptinessThreshold, surivivorSize, defragmentedMemory, freeRegionMemory, edenSizeInBytes);

Trc_MM_SchedulingDelegate_calculatePGCCompactionRate_liveToFreeRatio1(env->getLanguageVMThread(), (totalLiveDataInCollectableRegions + totalLiveDataInNonCollectibleRegions + fullyCompactedData), totalLiveDataInCollectableRegions, totalLiveDataInNonCollectibleRegions, fullyCompactedData);
Trc_MM_SchedulingDelegate_calculatePGCCompactionRate_liveToFreeRatio2(env->getLanguageVMThread(), (freeMemoryInCollectibleRegions + freeMemoryInNonCollectibleRegions + freeRegionMemory), freeMemoryInCollectibleRegions, freeMemoryInNonCollectibleRegions, freeRegionMemory, freeMemoryInFullyCompactedRegions);
Trc_MM_SchedulingDelegate_calculatePGCCompactionRate_liveToFreeRatio3(env->getLanguageVMThread(), (collectibleRegions + nonCollectibleRegions + fullyCompactedRegions + freeRegions), collectibleRegions, nonCollectibleRegions, fullyCompactedRegions, freeRegions);
Trc_MM_SchedulingDelegate_calculatePGCCompactionRate_liveToFreeRatio4(env->getLanguageVMThread(), _bytesCompactedToFreeBytesRatio, edenSizeInBytes, surivivorSize, reservedFreeMemory, defragmentEmptinessThreshold, defragmentedMemory, estimatedFreeMemory);
}

UDATA
Expand All @@ -778,7 +818,6 @@ MM_SchedulingDelegate::getDesiredCompactWork()
return desiredCompactWork;
}

/*
bool
MM_SchedulingDelegate::isFirstPGCAfterGMP()
{
Expand All @@ -790,7 +829,6 @@ MM_SchedulingDelegate::firstPGCAfterGMPCompleted()
{
_didGMPCompleteSinceLastReclaim = false;
}
*/

void
MM_SchedulingDelegate::copyForwardCompleted(MM_EnvironmentVLHGC *env)
Expand Down
19 changes: 18 additions & 1 deletion runtime/gc_vlhgc/SchedulingDelegate.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 1991, 2017 IBM Corp. and others
* Copyright (c) 1991, 2018 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
Expand Down Expand Up @@ -331,13 +331,30 @@ class MM_SchedulingDelegate : public MM_BaseNonVirtual
*/
void calculateHeapOccupancyTrend(MM_EnvironmentVLHGC *env);

/**
 * Recalculate PGCCompactionRate, HeapOccupancyTrend and ScannableBytesRatio at the end of the first PGC after GMP.
 * It should be called before estimating defragmentReclaimableRegions in order to calculate GMPIntermission more accurately.
 * TODO: might need to recalculate desiredCompactWork for the sliding compact of PGC (the MacroDefragment part; right now it is calculated at the end of TaxationEntryPoint,
 * but we need to decide on sliding compaction before a copy-forward PGC).
 */
void recalculateRatesOnFirstPGCAfterGMP(MM_EnvironmentVLHGC *env);

/**
* Calculate desired amount of work to be compacted this PGC cycle
* @param env[in] the master GC thread
* @return desired bytes to be compacted
*/
UDATA getDesiredCompactWork();

/**
* @return true if it is first PGC after GMP completed (so we can calculate compact-bytes/free-bytes ratio, etc.)
*/
bool isFirstPGCAfterGMP();
/**
* Clear the flag that indicates this was the first PGC after GMP completed
*/
void firstPGCAfterGMPCompleted();

/**
* return whether the following PGC is required to do global sweep (typically, first PGC after GMP completed)
*/
Expand Down

0 comments on commit e0b26f1

Please sign in to comment.