diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
index 2361a50e76dc5..e963bcc35bb47 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
@@ -141,7 +141,7 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
    // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
    // to decrease the capacity of the fragmented memory by the scaled loss.
-   size_t live_data_for_evacuation = r->get_live_data_bytes();
+   const size_t live_data_for_evacuation = r->get_live_data_bytes();
    size_t lost_available = r->free();

    if ((lost_available > 0) && (excess_fragmented_available > 0)) {
@@ -169,7 +169,9 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
      // We were not able to account for the lost free memory within fragmented memory, so we need to take this
      // allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
      if (live_data_for_evacuation > unfragmented_available) {
-       // There is not room to evacuate this region or any that come after it in within the candidates array.
+       // There is no room to evacuate this region or any that come after it within the candidates array.
+       log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
+                           unfragmented_available, live_data_for_evacuation, r->index());
        break;
      } else {
        unfragmented_available -= live_data_for_evacuation;
@@ -187,7 +189,9 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
        evacuation_need = 0;
      }
      if (evacuation_need > unfragmented_available) {
-       // There is not room to evacuate this region or any that come after it in within the candidates array.
+       // There is no room to evacuate this region or any that come after it within the candidates array.
+       log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
+                           unfragmented_available, evacuation_need, r->index());
        break;
      } else {
        unfragmented_available -= evacuation_need;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp
index 86ff6f22c7211..c1a0521c58135 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp
@@ -181,6 +181,19 @@ void ShenandoahAgeCensus::update_census(size_t age0_pop) {
 }

+size_t ShenandoahAgeCensus::get_tenurable_bytes(const uint tenuring_threshold) const {
+  assert(_epoch < MAX_SNAPSHOTS, "Out of bounds");
+  size_t total = 0;
+  const AgeTable* pv = _global_age_tables[_epoch];
+  for (uint i = 0; i < MAX_COHORTS; i++) {
+    if (i >= tenuring_threshold) {
+      total += pv->sizes[i];
+    }
+  }
+  return total * HeapWordSize;
+}
+
+
 // Reset the epoch for the global age tables,
 // clearing all history.
 void ShenandoahAgeCensus::reset_global() {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp
index 39ea4ee9002ac..53d9ef5df9a9e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp
@@ -211,6 +211,12 @@ class ShenandoahAgeCensus: public CHeapObj<mtGC> {
  // allocated when the concurrent marking was in progress.
  void update_census(size_t age0_pop);

+ // Return the total size of the population at or above the given threshold for the current epoch
+ size_t get_tenurable_bytes(uint tenuring_threshold) const;
+
+ // As above, but use the current tenuring threshold by default
+ size_t get_tenurable_bytes() const { return get_tenurable_bytes(tenuring_threshold()); }
+
  // Reset the epoch, clearing accumulated census history
  // Note: this isn't currently used, but reserved for planned
  // future usage.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
index 745d45ace1e25..e58a7f4079608 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
@@ -225,9 +225,9 @@ void ShenandoahCollectionSet::summarize(size_t total_garbage, size_t immediate_g
               count());
  if (garbage() > 0) {
-   const size_t young_evac_bytes = get_young_bytes_reserved_for_evacuation();
-   const size_t promote_evac_bytes = get_young_bytes_to_be_promoted();
-   const size_t old_evac_bytes = get_old_bytes_reserved_for_evacuation();
+   const size_t young_evac_bytes = get_live_bytes_in_untenurable_regions();
+   const size_t promote_evac_bytes = get_live_bytes_in_tenurable_regions();
+   const size_t old_evac_bytes = get_live_bytes_in_old_regions();
    const size_t total_evac_bytes = young_evac_bytes + promote_evac_bytes + old_evac_bytes;
    ls.print_cr("Evacuation Targets: " "YOUNG: " PROPERFMT ", " "PROMOTE: " PROPERFMT ", " "OLD: " PROPERFMT ", " "TOTAL: " PROPERFMT,
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
index d4a590a3d89a6..415573195f537 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
@@ -109,14 +109,14 @@ class ShenandoahCollectionSet : public CHeapObj<mtGC> {
  // Prints a summary of the collection set when gc+ergo=info
  void summarize(size_t total_garbage, size_t immediate_garbage, size_t immediate_regions) const;

- // Returns the amount of live bytes in young regions in the collection set. It is not known how many of these bytes will be promoted.
- inline size_t get_young_bytes_reserved_for_evacuation() const;
+ // Returns the amount of live bytes in young regions with an age below the tenuring threshold.
+ inline size_t get_live_bytes_in_untenurable_regions() const;

  // Returns the amount of live bytes in old regions in the collection set.
- inline size_t get_old_bytes_reserved_for_evacuation() const;
+ inline size_t get_live_bytes_in_old_regions() const;

  // Returns the amount of live bytes in young regions with an age above the tenuring threshold.
- inline size_t get_young_bytes_to_be_promoted() const;
+ inline size_t get_live_bytes_in_tenurable_regions() const;

  // Returns the amount of free bytes in young regions in the collection set.
  size_t get_young_available_bytes_collected() const { return _young_available_bytes_collected; }
@@ -125,7 +125,7 @@ class ShenandoahCollectionSet : public CHeapObj<mtGC> {
  inline size_t get_old_garbage() const;

  bool is_preselected(size_t region_idx) {
-   assert(_preselected_regions != nullptr, "Missing etsablish after abandon");
+   assert(_preselected_regions != nullptr, "Missing establish after abandon");
    return _preselected_regions[region_idx];
  }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp
index 4adcec4fbb552..3ff5f2f81d70b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp
@@ -54,15 +54,15 @@ bool ShenandoahCollectionSet::is_in_loc(void* p) const {
  return _biased_cset_map[index] == 1;
 }

-size_t ShenandoahCollectionSet::get_old_bytes_reserved_for_evacuation() const {
+size_t ShenandoahCollectionSet::get_live_bytes_in_old_regions() const {
  return _old_bytes_to_evacuate;
 }

-size_t ShenandoahCollectionSet::get_young_bytes_reserved_for_evacuation() const {
+size_t ShenandoahCollectionSet::get_live_bytes_in_untenurable_regions() const {
  return _young_bytes_to_evacuate - _young_bytes_to_promote;
 }

-size_t ShenandoahCollectionSet::get_young_bytes_to_be_promoted() const {
+size_t ShenandoahCollectionSet::get_live_bytes_in_tenurable_regions() const {
  return _young_bytes_to_promote;
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index 0c55613efcc23..f5f8ee1c584b7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -24,6 +24,7 @@
  */

 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahAgeCensus.hpp"
 #include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp"
 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 #include "gc/shenandoah/shenandoahFreeSet.hpp"
@@ -251,6 +252,28 @@ void ShenandoahGeneration::parallel_heap_region_iterate_free(ShenandoahHeapRegio
  ShenandoahHeap::heap()->parallel_heap_region_iterate(cl);
 }

+// Here's the algebra.
+// Let SOEP = ShenandoahOldEvacRatioPercent,
+//     OE = old evac,
+//     YE = young evac, and
+//     TE = total evac = OE + YE
+// By definition:
+//     SOEP/100 = OE/TE
+//              = OE/(OE+YE)
+//  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)    // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
+//                     = OE/YE
+//  => OE = YE*SOEP/(100-SOEP)
+size_t get_maximum_old_evacuation_reserve(size_t maximum_young_evacuation_reserve, size_t old_available) {
+  // We have to be careful in the event that SOEP is set to 100 by the user.
+  assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
+  if (ShenandoahOldEvacRatioPercent == 100) {
+    return old_available;
+  }
+
+  const size_t ratio_of_old_in_collection_set = (maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent);
+  return MIN2(ratio_of_old_in_collection_set, old_available);
+}
+
 void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap) {
  shenandoah_assert_generational();
@@ -275,26 +298,12 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
  // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
  // clamped by the old generation space available.
- //
- // Here's the algebra.
- // Let SOEP = ShenandoahOldEvacRatioPercent,
- //     OE = old evac,
- //     YE = young evac, and
- //     TE = total evac = OE + YE
- // By definition:
- //     SOEP/100 = OE/TE
- //              = OE/(OE+YE)
- //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)    // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
- //                     = OE/YE
- //  => OE = YE*SOEP/(100-SOEP)
-
- // We have to be careful in the event that SOEP is set to 100 by the user.
- assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
  const size_t old_available = old_generation->available();
- const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
-   old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
-                        old_available);
+ const size_t maximum_old_evacuation_reserve = get_maximum_old_evacuation_reserve(maximum_young_evacuation_reserve, old_available);
+
+ log_debug(gc, cset)("max_young_evac_reserve: %zu, max_old_evac_reserve: %zu, old_available: %zu",
+                     maximum_young_evacuation_reserve, maximum_old_evacuation_reserve, old_available);

  // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority
  // is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young
@@ -350,6 +359,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
  // and identify regions that will promote in place. These use the tenuring threshold.
  const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
  assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
+ assert(consumed_by_advance_promotion <= old_promo_reserve, "Cannot promote more than was reserved");

  // Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this
  // to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood
@@ -367,10 +377,10 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
 //
 void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) {
  shenandoah_assert_generational();
- // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
- // be able to increase regions_available_to_loan
+ // We may find that old_evacuation_reserve is not fully consumed, in which case we may be able to transfer old
+ // unaffiliated regions back to young.

- // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make
+ // The role of adjust_evacuation_budgets() is to compute the correct value of regions to transfer to young and to make
  // effective use of this memory, including the remnant memory within these regions that may result from rounding loan to
  // integral number of regions. Excess memory that is available to be loaned is applied to an allocation supplement,
  // which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan
@@ -382,11 +392,11 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
  // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
  // to young-gen.
- size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+ const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  ShenandoahOldGeneration* const old_generation = heap->old_generation();
  ShenandoahYoungGeneration* const young_generation = heap->young_generation();

- size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
+ const size_t old_evacuated = collection_set->get_live_bytes_in_old_regions();
  size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * double(old_evacuated));
  size_t old_evacuation_reserve = old_generation->get_evacuation_reserve();
@@ -399,50 +409,38 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
    // Leave old_evac_reserve as previously configured
  } else if (old_evacuated_committed < old_evacuation_reserve) {
    // This happens if the old-gen collection consumes less than full budget.
+   log_debug(gc, cset)("Shrinking old evac reserve to match old_evacuated_committed: %zu", old_evacuated_committed);
    old_evacuation_reserve = old_evacuated_committed;
    old_generation->set_evacuation_reserve(old_evacuation_reserve);
  }

- size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted();
- size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * double(young_advance_promoted));
+ const size_t young_evacuated = collection_set->get_live_bytes_in_untenurable_regions();
+ const size_t young_evacuated_committed = (size_t) (ShenandoahEvacWaste * double(young_evacuated));
+ assert(young_evacuated_committed <= young_generation->available_with_reserve(), "Cannot evacuate more than is available in young");
+ young_generation->set_evacuation_reserve(young_evacuated_committed);

- size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation();
- size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));
-
- size_t total_young_available = young_generation->available_with_reserve();
- assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
- young_generation->set_evacuation_reserve(young_evacuated_reserve_used);
-
- size_t old_available = old_generation->available();
  // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
  // and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
  // evac and update phases.
- size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
-
- if (old_available < old_consumed) {
-   // This can happen due to round-off errors when adding the results of truncated integer arithmetic.
-   // We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here.
-   assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
-          "Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
-          young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
-   young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
-   old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
- }
+ const size_t old_available = old_generation->available();
+ const size_t promoted_reserve = old_generation->get_promoted_reserve();
+ const size_t old_consumed = old_evacuated_committed + promoted_reserve;

- assert(old_available >= old_consumed, "Cannot consume (%zu) more than is available (%zu)",
-        old_consumed, old_available);
+ assert(old_available >= old_consumed, "Cannot consume (%zu) more than is available (%zu)", old_consumed, old_available);
  size_t excess_old = old_available - old_consumed;
- size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
- size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
+ const size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
+ const size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
  assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available");
+ log_debug(gc, cset)("excess_old is: %zu, unaffiliated_old_regions is: %zu", excess_old, unaffiliated_old_regions);

  // Make sure old_evac_committed is unaffiliated
- if (old_evacuated_committed > 0) {
-   if (unaffiliated_old > old_evacuated_committed) {
-     size_t giveaway = unaffiliated_old - old_evacuated_committed;
-     size_t giveaway_regions = giveaway / region_size_bytes;  // round down
+ if (old_consumed > 0) {
+   if (unaffiliated_old > old_consumed) {
+     const size_t giveaway = unaffiliated_old - old_consumed;
+     const size_t giveaway_regions = giveaway / region_size_bytes;  // round down
      if (giveaway_regions > 0) {
        excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
+       log_debug(gc, cset)("Changed excess_old to: %zu", excess_old);
      } else {
        excess_old = 0;
      }
@@ -461,24 +459,22 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
    }
  } else if (unaffiliated_old_regions > 0) {
    // excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
-   size_t excess_regions = excess_old / region_size_bytes;
+   const size_t excess_regions = excess_old / region_size_bytes;
    regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
  }

  if (regions_to_xfer > 0) {
-   bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer);
    assert(excess_old >= regions_to_xfer * region_size_bytes,
           "Cannot transfer (%zu, %zu) more than excess old (%zu)", regions_to_xfer, region_size_bytes, excess_old);
+   log_debug(gc, ergo)("Giving away %zu old regions (old_available: %zu, old_evac: %zu, unaffiliated_old: %zu)",
+                       regions_to_xfer, old_available, old_evacuated_committed, unaffiliated_old);
+   bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer);
    excess_old -= regions_to_xfer * region_size_bytes;
    log_debug(gc, ergo)("%s transferred %zu excess regions to young before start of evacuation",
                        result? "Successfully": "Unsuccessfully", regions_to_xfer);
  }

- // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
- // promotions than fit in reserved memory, they will be deferred until a future GC pass.
- size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
- old_generation->set_promoted_reserve(total_promotion_reserve);
  old_generation->reset_promoted_expended();
 }
@@ -524,7 +520,7 @@ inline void assert_no_in_place_promotions() {
 // that this allows us to more accurately budget memory to hold the results of evacuation. Memory for evacuation
 // of aged regions must be reserved in the old generation. Memory for evacuation of all other regions must be
 // reserved in the young generation.
-size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {
+size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_reserve) {
  // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle.
  assert_no_in_place_promotions();
@@ -537,7 +533,6 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {
  const size_t pip_used_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahGenerationalMinPIPUsage) / 100;

- size_t old_consumed = 0;
  size_t promo_potential = 0;
  size_t candidates = 0;
@@ -560,7 +555,7 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {
    }
    if (heap->is_tenurable(r)) {
      if ((r->garbage() < old_garbage_threshold) && (r->used() > pip_used_threshold)) {
-       // We prefer to promote this region in place because is has a small amount of garbage and a large usage.
+       // We prefer to promote this region in place because it has a small amount of garbage and a large usage.
        HeapWord* tams = ctx->top_at_mark_start(r);
        HeapWord* original_top = r->top();
        if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) {
@@ -620,17 +615,21 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {
      // Note that we keep going even if one region is excluded from selection.
      // Subsequent regions may be selected if they have smaller live data.
  }
+
+ log_info(gc, ergo)("Promotion potential of aged regions with sufficient garbage: %zu", promo_potential);
+
  // Sort in increasing order according to live data bytes. Note that candidates represents the number of regions
  // that qualify to be promoted by evacuation.
+ size_t old_consumed = 0;
  if (candidates > 0) {
    size_t selected_regions = 0;
    size_t selected_live = 0;
    QuickSort::sort(sorted_regions, candidates, compare_by_aged_live);
    for (size_t i = 0; i < candidates; i++) {
      ShenandoahHeapRegion* const region = sorted_regions[i]._region;
-     size_t region_live_data = sorted_regions[i]._live_data;
-     size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
-     if (old_consumed + promotion_need <= old_available) {
+     const size_t region_live_data = sorted_regions[i]._live_data;
+     const size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
+     if (old_consumed + promotion_need <= old_promotion_reserve) {
        old_consumed += promotion_need;
        candidate_regions_for_promotion_by_copy[region->index()] = true;
        selected_regions++;
@@ -644,14 +643,26 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {
      // We keep going even if one region is excluded from selection because we need to accumulate all eligible
      // regions that are not preselected into promo_potential
    }
-   log_debug(gc)("Preselected %zu regions containing %zu live bytes,"
+   log_info(gc, ergo)("Preselected %zu regions containing %zu live bytes,"
                  " consuming: %zu of budgeted: %zu",
-                 selected_regions, selected_live, old_consumed, old_available);
+                 selected_regions, selected_live, old_consumed, old_promotion_reserve);
  }

+ const uint tenuring_threshold = heap->age_census()->tenuring_threshold();
+ const size_t tenurable_next_cycle = heap->age_census()->get_tenurable_bytes(tenuring_threshold - 1);
+ const size_t tenurable_this_cycle = heap->age_census()->get_tenurable_bytes(tenuring_threshold);
+
+ log_info(gc, ergo)("Promotion potential: %zu, tenurable next cycle: %zu, tenurable this cycle: %zu, selected for promotion: %zu",
+                    promo_potential, tenurable_next_cycle, tenurable_this_cycle, old_consumed);
+
  heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
- heap->old_generation()->set_promotion_potential(promo_potential);
- return old_consumed;
+ heap->old_generation()->set_promotion_potential(tenurable_next_cycle);
+
+ assert(old_consumed <= old_promotion_reserve, "Consumed more (%zu) than we reserved (%zu)", old_consumed, old_promotion_reserve);
+
+ // old_consumed may exceed tenurable_this_cycle because it has been scaled by ShenandoahPromoEvacWaste.
+ old_consumed = MAX2(old_consumed, tenurable_this_cycle);
+ return MIN2(old_consumed, old_promotion_reserve);
 }

 void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
index e6597b3c1e49c..4b7534fa45104 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
@@ -97,7 +97,7 @@ class ShenandoahGeneration : public CHeapObj<mtGC>, public ShenandoahSpaceInfo {
  // regions, which are marked in the preselected_regions() indicator
  // array of the heap's collection set, which should be initialized
  // to false.
- size_t select_aged_regions(size_t old_available);
+ size_t select_aged_regions(size_t old_promotion_reserve);

  size_t available(size_t capacity) const;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
index 34f217ada250b..f679b3df0b536 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
@@ -110,7 +110,6 @@ void ShenandoahGenerationalHeap::initialize_heuristics() {
  _generation_sizer.heap_size_changed(max_capacity());
  size_t initial_capacity_young = _generation_sizer.max_young_size();
  size_t max_capacity_young = _generation_sizer.max_young_size();
- size_t initial_capacity_old = max_capacity() - max_capacity_young;
  size_t max_capacity_old = max_capacity() - initial_capacity_young;

  _young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young);
@@ -267,6 +266,7 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, Shena
      // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
      // where abundance is defined as >= ShenGenHeap::plab_min_size(). In the former case, we try shrinking the
      // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations.
+     // Shrinking the desired PLAB size may allow us to eke out a small PLAB while staying beneath evacuation reserve.
      if (plab->words_remaining() < plab_min_size()) {
        ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
        copy = allocate_from_plab(thread, size, is_promotion);
@@ -290,15 +290,21 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, Shena
    if (copy == nullptr) {
      // If we failed to allocate in LAB, we'll try a shared allocation.
-     if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
+     if (!is_promotion || !has_plab || (size > PLAB::max_size())) {
        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen, is_promotion);
        copy = allocate_memory(req);
        alloc_from_lab = false;
+       if (is_promotion && copy != nullptr) {
+         log_debug(gc, plab)("Made a shared promotion of size: %zu, actual PLAB size for thread: %zu, min PLAB: %zu, max PLAB: %zu",
+                             size * HeapWordSize, ShenandoahThreadLocalData::get_plab_actual_size(thread) * HeapWordSize,
+                             PLAB::min_size() * HeapWordSize, plab_max_size() * HeapWordSize);
+       }
      }
      // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
-     // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
-     // costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
-     // evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
+     // We choose not to promote objects smaller than PLAB::max_size() by way of shared allocations, as this is too
+     // costly (such objects should use the PLAB). Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB)
+     // and will promote in a future evacuation pass. This condition is denoted by: is_promotion && has_plab && (size
+     // <= PLAB::max_size())
    }
 #ifdef ASSERT
  }
@@ -408,17 +414,21 @@ inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread,
  assert(UseTLAB, "TLABs should be enabled");
  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
- HeapWord* obj;
  if (plab == nullptr) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
    // No PLABs in this thread, fallback to shared allocation
+   log_debug(gc, plab)("Thread has no PLAB");
    return nullptr;
- } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
+ }
+
+ if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
+   log_develop_trace(gc, plab)("Thread is not allowed to use PLAB for promotions");
    return nullptr;
  }
+
  // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
- obj = plab->allocate(size);
+ HeapWord* obj = plab->allocate(size);
  if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) {
    // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
    obj = allocate_from_plab_slow(thread, size, is_promotion);
@@ -436,9 +446,8 @@ inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread,
 // Establish a new PLAB and allocate size HeapWords within it.
 HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
- // New object should fit the PLAB size
-
  assert(mode()->is_generational(), "PLABs only relevant to generational GC");
+ const size_t plab_min_size = this->plab_min_size();

  // PLABs are aligned to card boundaries to avoid synchronization with concurrent
  // allocations in other PLABs.
@@ -451,73 +460,84 @@ HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, si
  }

  // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size()
- size_t future_size = MIN2(cur_size * 2, plab_max_size());
+ const size_t future_size = MIN2(cur_size * 2, plab_max_size());
  // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
  // are card multiples.)
  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: %zu"
-        ", card_size: %zu, cur_size: %zu, max: %zu",
-        future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());
+        ", card_size: %u, cur_size: %zu, max: %zu",
+        future_size, CardTable::card_size_in_words(), cur_size, plab_max_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them. Note that the requested cur_size may
  // not be honored, but we remember that this is the preferred size.
- log_debug(gc, free)("Set new PLAB size: %zu", future_size);
+ log_debug(gc, plab)("Set next PLAB refill size: %zu bytes", future_size * HeapWordSize);
  ShenandoahThreadLocalData::set_plab_size(thread, future_size);
+
  if (cur_size < size) {
    // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
    // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
-   log_debug(gc, free)("Current PLAB size (%zu) is too small for %zu", cur_size, size);
+   log_debug(gc, plab)("Current PLAB size (%zu) is too small for %zu", cur_size * HeapWordSize, size * HeapWordSize);
    return nullptr;
  }

- // Retire current PLAB, and allocate a new one.
  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
- if (plab->words_remaining() < plab_min_size) {
-   // Retire current PLAB. This takes care of any PLAB book-keeping.
-   // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
-   // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
-   retire_plab(plab, thread);
-
-   size_t actual_size = 0;
-   HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
-   if (plab_buf == nullptr) {
-     if (min_size == plab_min_size) {
-       // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
-       // to fail faster on subsequent promotion attempts.
-       ShenandoahThreadLocalData::disable_plab_promotions(thread);
-     }
-     return nullptr;
-   } else {
-     ShenandoahThreadLocalData::enable_plab_retries(thread);
-   }
-   // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
-   if (ZeroTLAB) {
-     // ... and clear it.
-     Copy::zero_to_words(plab_buf, actual_size);
-   } else {
-     // ...and zap just allocated object.
-#ifdef ASSERT
-     // Skip mangling the space corresponding to the object header to
-     // ensure that the returned space is not considered parsable by
-     // any concurrent GC thread.
-     size_t hdr_size = oopDesc::header_size();
-     Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
-#endif // ASSERT
-   }
-   assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
-   plab->set_buf(plab_buf, actual_size);
-   if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
-     return nullptr;
-   }
-   return plab->allocate(size);
- } else {
+ if (plab->words_remaining() >= plab_min_size) {
    // If there's still at least min_size() words available within the current plab, don't retire it. Let's nibble
    // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request
    // to be satisfied with a shared allocation. By packing more promotions into the previously allocated PLAB, we
    // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
+   log_debug(gc, plab)("Existing PLAB is still viable (words remaining: %zu, plab_min_size: %zu)", plab->words_remaining(), plab_min_size);
+   return nullptr;
+ }
+
+ // The current plab has fewer words remaining than the minimum PLAB. Retire it. This takes care of any PLAB book-keeping.
+ // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
+ // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
+ retire_plab(plab, thread);
+
+ size_t actual_size = 0;
+ HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
+ if (plab_buf == nullptr) {
+   if (min_size == plab_min_size) {
+     // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
+     // to fail faster on subsequent promotion attempts.
+ log_debug(gc, plab)("Disable PLAB promotions because we can't allocate minimum sized PLAB: %zu", min_size * HeapWordSize); + ShenandoahThreadLocalData::disable_plab_promotions(thread); + } return nullptr; } + + log_debug(gc, plab)("Allocated new PLAB of size: %zu bytes, enable PLAB retries", actual_size * HeapWordSize); + ShenandoahThreadLocalData::enable_plab_retries(thread); + + + if (ZeroTLAB) { + // ... and clear it. + Copy::zero_to_words(plab_buf, actual_size); + } else { + // ...and zap just allocated object. +#ifdef ASSERT + // Skip mangling the space corresponding to the object header to + // ensure that the returned space is not considered parsable by + // any concurrent GC thread. + const size_t hdr_size = oopDesc::header_size(); + Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal); +#endif // ASSERT + } + + assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design"); + plab->set_buf(plab_buf, actual_size); + if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) { + // Thinking here is that the thread has exhausted promotion reserve, but there may yet be old objects + // to evacuate and this plab could be used for those. + log_debug(gc, plab)("Thread has new PLAB of size %zu, but is not allowed to promote %zu. Mixed evac in progress? %s", + actual_size * HeapWordSize, size * HeapWordSize, BOOL_TO_STR(collection_set()->has_old_regions())); + return nullptr; + } + + // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail. + return plab->allocate(size); } HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) { @@ -553,6 +573,7 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) { ShenandoahThreadLocalData::reset_plab_promoted(thread); ShenandoahThreadLocalData::set_plab_actual_size(thread, 0); if (not_promoted > 0) { + log_debug(gc, plab)("Retire PLAB, unexpend unpromoted: %zu", not_promoted * HeapWordSize); old_generation()->unexpend_promoted(not_promoted); } const size_t original_waste = plab->waste(); @@ -564,8 +585,8 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) { if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) { // If retiring the plab created a filler object, then we need to register it with our card scanner so it can // safely walk the region backing the plab. - log_debug(gc)("retire_plab() is registering remnant of size %zu at " PTR_FORMAT, - plab->waste() - original_waste, p2i(top)); + log_debug(gc, plab)("retire_plab() is registering remnant of size %zu at " PTR_FORMAT, + (plab->waste() - original_waste) * HeapWordSize, p2i(top)); // No lock is necessary because the PLAB memory is aligned on card boundaries. 
old_generation()->card_scan()->register_object_without_lock(top); } @@ -1088,6 +1109,10 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() { entry_global_coalesce_and_fill(); } + log_info(gc, cset)("Concurrent cycle complete, promotions reserved: %zu, promotions expended: %zu, failed count: %zu, failed bytes: %zu", + old_generation()->get_promoted_reserve(), old_generation()->get_promoted_expended(), + old_generation()->get_promotion_failed_count(), old_generation()->get_promotion_failed_words() * HeapWordSize); + TransferResult result; { ShenandoahHeapLocker locker(lock()); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index b2fd32d2fd0b9..ac2f977883e94 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1264,7 +1264,7 @@ void ShenandoahHeap::evacuate_collection_set(bool concurrent) { void ShenandoahHeap::concurrent_prepare_for_update_refs() { { - // Java threads take this lock while they are being attached and added to the list of thread. + // Java threads take this lock while they are being attached and added to the list of threads. // If another thread holds this lock before we update the gc state, it will receive a stale // gc state, but they will have been added to the list of java threads and so will be corrected // by the following handshake. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 1724fc2849f76..707c2690b5818 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -69,12 +69,6 @@ void ShenandoahOldGC::op_final_mark() { heap->set_unload_classes(false); heap->prepare_concurrent_roots(); - // Believe verification following old-gen concurrent mark needs to be different than verification following - // young-gen concurrent mark, so am commenting this out for now: - // if (ShenandoahVerify) { - // heap->verifier()->verify_after_concmark(); - // } - if (VerifyAfterGC) { Universe::verify(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index ac3107eb396ec..176dec5e79082 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -204,6 +204,8 @@ ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_cap _promoted_expended(0), _promotion_potential(0), _pad_for_promote_in_place(0), + _promotion_failure_count(0), + _promotion_failure_words(0), _promotable_humongous_regions(0), _promotable_regular_regions(0), _is_parsable(true), @@ -240,7 +242,9 @@ void ShenandoahOldGeneration::augment_promoted_reserve(size_t increment) { void ShenandoahOldGeneration::reset_promoted_expended() { shenandoah_assert_heaplocked_or_safepoint(); - AtomicAccess::store(&_promoted_expended, (size_t) 0); + AtomicAccess::store(&_promoted_expended, static_cast(0)); + AtomicAccess::store(&_promotion_failure_count, static_cast(0)); + AtomicAccess::store(&_promotion_failure_words, static_cast(0)); } size_t ShenandoahOldGeneration::expend_promoted(size_t increment) { @@ -299,6 +303,8 @@ ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAlloc if (can_promote(actual_size)) { // Assume the entirety of this PLAB will be used for promotion. This prevents promotion from overreach. 
        // When we retire this plab, we'll unexpend what we don't really use.
+       log_debug(gc, plab)("Thread can promote using PLAB of %zu bytes. Expended: %zu, available: %zu",
+                           actual_size, get_promoted_expended(), get_promoted_reserve());
        expend_promoted(actual_size);
        ShenandoahThreadLocalData::enable_plab_promotions(thread);
        ShenandoahThreadLocalData::set_plab_actual_size(thread, actual_size);
@@ -306,9 +312,12 @@ ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAlloc
        // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
        ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
+       log_debug(gc, plab)("Thread cannot promote using PLAB of %zu bytes. Expended: %zu, available: %zu, mixed evacuations? %s",
+                           actual_size, get_promoted_expended(), get_promoted_reserve(), BOOL_TO_STR(ShenandoahHeap::heap()->collection_set()->has_old_regions()));
      }
    } else if (req.is_promotion()) {
      // Shared promotion.
+     log_debug(gc, plab)("Expend shared promotion of %zu bytes", actual_size);
      expend_promoted(actual_size);
    }
  }
@@ -675,12 +684,14 @@ void ShenandoahOldGeneration::handle_failed_promotion(Thread* thread, size_t siz
  static size_t epoch_report_count = 0;

  auto heap = ShenandoahGenerationalHeap::heap();
- size_t promotion_reserve;
- size_t promotion_expended;
-
  const size_t gc_id = heap->control_thread()->get_gc_id();
+
+ AtomicAccess::inc(&_promotion_failure_count);
+ AtomicAccess::add(&_promotion_failure_words, size);
+
  if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) {
+   size_t promotion_expended;
+   size_t promotion_reserve;
    {
      // Promotion failures should be very rare. Invest in providing useful diagnostic info.
      ShenandoahHeapLocker locker(heap->lock());
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
index abc865c31cd1e..b0680e2d02f76 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
@@ -65,16 +65,21 @@ class ShenandoahOldGeneration : public ShenandoahGeneration {
  // remaining in a PLAB when it is retired.
  size_t _promoted_expended;

- // Represents the quantity of live bytes we expect to promote in place during the next
- // evacuation cycle. This value is used by the young heuristic to trigger mixed collections.
+ // Represents the quantity of live bytes we expect to promote during the next evacuation
+ // cycle. This value is used by the young heuristic to trigger mixed collections.
  // It is also used when computing the optimum size for the old generation.
  size_t _promotion_potential;

  // When a region is selected to be promoted in place, the remaining free memory is filled
  // in to prevent additional allocations (preventing premature promotion of newly allocated
- // objects. This field records the total amount of padding used for such regions.
+ // objects). This field records the total amount of padding used for such regions.
  size_t _pad_for_promote_in_place;

+ // Keep track of the number and size of promotions that failed. Perhaps we should use this to increase
+ // the size of the old generation for the next collection cycle.
+ size_t _promotion_failure_count;
+ size_t _promotion_failure_words;
+
  // During construction of the collection set, we keep track of regions that are eligible
  // for promotion in place. These fields track the count of those humongous and regular regions.
  // This data is used to force the evacuation phase even when the collection set is otherwise
@@ -119,6 +124,10 @@ class ShenandoahOldGeneration : public ShenandoahGeneration {
  // This is used on the allocation path to gate promotions that would exceed the reserve
  size_t get_promoted_expended() const;

+ // Return the count and size (in words) of failed promotions since the last reset
+ size_t get_promotion_failed_count() const { return _promotion_failure_count; }
+ size_t get_promotion_failed_words() const { return _promotion_failure_words; }
+
  // Test if there is enough memory reserved for this promotion
  bool can_promote(size_t requested_bytes) const {
    size_t promotion_avail = get_promoted_reserve();
@@ -137,9 +146,10 @@ class ShenandoahOldGeneration : public ShenandoahGeneration {
  // See description in field declaration
  void set_region_balance(ssize_t balance) { _region_balance = balance; }
  ssize_t get_region_balance() const { return _region_balance; }
+
  // See description in field declaration
- void set_promotion_potential(size_t val) { _promotion_potential = val; };
- size_t get_promotion_potential() const { return _promotion_potential; };
+ void set_promotion_potential(size_t val) { _promotion_potential = val; }
+ size_t get_promotion_potential() const { return _promotion_potential; }

  // See description in field declaration
  void set_pad_for_promote_in_place(size_t pad) { _pad_for_promote_in_place = pad; }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp b/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp
index a786f8ae216b3..bbb44348355b6 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp
@@ -37,9 +37,9 @@ void ShenandoahTracer::report_evacuation_info(const ShenandoahCollectionSet* cse
    e.set_cSetRegions(cset->count());
    e.set_cSetUsedBefore(cset->used());
    e.set_cSetUsedAfter(cset->live());
-   e.set_collectedOld(cset->get_old_bytes_reserved_for_evacuation());
-   e.set_collectedPromoted(cset->get_young_bytes_to_be_promoted());
-   e.set_collectedYoung(cset->get_young_bytes_reserved_for_evacuation());
+   e.set_collectedOld(cset->get_live_bytes_in_old_regions());
+   e.set_collectedPromoted(cset->get_live_bytes_in_tenurable_regions());
+   e.set_collectedYoung(cset->get_live_bytes_in_untenurable_regions());
    e.set_regionsPromotedHumongous(regions_promoted_humongous);
    e.set_regionsPromotedRegular(regions_promoted_regular);
    e.set_regularPromotedGarbage(regular_promoted_garbage);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
index d1531c51236f5..8bd59beb93b7a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
@@ -387,13 +387,13 @@
                                                                          \
  product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL,         \
          "The maximum proportion of evacuation from old-gen memory, "    \
-         "expressed as a percentage. The default value 75 denotes that no" \
-         "more than 75% of the collection set evacuation workload may be " \
-         "towards evacuation of old-gen heap regions. This limits both the"\
-         "promotion of aged regions and the compaction of existing old "   \
-         "regions. A value of 75 denotes that the total evacuation work"   \
-         "may increase to up to four times the young gen evacuation work." \
-         "A larger value allows quicker promotion and allows"              \
+         "expressed as a percentage. The default value 75 denotes that "   \
+         "no more than 75% of the collection set evacuation workload may " \
+         "be towards evacuation of old-gen heap regions. This limits both "\
+         "the promotion of aged regions and the compaction of existing "   \
+         "old regions. A value of 75 denotes that the total evacuation "   \
+         "work may increase to up to four times the young gen evacuation " \
+         "work. A larger value allows quicker promotion and allows "       \
          "a smaller number of mixed evacuations to process "               \
          "the entire list of old-gen collection candidates at the cost "   \
          "of an increased disruption of the normal cadence of young-gen "  \
@@ -401,7 +401,7 @@
          "focus entirely on old-gen memory, allowing no young-gen "        \
          "regions to be collected, likely resulting in subsequent "        \
          "allocation failures because the allocation pool is not "         \
-         "replenished. A value of 0 allows a mixed evacuation to"          \
+         "replenished. A value of 0 allows a mixed evacuation to "         \
          "focus entirely on young-gen memory, allowing no old-gen "        \
          "regions to be collected, likely resulting in subsequent "        \
          "promotion failures and triggering of stop-the-world full GC "    \
diff --git a/test/hotspot/gtest/gc/shenandoah/test_shenandoahAgeCensus.cpp b/test/hotspot/gtest/gc/shenandoah/test_shenandoahAgeCensus.cpp
index c53d0a155544c..0b89c59634ac5 100644
--- a/test/hotspot/gtest/gc/shenandoah/test_shenandoahAgeCensus.cpp
+++ b/test/hotspot/gtest/gc/shenandoah/test_shenandoahAgeCensus.cpp
@@ -63,7 +63,7 @@ class ShenandoahAgeCensusTest : public ::testing::Test {
        total += _cohort_populations[i];
      }
    }
-   return total;
+   return total * HeapWordSize;
  }

  void promote_all_tenurable(const size_t tenuring_threshold) {
@@ -87,6 +87,13 @@ TEST_F(ShenandoahAgeCensusTest, initialize) {
  EXPECT_EQ(census.tenuring_threshold(), ShenandoahAgeCensus::MAX_COHORTS);
 }

+TEST_F(ShenandoahAgeCensusTest, get_tenurable_bytes) {
+  ShenandoahAgeCensus census(1);
+  update(census);
+  EXPECT_EQ(get_total_population_older_than(1), census.get_tenurable_bytes(1));
+  EXPECT_LT(census.get_tenurable_bytes(2), census.get_tenurable_bytes(1));
+}
+
 TEST_F(ShenandoahAgeCensusTest, ignore_small_populations) {
  // Small populations are ignored so we do not return early before reaching the youngest cohort.
  ShenandoahAgeCensus census(1);
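Reviewer note, not part of the patch: for readers working through the ShenandoahOldEvacRatioPercent (SOEP) algebra that the patch factors into get_maximum_old_evacuation_reserve(), the standalone sketch below only illustrates the arithmetic. The function name max_old_evac_reserve and the sample reserve/availability figures are assumptions chosen for the example, not code or values from the PR.

// Minimal sketch of OE = YE * SOEP / (100 - SOEP), clamped by what old-gen can offer.
#include <algorithm>
#include <cassert>
#include <cstdio>

static size_t max_old_evac_reserve(size_t young_reserve, size_t old_available, unsigned soep) {
  assert(soep <= 100);
  if (soep == 100) {
    // All evacuation effort may be directed at old-gen.
    return old_available;
  }
  const size_t unclamped = (young_reserve * soep) / (100 - soep);
  return std::min(unclamped, old_available);
}

int main() {
  // With the default SOEP of 75, old evacuation may be up to 3x the young reserve,
  // i.e. total evacuation work up to 4x the young work, matching the option text.
  const size_t young_reserve = 100u << 20;   // assume a 100 MB young evacuation reserve
  const size_t old_available = 512u << 20;   // assume 512 MB available in old-gen
  printf("SOEP=75 -> old evac cap: %zu MB\n", max_old_evac_reserve(young_reserve, old_available, 75) >> 20); // 300
  printf("SOEP=50 -> old evac cap: %zu MB\n", max_old_evac_reserve(young_reserve, old_available, 50) >> 20); // 100
  return 0;
}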