- 
                Notifications
    
You must be signed in to change notification settings  - Fork 6.1k
 
8369048: GenShen: Defer ShenFreeSet::available() during rebuild #27612
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
56291b4
              0944fa8
              623b0f0
              99d0175
              bec73da
              1a5e483
              717e7da
              78739b5
              d9a8a51
              a689839
              File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | 
|---|---|---|
| 
          
            
          
           | 
    @@ -28,8 +28,12 @@ | |
| 
     | 
||
| #include "gc/shenandoah/shenandoahHeap.hpp" | ||
| #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" | ||
| #include "gc/shenandoah/shenandoahLock.hpp" | ||
| #include "gc/shenandoah/shenandoahSimpleBitMap.hpp" | ||
| 
     | 
||
| typedef ShenandoahLock ShenandoahRebuildLock; | ||
| typedef ShenandoahLocker ShenandoahRebuildLocker; | ||
| 
     | 
||
| // Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId. | ||
| enum class ShenandoahFreeSetPartitionId : uint8_t { | ||
| Mutator, // Region is in the Mutator free set: available memory is available to mutators. | ||
| 
          
            
          
           | 
    @@ -234,7 +238,7 @@ class ShenandoahRegionPartitions { | |
| // Return available_in assuming caller does not hold the heap lock. In production builds, available is | ||
| // returned without acquiring the lock. In debug builds, the global heap lock is acquired in order to | ||
| // enforce a consistency assert. | ||
| inline size_t available_in_not_locked(ShenandoahFreeSetPartitionId which_partition) const { | ||
| inline size_t available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId which_partition) const { | ||
| assert (which_partition < NumPartitions, "selected free set must be valid"); | ||
| shenandoah_assert_not_heaplocked(); | ||
| #ifdef ASSERT | ||
| 
          
            
          
           | 
    @@ -316,6 +320,9 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> { | |
| ShenandoahHeap* const _heap; | ||
| ShenandoahRegionPartitions _partitions; | ||
| 
     | 
||
| // This locks the rebuild process (in combination with the global heap lock) | ||
| 
         There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Explain the role of this lock & the global heap lock vis-à-vis the rebuild process. Also, maybe call it   | 
||
| ShenandoahRebuildLock _lock; | ||
| 
     | 
||
| HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r); | ||
| 
     | 
||
| // Return the address of memory allocated, setting in_new_region to true iff the allocation is taken | ||
| 
          
            
          
           | 
    @@ -415,6 +422,11 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> { | |
| 
     | 
||
| ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions); | ||
| 
     | 
||
| 
     | 
||
| ShenandoahRebuildLock* lock() { | ||
| 
         There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 
  | 
||
| return &_lock; | ||
| } | ||
| 
     | 
||
| // Public because ShenandoahRegionPartitions assertions require access. | ||
| inline size_t alloc_capacity(ShenandoahHeapRegion *r) const; | ||
| inline size_t alloc_capacity(size_t idx) const; | ||
| 
          
            
          
           | 
    @@ -480,7 +492,10 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> { | |
| // locked action can be seen by these unlocked functions. | ||
| inline size_t capacity() const { return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator); } | ||
| inline size_t used() const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator); } | ||
| inline size_t available() const { return _partitions.available_in_not_locked(ShenandoahFreeSetPartitionId::Mutator); } | ||
| inline size_t available() { | ||
| ShenandoahRebuildLocker locker(lock()); | ||
| return _partitions.available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId::Mutator); | ||
| } | ||
| 
     | 
||
| HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region); | ||
| 
     | 
||
| 
          
            
          
           | 
    ||
| Original file line number | Diff line number | Diff line change | 
|---|---|---|
| 
          
            
          
           | 
    @@ -1153,17 +1153,17 @@ ShenandoahGenerationalHeap::TransferResult ShenandoahFullGC::phase5_epilog() { | |
| } | ||
| 
     | 
||
| heap->collection_set()->clear(); | ||
| size_t young_cset_regions, old_cset_regions; | ||
| size_t first_old, last_old, num_old; | ||
| heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); | ||
| 
     | 
||
| size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old; | ||
| ShenandoahFreeSet* free_set = heap->free_set(); | ||
| ShenandoahRebuildLocker rebuild_locker(free_set->lock()); | ||
| 
         There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Should you not create a scope around lines 1158 to 1167, since you don't want to hold the rebuild lock as soon as the rebuild is done (i.e. immediately following  There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Maybe it doesn't matter, since no one else is running during a full GC who needs to query   | 
||
| free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); | ||
| // We also do not expand old generation size following Full GC because we have scrambled age populations and | ||
| // no longer have objects separated by age into distinct regions. | ||
| if (heap->mode()->is_generational()) { | ||
| ShenandoahGenerationalFullGC::compute_balances(); | ||
| } | ||
| 
     | 
||
| heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old); | ||
| free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old); | ||
| 
     | 
||
| // Set mark incomplete because the marking bitmaps have been reset except pinned regions. | ||
| heap->global_generation()->set_mark_incomplete(); | ||
| 
          
            
          
           | 
    ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Can the comment be simplified to:
Don't say anything about the heap lock in the API comment. Rather, in the part that is
`ifdef ASSERT`, where you take the heap lock (line ~244), say:
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
As I write this, I realize that in the most general case where two threads may call these API's independently in a fastdebug build, you could theoretically get into a deadlock because they attempted to acquire locks in different orders (this possibility exists -- statically -- only in the fastdebug builds).
The general MutexLocker machinery has ranked mutexes to avoid such situations through static ranking and checks while acquiring locks (in debug builds, as a way of potentially catching such situations and flagging them).
With such ranking, though, this code would assert because the locks are acquired in a different order here than elsewhere.
In product builds you are fine because the rebuild lock acts as a "leaf lock" (in hotspot parlance). But there seems to be a definite possibility of deadlock in debug builds if/when the rebuild is attempted by one thread while another checks available and attempts to acquire the heap lock to check the assertion. You could solve it by acquiring the heap lock before calling the work method where the assertion check is done.
However, I'd be much more comfortable if we used some form of lock rank framework, unless it was utterly impossible to do so for some reason. (Here it was easy to spot the lock order inversion because it was in the code. Of course, if a debug build deadlocked you would also figure out the same, but having lock ordering gives you a quick and easy way to verify if there's potential for trouble.)
Not sure of the history of ShenandoahLock or why the parallel infra to MutexLocker was introduced (perhaps for allowing some performance/tunability), but might be worthwhile to see if we want to build lock rank checks in for robustness/maintainability.