40 changes: 20 additions & 20 deletions src/hotspot/share/opto/chaitin.cpp
@@ -1076,8 +1076,8 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {

// Prepare register mask for each input
for( uint k = input_edge_start; k < cnt; k++ ) {
- uint vreg = _lrg_map.live_range_id(n->in(k));
- if (!vreg) {
+ uint vreg_in = _lrg_map.live_range_id(n->in(k));
+ if (!vreg_in) {
continue;
}

@@ -1099,7 +1099,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
if (k >= cur_node->num_opnds()) continue;
}

- LRG &lrg = lrgs(vreg);
+ LRG &lrg_in = lrgs(vreg_in);
// // Testing for floating point code shape
// Node *test = n->in(k);
// if( test->is_Mach() ) {
@@ -1114,25 +1114,25 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// Do not limit registers from uncommon uses before
// AggressiveCoalesce. This effectively pre-virtual-splits
// around uncommon uses of common defs.
- const RegMask &rm = n->in_RegMask(k);
+ const RegMask &rm_in = n->in_RegMask(k);
if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
// Since we are BEFORE aggressive coalesce, leave the register
// mask untrimmed by the call. This encourages more coalescing.
// Later, AFTER aggressive, this live range will have to spill
// but the spiller handles slow-path calls very nicely.
} else {
- lrg.and_with(rm);
+ lrg_in.and_with(rm_in);
}

// Check for bound register masks
- const RegMask &lrgmask = lrg.mask();
+ const RegMask &lrgmask_in = lrg_in.mask();
uint kreg = n->in(k)->ideal_reg();
bool is_vect = RegMask::is_vector(kreg);
assert(n->in(k)->bottom_type()->isa_vect() == nullptr || is_vect ||
kreg == Op_RegD || kreg == Op_RegL || kreg == Op_RegVectMask,
"vector must be in vector registers");
- if (lrgmask.is_bound(kreg))
- lrg._is_bound = 1;
+ if (lrgmask_in.is_bound(kreg))
+ lrg_in._is_bound = 1;

// If this use of a double forces a mis-aligned double,
// flag as '_fat_proj' - really flag as allowing misalignment
@@ -1141,30 +1141,30 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// FOUR registers!
#ifdef ASSERT
if (is_vect && !_scheduling_info_generated) {
- if (lrg.num_regs() != 0) {
- assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
- assert(!lrg._fat_proj, "sanity");
- assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
+ if (lrg_in.num_regs() != 0) {
+ assert(lrgmask_in.is_aligned_sets(lrg_in.num_regs()), "vector should be aligned");
+ assert(!lrg_in._fat_proj, "sanity");
+ assert(RegMask::num_registers(kreg) == lrg_in.num_regs(), "sanity");
} else {
assert(n->is_Phi(), "not all inputs processed only if Phi");
}
}
#endif
- if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
- lrg._fat_proj = 1;
- lrg._is_bound = 1;
+ if (!is_vect && lrg_in.num_regs() == 2 && !lrg_in._fat_proj && rm_in.is_misaligned_pair()) {
+ lrg_in._fat_proj = 1;
+ lrg_in._is_bound = 1;
}
// if the LRG is an unaligned pair, we will have to spill
// so clear the LRG's register mask if it is not already spilled
if (!is_vect && !n->is_SpillCopy() &&
- (lrg._def == nullptr || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
- lrgmask.is_misaligned_pair()) {
- lrg.clear();
+ (lrg_in._def == nullptr || lrg_in.is_multidef() || !lrg_in._def->is_SpillCopy()) &&
+ lrgmask_in.is_misaligned_pair()) {
+ lrg_in.clear();
}

// Check for maximum frequency value
- if (lrg._maxfreq < block->_freq) {
- lrg._maxfreq = block->_freq;
+ if (lrg_in._maxfreq < block->_freq) {
+ lrg_in._maxfreq = block->_freq;
}

} // End for all allocated inputs
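
All of the renamed locals participate in the same pattern: the register mask the instruction reports for this use (rm_in) is intersected into the live range's allowed set (lrg_in), and if the narrowed mask leaves only one legal placement the live range is marked bound. A minimal standalone sketch of that idea, using a toy 32-bit mask type rather than HotSpot's RegMask (the type, field names, and the single-register notion of "bound" below are illustrative assumptions, not the real API):

#include <cstdint>
#include <cstdio>

// Toy stand-in for a register mask: bit i set means register i is still allowed.
// Illustrative only; not HotSpot's RegMask class.
struct ToyMask {
  uint32_t bits;
  void and_with(const ToyMask& other) { bits &= other.bits; }              // keep only registers both sides allow
  bool is_bound() const { return bits != 0 && (bits & (bits - 1)) == 0; }  // exactly one register survives
};

int main() {
  ToyMask lrg_in = {0x0000FFFFu};  // live range could still go in r0..r15
  ToyMask rm_in  = {0x00000008u};  // this particular use only accepts r3
  lrg_in.and_with(rm_in);          // analogous to lrg_in.and_with(rm_in) above
  std::printf("mask=0x%08x bound=%d\n", lrg_in.bits, (int)lrg_in.is_bound());
  return 0;
}

As the comments in the first hunk note, the real code skips this intersection before aggressive coalescing when the use sits in a much colder block than the def, and lets the spiller deal with that slow path later.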
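The _fat_proj branch fires when a two-register, non-vector value is handed a mask that permits a register pair starting at an odd position. A hedged sketch of what a "misaligned pair" means on the same toy mask, again not the real RegMask::is_misaligned_pair implementation:

#include <cstdint>
#include <cstdio>

// Toy rule: a double-word value occupies registers (i, i+1) and is treated as
// aligned only when i is even. The mask "allows a misaligned pair" here if some
// adjacent pair starting at an odd index fits entirely inside it. Illustrative only.
static bool allows_misaligned_pair(uint32_t mask) {
  for (int i = 1; i + 1 < 32; i += 2) {            // odd start positions
    uint32_t pair = (1u << i) | (1u << (i + 1));
    if ((mask & pair) == pair) {
      return true;                                 // e.g. (r1, r2) would be misaligned
    }
  }
  return false;
}

int main() {
  std::printf("%d\n", allows_misaligned_pair(0x0000000Cu)); // r2,r3 only: aligned, prints 0
  std::printf("%d\n", allows_misaligned_pair(0x00000006u)); // r1,r2: misaligned, prints 1
  return 0;
}

Per the comments in the last hunk, when the live range's own mask has degenerated to a misaligned pair and the def is not already a spill copy, the mask is simply cleared so the value will be spilled.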