7079317 Cdiff src/share/vm/opto/output.cpp
*** 418,428 ****
if (last_avoid_back_to_back_adr >= blk_starts[i]) {
blk_size += nop_size;
}
}
if (mach->may_be_short_branch()) {
! if (!nj->is_Branch()) {
#ifndef PRODUCT
nj->dump(3);
#endif
Unimplemented();
}
--- 418,428 ----
if (last_avoid_back_to_back_adr >= blk_starts[i]) {
blk_size += nop_size;
}
}
if (mach->may_be_short_branch()) {
! if (!nj->is_MachBranch()) {
#ifndef PRODUCT
nj->dump(3);
#endif
Unimplemented();
}
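The hunk above tightens the sanity check from is_Branch() to is_MachBranch(): any node that claims it may_be_short_branch() must be a machine branch node, otherwise the pass bails out. A minimal standalone sketch of that guard, using simplified stand-in types rather than HotSpot's real MachNode hierarchy:

// Sketch only: Node/MachBranchNode here are stand-ins, not HotSpot's classes.
#include <cstdio>
#include <cstdlib>

struct Node {
  virtual bool is_MachBranch() const { return false; }
  virtual bool may_be_short_branch() const { return false; }
  virtual ~Node() {}
};

struct MachBranchNode : Node {
  bool is_MachBranch() const override { return true; }
  bool may_be_short_branch() const override { return true; }
};

// A short-branch candidate that is not a MachBranch is a matcher bug;
// abort() plays the role of Unimplemented() in the sketch.
void check_short_branch_candidate(const Node* n) {
  if (n->may_be_short_branch() && !n->is_MachBranch()) {
    std::fprintf(stderr, "short-branch candidate is not a MachBranch\n");
    std::abort();
  }
}

int main() {
  MachBranchNode br;
  check_short_branch_candidate(&br);   // passes for a real branch node
  return 0;
}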
*** 471,481 ****
Block *b = _cfg->_blocks[i];
int idx = jmp_nidx[i];
MachNode* mach = (idx == -1) ? NULL: b->_nodes[idx]->as_Mach();
if (mach != NULL && mach->may_be_short_branch()) {
#ifdef ASSERT
! assert(jmp_size[i] > 0 && mach->is_Branch(), "sanity");
int j;
// Find the branch; ignore trailing NOPs.
for (j = b->_nodes.size()-1; j>=0; j--) {
Node* n = b->_nodes[j];
if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
--- 471,481 ----
Block *b = _cfg->_blocks[i];
int idx = jmp_nidx[i];
MachNode* mach = (idx == -1) ? NULL: b->_nodes[idx]->as_Mach();
if (mach != NULL && mach->may_be_short_branch()) {
#ifdef ASSERT
! assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
int j;
// Find the branch; ignore trailing NOPs.
for (j = b->_nodes.size()-1; j>=0; j--) {
Node* n = b->_nodes[j];
if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
*** 498,508 ****
if (needs_padding && offset <= 0)
offset -= nop_size;
if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
// We've got a winner. Replace this branch.
! MachNode* replacement = mach->short_branch_version(this);
// Update the jmp_size.
int new_size = replacement->size(_regalloc);
int diff = br_size - new_size;
assert(diff >= (int)nop_size, "short_branch size should be smaller");
--- 498,508 ----
if (needs_padding && offset <= 0)
offset -= nop_size;
if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
// We've got a winner. Replace this branch.
! MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
// Update the jmp_size.
int new_size = replacement->size(_regalloc);
int diff = br_size - new_size;
assert(diff >= (int)nop_size, "short_branch size should be smaller");
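This hunk obtains the replacement via as_MachBranch()->short_branch_version(this) instead of calling short_branch_version() on the bare MachNode; the size bookkeeping is unchanged, and the same substitution is repeated in the next hunk. A self-contained sketch of the substitution pattern; the branch sizes and the 8-bit displacement check are assumptions for illustration, not HotSpot's actual encodings:

// Sketch only: sizes and the offset range are illustrative assumptions.
#include <cassert>

struct Branch {
  int size;        // encoded size in bytes
  bool is_short;
};

const int kLongBranchSize  = 6;
const int kShortBranchSize = 2;
const int kNopSize         = 1;

bool fits_short_offset(int offset) {
  return offset >= -128 && offset < 128;   // e.g. an 8-bit displacement
}

// Mirrors the diff: if the offset fits, take the short form and require
// that the saving is at least one nop so padding bookkeeping stays valid.
void maybe_shorten(Branch& br, int offset) {
  if (!br.is_short && fits_short_offset(offset)) {
    Branch replacement = { kShortBranchSize, true };
    int diff = br.size - replacement.size;
    assert(diff >= kNopSize && "short_branch size should be smaller");
    br = replacement;
  }
}

int main() {
  Branch br = { kLongBranchSize, false };
  maybe_shorten(br, 40);   // fits: replaced by the short form
  assert(br.is_short && br.size == kShortBranchSize);
  return 0;
}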
*** 668,678 ****
if (needs_padding && offset <= 0)
offset -= nop_size;
if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
// We've got a winner. Replace this branch.
! MachNode* replacement = mach->short_branch_version(this);
// Update the jmp_size.
int new_size = replacement->size(_regalloc);
assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
// Conservatively take into account padding between
--- 668,678 ----
if (needs_padding && offset <= 0)
offset -= nop_size;
if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
// We've got a winner. Replace this branch.
! MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
// Update the jmp_size.
int new_size = replacement->size(_regalloc);
assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
// Conservatively take into account padding between
*** 1523,1535 ****
else if( mach->is_MachNullCheck() ) {
inct_starts[inct_cnt++] = previous_offset;
}
// If this is a branch, then fill in the label with the target BB's label
! else if (mach->is_Branch()) {
!
! if (mach->ideal_Opcode() == Op_Jump) {
for (uint h = 0; h < b->_num_succs; h++) {
Block* succs_block = b->_succs[h];
for (uint j = 1; j < succs_block->num_preds(); j++) {
Node* jpn = succs_block->pred(j);
if (jpn->is_JumpProj() && jpn->in(0) == mach) {
--- 1523,1537 ----
else if( mach->is_MachNullCheck() ) {
inct_starts[inct_cnt++] = previous_offset;
}
// If this is a branch, then fill in the label with the target BB's label
! else if (mach->is_MachBranch()) {
! // This requires the TRUE branch target be in succs[0]
! uint block_num = b->non_connector_successor(0)->_pre_order;
! mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
! } else if (mach->ideal_Opcode() == Op_Jump) {
for (uint h = 0; h < b->_num_succs; h++) {
Block* succs_block = b->_succs[h];
for (uint j = 1; j < succs_block->num_preds(); j++) {
Node* jpn = succs_block->pred(j);
if (jpn->is_JumpProj() && jpn->in(0) == mach) {
*** 1537,1553 ****
Label *blkLabel = &blk_labels[block_num];
mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
}
}
}
- } else {
- // For Branchs
- // This requires the TRUE branch target be in succs[0]
- uint block_num = b->non_connector_successor(0)->_pre_order;
- mach->label_set( &blk_labels[block_num], block_num );
}
- }
#ifdef ASSERT
// Check that oop-store precedes the card-mark
else if (mach->ideal_Opcode() == Op_StoreCM) {
uint storeCM_idx = j;
--- 1539,1549 ----
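The restructured block in the two hunks above dispatches on is_MachBranch() first: an ordinary branch binds its label to the first non-connector successor (the TRUE target in succs[0]), while the Op_Jump case keeps walking the JumpProj projections and binds one label per case. A standalone sketch of that split, with simplified stand-in types:

// Sketch only: Label/Case here are stand-ins for HotSpot's Label and JumpProj.
#include <vector>
#include <cstdio>

struct Label { int block_num = -1; };
struct Case  { int proj_no; int target_block; };

// Branch: exactly one label, bound to the TRUE target block.
void set_branch_label(Label& label, int true_target_block) {
  label.block_num = true_target_block;
}

// Jump (multiway switch): one label per case projection.
void set_jump_labels(std::vector<Label>& labels, const std::vector<Case>& cases) {
  for (const Case& c : cases) {
    labels[c.proj_no].block_num = c.target_block;
  }
}

int main() {
  Label br_label;
  set_branch_label(br_label, 7);
  std::vector<Label> jmp_labels(3);
  set_jump_labels(jmp_labels, { {0, 12}, {1, 15}, {2, 9} });
  std::printf("branch -> B%d, case0 -> B%d\n", br_label.block_num, jmp_labels[0].block_num);
  return 0;
}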
*** 2227,2237 ****
// branch, OR a conditionally executed instruction if
// the branch is taken. In practice, this means that
// the first instruction at the branch target is
// copied to the delay slot, and the branch goes to
// the instruction after that at the branch target
! if ( n->is_Mach() && n->is_Branch() ) {
assert( !n->is_MachNullCheck(), "should not look for delay slot for Null Check" );
assert( !n->is_Catch(), "should not look for delay slot for Catch" );
#ifndef PRODUCT
--- 2223,2233 ----
// branch, OR a conditionally executed instruction if
// the branch is taken. In practice, this means that
// the first instruction at the branch target is
// copied to the delay slot, and the branch goes to
// the instruction after that at the branch target
! if ( n->is_MachBranch() ) {
assert( !n->is_MachNullCheck(), "should not look for delay slot for Null Check" );
assert( !n->is_Catch(), "should not look for delay slot for Catch" );
#ifndef PRODUCT
*** 2888,2898 ****
}
// Kill projections on a branch should appear to occur on the
// branch, not afterwards, so grab the masks from the projections
// and process them.
! if (n->is_Branch()) {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* use = n->fast_out(i);
if (use->is_Proj()) {
RegMask rm = use->out_RegMask();// Make local copy
while( rm.is_NotEmpty() ) {
--- 2884,2894 ----
}
// Kill projections on a branch should appear to occur on the
// branch, not afterwards, so grab the masks from the projections
// and process them.
! if (n->is_MachBranch() || n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_Jump) {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* use = n->fast_out(i);
if (use->is_Proj()) {
RegMask rm = use->out_RegMask();// Make local copy
while( rm.is_NotEmpty() ) {
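The final hunk also treats Op_Jump nodes like branches when collecting kill projections, so register kills hanging off the projections are charged to the branch itself rather than to the instructions after it. A standalone sketch of folding projection kill masks into the branch; RegMask here is a plain bitset stand-in, not HotSpot's RegMask:

// Sketch only: RegMask/Proj/BranchLike are simplified stand-ins.
#include <bitset>
#include <vector>
#include <cstdio>

typedef std::bitset<32> RegMask;

struct Proj { RegMask kills; };

struct BranchLike {
  bool is_branch_or_jump;       // MachBranch, or a Mach node with Op_Jump
  std::vector<Proj> projs;      // outgoing projections
};

// Fold every projection's kill mask into the mask attributed to the branch.
RegMask kills_at_branch(const BranchLike& n) {
  RegMask acc;
  if (n.is_branch_or_jump) {
    for (const Proj& p : n.projs) {
      acc |= p.kills;
    }
  }
  return acc;
}

int main() {
  BranchLike br{ true, { Proj{ RegMask(0x3) }, Proj{ RegMask(0x8) } } };
  std::printf("kills at branch: 0x%lx\n", kills_at_branch(br).to_ulong());
  return 0;
}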