605
606 int cnt = 50; // Cycle limiter
607 for (;;) { // While we can dance past unrelated stores...
608 if (--cnt < 0) break; // Caught in cycle or a complicated dance?
609
610 Node* prev = mem;
611 if (mem->is_Store()) {
612 Node* st_adr = mem->in(MemNode::Address);
613 intptr_t st_offset = 0;
614 Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
615 if (st_base == NULL)
616 break; // inscrutable pointer
617
618 // For raw accesses it's not enough to prove that constant offsets don't intersect.
619 // We need the bases to be equal in order for the offset check to make sense.
620 if ((adr_maybe_raw || check_if_adr_maybe_raw(st_adr)) && st_base != base) {
621 break;
622 }
623
624 if (st_offset != offset && st_offset != Type::OffsetBot) {
625 const int MAX_STORE = BytesPerLong;
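// The store is provably independent if it begins at or after the end of this
// access, or ends at or before its start (MAX_STORE is a conservative upper
// bound on how many bytes a single store can cover).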
626 if (st_offset >= offset + size_in_bytes ||
627 st_offset <= offset - MAX_STORE ||
628 st_offset <= offset - mem->as_Store()->memory_size()) {
629 // Success: The offsets are provably independent.
630 // (You may ask, why not just test st_offset != offset and be done?
631 // The answer is that stores of different sizes can co-exist
632 // in the same sequence of RawMem effects. We sometimes initialize
633 // a whole 'tile' of array elements with a single jint or jlong.)
634 mem = mem->in(MemNode::Memory);
635 continue; // (a) advance through independent store memory
636 }
637 }
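// The constant offsets may still conflict; fall back to proving that the two
// base pointers refer to provably distinct objects (e.g. separate allocations).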
638 if (st_base != base &&
639 detect_ptr_independence(base, alloc,
640 st_base,
641 AllocateNode::Ideal_allocation(st_base, phase),
642 phase)) {
643 // Success: The bases are provably independent.
644 mem = mem->in(MemNode::Memory);
645 continue; // (a) advance through independent store memory
1074 // the same pointer-and-offset that we stored to.
1075 // A casted version may carry a dependency, and that dependency is respected.
1076 // Thus, we are able to replace L by V.
1077 }
1078 // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1079 if (store_Opcode() != st->Opcode())
1080 return NULL;
1081 return st->in(MemNode::ValueIn);
1082 }
1083
1084 // A load from a freshly-created object always returns zero.
1085 // (This can happen after LoadNode::Ideal resets the load's memory input
1086 // to the result of find_captured_store, which returned InitializeNode::zero_memory.)
1087 if (st->is_Proj() && st->in(0)->is_Allocate() &&
1088 (st->in(0) == ld_alloc) &&
1089 (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1090 // return a zero value for the load's basic type
1091 // (This is one of the few places where a generic PhaseTransform
1092 // can create new nodes. Think of it as lazily manifesting
1093 // virtually pre-existing constants.)
1094 return phase->zerocon(memory_type());
1095 }
1096
1097 // A load from an initialization barrier can match a captured store.
1098 if (st->is_Proj() && st->in(0)->is_Initialize()) {
1099 InitializeNode* init = st->in(0)->as_Initialize();
1100 AllocateNode* alloc = init->allocation();
1101 if ((alloc != NULL) && (alloc == ld_alloc)) {
1102 // examine a captured store value
1103 st = init->find_captured_store(ld_off, memory_size(), phase);
1104 if (st != NULL) {
1105 continue; // take one more trip around
1106 }
1107 }
1108 }
1109
1110 // A load of a boxed value from the result of a valueOf() call is that call's input parameter.
1111 if (this->is_Load() && ld_adr->is_AddP() &&
1112 (tp != NULL) && tp->is_ptr_to_boxed_value()) {
1113 intptr_t ignore = 0;
1114 Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
3707 // should try again during the next IGVN once the graph is
3708 // cleaner.
3709 phase->C->record_for_igvn(st);
3710 }
3711 return FAIL;
3712 }
3713
3714 return offset; // success
3715 }
3716
3717 // Find the captured store in(i) which corresponds to the range
3718 // [start..start+size) in the initialized object.
3719 // If there is one, return its index i. If there isn't, return the
3720 // negative of the index where it should be inserted.
3721 // Return 0 if the queried range overlaps an initialization boundary
3722 // or if dead code is encountered.
3723 // If size_in_bytes is zero, do not bother with overlap checks.
3724 int InitializeNode::captured_store_insertion_point(intptr_t start,
3725 int size_in_bytes,
3726 PhaseTransform* phase) {
3727 const int FAIL = 0, MAX_STORE = BytesPerLong;
3728
3729 if (is_complete())
3730 return FAIL; // arraycopy got here first; punt
3731
3732 assert(allocation() != NULL, "must be present");
3733
3734 // no negatives, no header fields:
3735 if (start < (intptr_t) allocation()->minimum_header_size()) return FAIL;
3736
3737 // after a certain size, we bail out on tracking all the stores:
3738 intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
3739 if (start >= ti_limit) return FAIL;
3740
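// Scan the captured stores, which are kept sorted by offset, until we either
// find an exact match or pass the requested range.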
3741 for (uint i = InitializeNode::RawStores, limit = req(); ; ) {
3742 if (i >= limit) return -(int)i; // not found; here is where to put it
3743
3744 Node* st = in(i);
3745 intptr_t st_off = get_store_offset(st, phase);
3746 if (st_off < 0) {
3747 if (st != zero_memory()) {
3748 return FAIL; // bail out if there is dead garbage
3749 }
3750 } else if (st_off > start) {
3751 // ...we are done, since stores are ordered
3752 if (st_off < start + size_in_bytes) {
3753 return FAIL; // the next store overlaps
3754 }
3755 return -(int)i; // not found; here is where to put it
3756 } else if (st_off < start) {
3757 if (size_in_bytes != 0 &&
3758 start < st_off + MAX_STORE &&
3759 start < st_off + st->as_Store()->memory_size()) {
3760 return FAIL; // the previous store overlaps
3761 }
3762 } else {
3763 if (size_in_bytes != 0 &&
3764 st->as_Store()->memory_size() != size_in_bytes) {
3765 return FAIL; // mismatched store size
3766 }
3767 return i;
3768 }
3769
3770 ++i;
3771 }
3772 }
3773
3774 // Look for a captured store which initializes at the offset 'start'
3775 // with the given size. If there is no such store, and no other
3776 // initialization interferes, then return zero_memory (the memory
605
606 int cnt = 50; // Cycle limiter
607 for (;;) { // While we can dance past unrelated stores...
608 if (--cnt < 0) break; // Caught in cycle or a complicated dance?
609
610 Node* prev = mem;
611 if (mem->is_Store()) {
612 Node* st_adr = mem->in(MemNode::Address);
613 intptr_t st_offset = 0;
614 Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
615 if (st_base == NULL)
616 break; // inscrutable pointer
617
618 // For raw accesses it's not enough to prove that constant offsets don't intersect.
619 // We need the bases to be equal in order for the offset check to make sense.
620 if ((adr_maybe_raw || check_if_adr_maybe_raw(st_adr)) && st_base != base) {
621 break;
622 }
623
624 if (st_offset != offset && st_offset != Type::OffsetBot) {
625 const int MAX_STORE = MAX2(BytesPerLong, (int)MaxVectorSize);
626 assert(mem->as_Store()->memory_size() <= MAX_STORE, "");
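// The store is provably independent if it begins at or after the end of this
// access, or ends at or before its start (MAX_STORE is a conservative upper
// bound on how many bytes a single store can cover).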
627 if (st_offset >= offset + size_in_bytes ||
628 st_offset <= offset - MAX_STORE ||
629 st_offset <= offset - mem->as_Store()->memory_size()) {
630 // Success: The offsets are provably independent.
631 // (You may ask, why not just test st_offset != offset and be done?
632 // The answer is that stores of different sizes can co-exist
633 // in the same sequence of RawMem effects. We sometimes initialize
634 // a whole 'tile' of array elements with a single jint or jlong.)
635 mem = mem->in(MemNode::Memory);
636 continue; // (a) advance through independent store memory
637 }
638 }
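// The constant offsets may still conflict; fall back to proving that the two
// base pointers refer to provably distinct objects (e.g. separate allocations).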
639 if (st_base != base &&
640 detect_ptr_independence(base, alloc,
641 st_base,
642 AllocateNode::Ideal_allocation(st_base, phase),
643 phase)) {
644 // Success: The bases are provably independent.
645 mem = mem->in(MemNode::Memory);
646 continue; // (a) advance through independent store memory
1075 // the same pointer-and-offset that we stored to.
1076 // A casted version may carry a dependency, and that dependency is respected.
1077 // Thus, we are able to replace L by V.
1078 }
1079 // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1080 if (store_Opcode() != st->Opcode())
1081 return NULL;
1082 return st->in(MemNode::ValueIn);
1083 }
1084
1085 // A load from a freshly-created object always returns zero.
1086 // (This can happen after LoadNode::Ideal resets the load's memory input
1087 // to the result of find_captured_store, which returned InitializeNode::zero_memory.)
1088 if (st->is_Proj() && st->in(0)->is_Allocate() &&
1089 (st->in(0) == ld_alloc) &&
1090 (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1091 // return a zero value for the load's basic type
1092 // (This is one of the few places where a generic PhaseTransform
1093 // can create new nodes. Think of it as lazily manifesting
1094 // virtually pre-existing constants.)
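// memory_type() may report T_VOID here (the assert below allows this only for
// vector loads), in which case there is no scalar zero constant to return yet.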
1095 if (memory_type() != T_VOID) {
1096 return phase->zerocon(memory_type());
1097 } else {
1098 // TODO: materialize all-zero vector constant
1099 assert(!isa_Load() || as_Load()->type()->isa_vect(), "");
1100 }
1101 }
1102
1103 // A load from an initialization barrier can match a captured store.
1104 if (st->is_Proj() && st->in(0)->is_Initialize()) {
1105 InitializeNode* init = st->in(0)->as_Initialize();
1106 AllocateNode* alloc = init->allocation();
1107 if ((alloc != NULL) && (alloc == ld_alloc)) {
1108 // examine a captured store value
1109 st = init->find_captured_store(ld_off, memory_size(), phase);
1110 if (st != NULL) {
1111 continue; // take one more trip around
1112 }
1113 }
1114 }
1115
1116 // A load of a boxed value from the result of a valueOf() call is that call's input parameter.
1117 if (this->is_Load() && ld_adr->is_AddP() &&
1118 (tp != NULL) && tp->is_ptr_to_boxed_value()) {
1119 intptr_t ignore = 0;
1120 Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
3713 // should try again during the next IGVN once the graph is
3714 // cleaner.
3715 phase->C->record_for_igvn(st);
3716 }
3717 return FAIL;
3718 }
3719
3720 return offset; // success
3721 }
3722
3723 // Find the captured store in(i) which corresponds to the range
3724 // [start..start+size) in the initialized object.
3725 // If there is one, return its index i. If there isn't, return the
3726 // negative of the index where it should be inserted.
3727 // Return 0 if the queried range overlaps an initialization boundary
3728 // or if dead code is encountered.
3729 // If size_in_bytes is zero, do not bother with overlap checks.
3730 int InitializeNode::captured_store_insertion_point(intptr_t start,
3731 int size_in_bytes,
3732 PhaseTransform* phase) {
3733 const int FAIL = 0, MAX_STORE = MAX2(BytesPerLong, (int)MaxVectorSize);
3734
3735 if (is_complete())
3736 return FAIL; // arraycopy got here first; punt
3737
3738 assert(allocation() != NULL, "must be present");
3739
3740 // no negatives, no header fields:
3741 if (start < (intptr_t) allocation()->minimum_header_size()) return FAIL;
3742
3743 // after a certain size, we bail out on tracking all the stores:
3744 intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
3745 if (start >= ti_limit) return FAIL;
3746
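// Scan the captured stores, which are kept sorted by offset, until we either
// find an exact match or pass the requested range.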
3747 for (uint i = InitializeNode::RawStores, limit = req(); ; ) {
3748 if (i >= limit) return -(int)i; // not found; here is where to put it
3749
3750 Node* st = in(i);
3751 intptr_t st_off = get_store_offset(st, phase);
3752 if (st_off < 0) {
3753 if (st != zero_memory()) {
3754 return FAIL; // bail out if there is dead garbage
3755 }
3756 } else if (st_off > start) {
3757 // ...we are done, since stores are ordered
3758 if (st_off < start + size_in_bytes) {
3759 return FAIL; // the next store overlaps
3760 }
3761 return -(int)i; // not found; here is where to put it
3762 } else if (st_off < start) {
3763 assert(st->as_Store()->memory_size() <= MAX_STORE, "");
3764 if (size_in_bytes != 0 &&
3765 start < st_off + MAX_STORE &&
3766 start < st_off + st->as_Store()->memory_size()) {
3767 return FAIL; // the previous store overlaps
3768 }
3769 } else {
3770 if (size_in_bytes != 0 &&
3771 st->as_Store()->memory_size() != size_in_bytes) {
3772 return FAIL; // mismatched store size
3773 }
3774 return i;
3775 }
3776
3777 ++i;
3778 }
3779 }
3780
3781 // Look for a captured store which initializes at the offset 'start'
3782 // with the given size. If there is no such store, and no other
3783 // initialization interferes, then return zero_memory (the memory