src/share/vm/services/memSnapshot.cpp
*** old/src/share/vm/services/memSnapshot.cpp Wed Oct 31 11:57:31 2012
--- new/src/share/vm/services/memSnapshot.cpp Wed Oct 31 11:57:30 2012
*** 29,38 ****
--- 29,101 ----
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"
+ #ifdef ASSERT
+
+ void decode_pointer_record(MemPointerRecord* rec) {
+ tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
+ rec->addr() + rec->size(), (int)rec->size());
+ tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
+ if (rec->is_vm_pointer()) {
+ if (rec->is_allocation_record()) {
+ tty->print_cr(" (reserve)");
+ } else if (rec->is_commit_record()) {
+ tty->print_cr(" (commit)");
+ } else if (rec->is_uncommit_record()) {
+ tty->print_cr(" (uncommit)");
+ } else if (rec->is_deallocation_record()) {
+ tty->print_cr(" (release)");
+ } else {
+ tty->print_cr(" (tag)");
+ }
+ } else {
+ if (rec->is_arena_size_record()) {
+ tty->print_cr(" (arena size)");
+ } else if (rec->is_allocation_record()) {
+ tty->print_cr(" (malloc)");
+ } else {
+ tty->print_cr(" (free)");
+ }
+ }
+ if (MemTracker::track_callsite()) {
+ char buf[1024];
+ address pc = ((MemPointerRecordEx*)rec)->pc();
+ if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
+ tty->print_cr("\tfrom %s", buf);
+ } else {
+ tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
+ }
+ }
+ }
+
+ void decode_vm_region_record(VMMemRegion* rec) {
+ tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
+ rec->addr() + rec->size());
+ tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
+ if (rec->is_allocation_record()) {
+ tty->print_cr(" (reserved)");
+ } else if (rec->is_commit_record()) {
+ tty->print_cr(" (committed)");
+ } else {
+ ShouldNotReachHere();
+ }
+ if (MemTracker::track_callsite()) {
+ char buf[1024];
+ address pc = ((VMMemRegionEx*)rec)->pc();
+ if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
+ tty->print_cr("\tfrom %s", buf);
+ } else {
+ tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
+ }
+
+ }
+ }
+
+ #endif
+
bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
VMMemRegionEx new_rec;
assert(rec->is_allocation_record() || rec->is_commit_record(),
"Sanity check");
*** 58,135 ****
--- 121,207 ----
// we don't consolidate reserved regions, since they may be categorized
// in different types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
assert(rec->is_allocation_record(), "Sanity check");
- VMMemRegion* cur = (VMMemRegion*)current();
+ VMMemRegion* reserved_rgn = (VMMemRegion*)current();
// we don't have anything yet
- if (cur == NULL) {
+ if (reserved_rgn == NULL) {
return insert_record(rec);
}
- assert(cur->is_reserved_region(), "Sanity check");
+ assert(reserved_rgn->is_reserved_region(), "Sanity check");
// duplicated records
- if (cur->is_same_region(rec)) {
+ if (reserved_rgn->is_same_region(rec)) {
return true;
}
- assert(cur->base() > rec->addr(), "Just check: locate()");
- assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
+ assert(reserved_rgn->base() > rec->addr(), "Just check: locate()");
+ assert(!reserved_rgn->overlap_region(rec), "Can not overlap");
return insert_record(rec);
}
// we do consolidate committed regions
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
assert(rec->is_commit_record(), "Sanity check");
- cur = (VMMemRegion*)current();
- assert(cur->is_reserved_region() && cur->contains_region(rec),
+ VMMemRegion* reserved_rgn = (VMMemRegion*)current();
+ assert(reserved_rgn->is_reserved_region() && reserved_rgn->contain_region(rec),
"Sanity check");
// thread's native stack is always marked as "committed", ignore
// the "commit" operation for creating stack guard pages
- if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
+ if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
return true;
}
- cur = (VMMemRegion*)next();
- while (cur != NULL && cur->is_committed_region()) {
+ // if the reserved region has any committed regions
+ VMMemRegion* committed_rgn = (VMMemRegion*)next();
+ while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
// duplicated commit records
- if(cur->contains_region(rec)) {
+ if(committed_rgn->contain_region(rec)) {
return true;
}
- if (cur->base() > rec->addr()) {
- // committed regions can not overlap
- assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
- if (rec->addr() + rec->size() == cur->base()) {
- cur->expand_region(rec->addr(), rec->size());
- return true;
- } else {
- return insert_record(rec);
- }
- } else if (cur->base() + cur->size() == rec->addr()) {
- cur->expand_region(rec->addr(), rec->size());
+ } else if (committed_rgn->overlap_region(rec)) {
+ // overlaps front part
+ if (rec->addr() < committed_rgn->addr()) {
+ committed_rgn->expand_region(rec->addr(),
+ committed_rgn->addr() - rec->addr());
+ } else {
+ // overlaps tail part
+ address committed_rgn_end = committed_rgn->addr() +
+ committed_rgn->size();
+ assert(committed_rgn_end < rec->addr() + rec->size(),
+ "overlap tail part");
+ committed_rgn->expand_region(committed_rgn_end,
+ (rec->addr() + rec->size()) - committed_rgn_end);
+ }
+ } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
+ // adjunct each other
+ committed_rgn->expand_region(rec->addr(), rec->size());
VMMemRegion* next_reg = (VMMemRegion*)next();
// see if we can consolidate next committed region
if (next_reg != NULL && next_reg->is_committed_region() &&
- next_reg->base() == cur->base() + cur->size()) {
- cur->expand_region(next_reg->base(), next_reg->size());
+ next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
+ committed_rgn->expand_region(next_reg->base(), next_reg->size());
+ // delete merged region
remove();
}
return true;
+ } else if (committed_rgn->base() > rec->addr()) {
+ // found the location, insert this committed region
+ return insert_record(rec);
}
- cur = (VMMemRegion*)next();
+ committed_rgn = (VMMemRegion*)next();
}
return insert_record(rec);
}
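
The reworked add_committed_region() above drops the old "committed regions can not overlap" assertion and handles overlap explicitly: a commit record overlapping the front or tail of an existing committed region expands that region, an adjoining record is merged (and may consolidate with the following region), and any other record is inserted in address order. A rough standalone sketch of the interval arithmetic, assuming hypothetical Region and merge_commit names and none of the HotSpot types; the overlaps() check also mirrors what add_reserved_region() now asserts against via overlap_region():

#include <cassert>
#include <cstddef>
#include <cstdio>

typedef unsigned long addr_t;

struct Region {
  addr_t base;
  size_t size;
  addr_t end() const { return base + size; }
  // same question overlap_region() answers: do [base, end) and [a, a + s) intersect?
  bool overlaps(addr_t a, size_t s) const { return base < a + s && a < end(); }
  bool contains(addr_t a, size_t s) const { return base <= a && a + s <= end(); }
};

// Expand an existing committed region so it also covers [a, a + s); the patch
// does this in separate front-overlap and tail-overlap branches.
void merge_commit(Region* rgn, addr_t a, size_t s) {
  if (rgn->contains(a, s)) return;        // duplicated commit record
  if (a < rgn->base) {                    // grows the front part
    rgn->size += rgn->base - a;
    rgn->base = a;
  }
  if (a + s > rgn->end()) {               // grows the tail part
    rgn->size = (a + s) - rgn->base;
  }
}

int main() {
  Region r = { 0x1000, 0x1000 };          // committed [0x1000, 0x2000)
  merge_commit(&r, 0x1800, 0x1000);       // commit    [0x1800, 0x2800) overlaps the tail
  assert(r.base == 0x1000 && r.size == 0x1800);
  printf("merged: [0x%lx, 0x%lx)\n", (unsigned long)r.base, (unsigned long)r.end());
  return 0;
}
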
bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
assert(rec->is_uncommit_record(), "sanity check");
VMMemRegion* cur;
cur = (VMMemRegion*)current();
- assert(cur->is_reserved_region() && cur->contains_region(rec),
+ assert(cur->is_reserved_region() && cur->contain_region(rec),
"Sanity check");
// thread's native stack is always marked as "committed", ignore
// the "commit" operation for creating stack guard pages
if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
*** 139,149 ****
--- 211,221 ----
cur = (VMMemRegion*)next();
while (cur != NULL && cur->is_committed_region()) {
// region already uncommitted, must be due to duplicated record
if (cur->addr() >= rec->addr() + rec->size()) {
break;
- } else if (cur->contains_region(rec)) {
+ } else if (cur->contain_region(rec)) {
// uncommit whole region
if (cur->is_same_region(rec)) {
remove();
break;
} else if (rec->addr() == cur->addr() ||
*** 174,184 ****
--- 246,256 ----
}
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
assert(rec->is_deallocation_record(), "Sanity check");
VMMemRegion* cur = (VMMemRegion*)current();
- assert(cur->is_reserved_region() && cur->contains_region(rec),
+ assert(cur->is_reserved_region() && cur->contain_region(rec),
"Sanity check");
#ifdef ASSERT
VMMemRegion* next_reg = (VMMemRegion*)peek_next();
// should not have any committed memory in this reserved region
assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
*** 215,225 ****
--- 287,297 ----
}
return insert_record(rec);
}
bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
- assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
+ assert(rgn->contain_region(new_rgn_addr, new_rgn_size), "Not fully contained");
address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
size_t sz = rgn->size() - new_rgn_size;
// the original region becomes 'new' region
rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
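
When split_reserved_region() above sees the new range at the very start of the reserved region, it shrinks the region to that range (the original region "becomes" the new one) and the remaining tail is re-registered separately. A minimal standalone sketch of that bookkeeping, with hypothetical Region and split_at_front names rather than the HotSpot API:

#include <cassert>
#include <cstddef>

typedef unsigned long addr_t;

struct Region { addr_t base; size_t size; };

// Shrink 'rgn' to [new_addr, new_addr + new_size), which starts at rgn->base,
// and report the leftover tail that the caller would re-insert as its own
// reserved region.
void split_at_front(Region* rgn, addr_t new_addr, size_t new_size, Region* tail) {
  assert(rgn->base == new_addr && new_size <= rgn->size);
  tail->base = new_addr + new_size;
  tail->size = rgn->size - new_size;
  rgn->size  = new_size;
}
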
*** 499,509 ****
--- 571,581 ----
// locate a reserved region that contains the specified address, or
// the nearest reserved region has base address just above the specified
// address
reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
- if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
+ if (reserved_rec != NULL && reserved_rec->contain_region(new_rec)) {
// snapshot can only have 'live' records
assert(reserved_rec->is_reserved_region(), "Sanity check");
if (new_rec->is_allocation_record()) {
if (!reserved_rec->is_same_region(new_rec)) {
// only deal with split a bigger reserved region into smaller regions.
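
The lookup above depends on the locate() contract: it returns either the reserved region containing the address or the nearest region whose base lies above it, so the caller still has to confirm a real hit with contain_region() before handling the record. A standalone sketch of that contract over a sorted array, with hypothetical names and none of the snapshot iterator machinery:

#include <cstddef>

typedef unsigned long addr_t;

struct Region { addr_t base; size_t size; };

// Regions are sorted by base address. Returns the region containing 'addr',
// else the first region whose base lies above 'addr', else NULL.
const Region* locate(const Region* regions, int count, addr_t addr) {
  for (int i = 0; i < count; i++) {
    if (addr < regions[i].base + regions[i].size) {
      return &regions[i];
    }
  }
  return NULL;
}

// Caller-side check mirroring contain_region(): a non-NULL result is only a
// match if it really covers [addr, addr + size).
bool contains(const Region* r, addr_t addr, size_t size) {
  return r != NULL && r->base <= addr && addr + size <= r->base + r->size;
}
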