#include "utilities/align.hpp"

size_t CardTable::compute_byte_map_size() {
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
  _scanned_concurrently(conc_scan),
  _whole_heap(whole_heap),
  _guard_index(0),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _byte_map(NULL),
  _byte_map_base(NULL),
  _cur_covered_regions(0),
  _covered(NULL),
  _committed(NULL),
  _guard_region()
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be less than or equal to 512");

  _covered = new MemRegion[_max_covered_regions];
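  // Note: a standard (throwing) operator new[] reports failure with
  // std::bad_alloc rather than by returning NULL, so this check is purely
  // defensive; the updated version below sidesteps the question by
  // allocating the array from the C heap instead.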
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

CardTable::~CardTable() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

void CardTable::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (CardValue*) heap_rs.base();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);

// Updated version of the same file: _covered and _committed are now
// allocated eagerly as NMT-tracked C-heap arrays instead of with new[].

#include "utilities/align.hpp"

size_t CardTable::compute_byte_map_size() {
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}
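
// Worked example (illustrative numbers, assuming cards_required() counts one
// card per card_size_in_words plus one guard card): a 512 MB heap with
// 8-byte HeapWords and 512-byte cards needs 2^20 + 1 card bytes, so
// _guard_index + 1 == 2^20 + 1, which align_up() rounds to 2^20 + 4 KB
// given 4 KB pages and allocation granularity.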

CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
  _scanned_concurrently(conc_scan),
  _whole_heap(whole_heap),
  _guard_index(0),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _byte_map(NULL),
  _byte_map_base(NULL),
  _cur_covered_regions(0),
  _covered(MemRegion::create_array(_max_covered_regions, mtGC)),
  _committed(MemRegion::create_array(_max_covered_regions, mtGC)),
  _guard_region()
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be less than or equal to 512");
}
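
// Note: MemRegion::create_array allocates the array with NEW_C_HEAP_ARRAY
// under the mtGC NMT category and default-constructs each element; the
// allocator exits the VM on failure, so no NULL check is needed here, and
// the destructor's FREE_C_HEAP_ARRAY calls release the same C-heap storage.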

CardTable::~CardTable() {
  FREE_C_HEAP_ARRAY(MemRegion, _covered);
  FREE_C_HEAP_ARRAY(MemRegion, _committed);
}

void CardTable::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;

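  // With the default page size, rs_align == 0 lets ReservedSpace pick its
  // default alignment; with large pages the reservation must instead be
  // aligned to at least both the page size and the allocation granularity.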
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (CardValue*) heap_rs.base();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
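
  // Worked example (illustrative addresses): with card_shift == 9 (512-byte
  // cards) and low_bound == 0x20000000, _byte_map_base sits 0x100000 card
  // bytes below _byte_map. The barrier's mark at
  //   _byte_map_base + (uintptr_t(p) >> 9)
  // is then &_byte_map[(uintptr_t(p) - uintptr_t(low_bound)) >> 9]:
  // p == low_bound maps to _byte_map[0], and every further 512 bytes of
  // heap advance the card index by one.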