 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"

size_t PLAB::min_size() {
  // Make sure that we return something that is larger than AlignmentReserve
  return align_object_size(MAX2(MinTLABSize / HeapWordSize, (size_t)oopDesc::header_size())) + AlignmentReserve;
}

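// PLABs are limited to the same maximum size as TLABs.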
size_t PLAB::max_size() {
  return ThreadLocalAllocBuffer::max_size();
}

PLAB::PLAB(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
{
  // arrayOopDesc::header_size depends on command line initialization.
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
  assert(min_size() > AlignmentReserve,
         "Minimum PLAB size " SIZE_FORMAT " must be larger than alignment reserve " SIZE_FORMAT " "
         "to be able to contain objects", min_size(), AlignmentReserve);
}

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
size_t PLAB::AlignmentReserve;

void PLAB::flush_and_retire_stats(PLABStats* stats) {
  // Retire the last allocation buffer.
  size_t unused = retire_internal();

  // Now flush the statistics.
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_undo_wasted(_undo_wasted);
  stats->add_unused(unused);

  // Since we have flushed the stats we need to clear the _allocated and _wasted
  // fields in case somebody retains an instance of this over GCs. Not doing so
  // will artificially inflate the values in the statistics.
  _allocated = 0;
  _wasted = 0;
  _undo_wasted = 0;
}

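// Retire the current buffer, counting any unused space at the end as waste.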
void PLAB::retire() {
  _wasted += retire_internal();
}

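// Fill the unused tail of the buffer with a dummy (filler) object so the heap
// stays parsable, and return the number of unused words.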
size_t PLAB::retire_internal() {
  size_t result = 0;
  if (_top < _hard_end) {
    Universe::heap()->fill_with_dummy_object(_top, _hard_end, true);
    result += invalidate();
  }
  return result;
}

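// Fill the undone allocation with a dummy object and count its words as undo waste.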
void PLAB::add_undo_waste(HeapWord* obj, size_t word_sz) {
  Universe::heap()->fill_with_dummy_object(obj, obj + word_sz, true);
  _undo_wasted += word_sz;
}

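// Roll back the most recent allocation by moving _top back to the start of the object.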
void PLAB::undo_last_allocation(HeapWord* obj, size_t word_sz) {
  assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
  assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
  _top = obj;
}

void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (contains(obj)) {
    assert(contains(obj + word_sz - 1), "should contain whole object");
    undo_last_allocation(obj, word_sz);
  } else {
    add_undo_waste(obj, word_sz);
  }
}

 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"

size_t PLAB::min_size() {
  // Make sure that we return something that is larger than AlignmentReserve
  return align_object_size(MAX2(MinTLABSize / HeapWordSize, (size_t)oopDesc::header_size())) + AlignmentReserve;
}

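// PLABs are limited to the same maximum size as TLABs.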
size_t PLAB::max_size() {
  return ThreadLocalAllocBuffer::max_size();
}

PLAB::PLAB(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
{
  // Object and array header sizes depend on command line initialization.
  CollectedHeap* heap = Universe::heap();
  int rsv_regular = heap->obj_header_size();
  int rsv_array = align_object_size(heap->array_header_size(T_INT));
  AlignmentReserve = rsv_regular > MinObjAlignment ? rsv_array : 0;
  assert(min_size() > AlignmentReserve,
         "Minimum PLAB size " SIZE_FORMAT " must be larger than alignment reserve " SIZE_FORMAT " "
         "to be able to contain objects", min_size(), AlignmentReserve);
}

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
size_t PLAB::AlignmentReserve;

void PLAB::flush_and_retire_stats(PLABStats* stats) {
  // Retire the last allocation buffer.
  size_t unused = retire_internal();

  // Now flush the statistics.
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_undo_wasted(_undo_wasted);
  stats->add_unused(unused);

  // Since we have flushed the stats we need to clear the _allocated and _wasted
  // fields in case somebody retains an instance of this over GCs. Not doing so
  // will artificially inflate the values in the statistics.
  _allocated = 0;
  _wasted = 0;
  _undo_wasted = 0;
}

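// Retire the current buffer, counting any unused space at the end as waste.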
void PLAB::retire() {
  _wasted += retire_internal();
}

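// Fill the unused tail of the buffer with a dummy (filler) object so the heap
// stays parsable, and return the number of unused words.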
size_t PLAB::retire_internal() {
  size_t result = 0;
  if (_top < _hard_end) {
    assert(pointer_delta(_hard_end, _top) >= (size_t)(Universe::heap()->obj_header_size()),
           "better have enough space left to fill with dummy");
    Universe::heap()->fill_with_dummy_object(_top, _hard_end, true);
    result += invalidate();
  }
  return result;
}

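// Fill the undone allocation with a dummy object and count its words as undo waste.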
void PLAB::add_undo_waste(HeapWord* obj, size_t word_sz) {
  Universe::heap()->fill_with_dummy_object(obj, obj + word_sz, true);
  _undo_wasted += word_sz;
}

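// Roll back the most recent allocation by moving _top back to the start of the object.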
void PLAB::undo_last_allocation(HeapWord* obj, size_t word_sz) {
  assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
  assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
  _top = obj;
}

void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (contains(obj)) {