1 /* 2 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.hpp" 27 #include "gc/shared/allocTracer.hpp" 28 #include "gc/shared/collectedHeap.hpp" 29 #include "gc/shared/memAllocator.hpp" 30 #include "gc/shared/threadLocalAllocBuffer.inline.hpp" 31 #include "memory/universe.hpp" 32 #include "oops/arrayOop.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "prims/jvmtiExport.hpp" 35 #include "runtime/sharedRuntime.hpp" 36 #include "runtime/handles.inline.hpp" 37 #include "runtime/thread.inline.hpp" 38 #include "services/lowMemoryDetector.hpp" 39 #include "utilities/align.hpp" 40 #include "utilities/copy.hpp" 41 42 class MemAllocator::Allocation: StackObj { 43 friend class MemAllocator; 44 45 const MemAllocator& _allocator; 46 Thread* _thread; 47 oop* _obj; 48 bool _overhead_limit_exceeded; 49 bool _allocated_outside_tlab; 50 size_t _allocated_tlab_size; 51 bool _tlab_end_reset_for_sample; 52 53 bool check_out_of_memory(); 54 void verify_before(); 55 void verify_after(); 56 void notify_allocation(); 57 void notify_allocation_jvmti_allocation_event(); 58 void notify_allocation_jvmti_sampler(); 59 void notify_allocation_low_memory_detector(); 60 void notify_allocation_jfr_sampler(); 61 void notify_allocation_dtrace_sampler(); 62 void check_for_bad_heap_word_value() const; 63 #ifdef ASSERT 64 void check_for_valid_allocation_state() const; 65 #endif 66 67 public: 68 Allocation(const MemAllocator& allocator, oop* obj) 69 : _allocator(allocator), 70 _thread(Thread::current()), 71 _obj(obj), 72 _overhead_limit_exceeded(false), 73 _allocated_outside_tlab(false), 74 _allocated_tlab_size(0), 75 _tlab_end_reset_for_sample(false) 76 { 77 verify_before(); 78 } 79 80 ~Allocation() { 81 if (!check_out_of_memory()) { 82 verify_after(); 83 notify_allocation(); 84 } 85 } 86 87 oop obj() const { return *_obj; } 88 void set_obj(oop obj) const { *_obj = obj; } 89 }; 90 91 bool MemAllocator::Allocation::check_out_of_memory() { 92 Thread* THREAD = _thread; 93 assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage"); 94 95 if (obj() != NULL) { 96 return false; 97 } 98 99 if (!_overhead_limit_exceeded) { 100 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 101 report_java_out_of_memory("Java heap space"); 102 103 if (JvmtiExport::should_post_resource_exhausted()) { 104 JvmtiExport::post_resource_exhausted( 105 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP, 106 "Java heap space"); 107 } 108 THROW_OOP_(Universe::out_of_memory_error_java_heap(), true); 109 } else { 110 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 111 report_java_out_of_memory("GC overhead limit exceeded"); 112 113 if (JvmtiExport::should_post_resource_exhausted()) { 114 JvmtiExport::post_resource_exhausted( 115 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP, 116 "GC overhead limit exceeded"); 117 } 118 119 THROW_OOP_(Universe::out_of_memory_error_gc_overhead_limit(), true); 120 } 121 } 122 123 void MemAllocator::Allocation::verify_before() { 124 // Clear unhandled oops for memory allocation. Memory allocation might 125 // not take out a lock if from tlab, so clear here. 126 Thread* THREAD = _thread; 127 CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();) 128 assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending"); 129 debug_only(check_for_valid_allocation_state()); 130 assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); 131 } 132 133 void MemAllocator::Allocation::verify_after() { 134 NOT_PRODUCT(check_for_bad_heap_word_value();) 135 } 136 137 void MemAllocator::Allocation::check_for_bad_heap_word_value() const { 138 MemRegion obj_range = _allocator.obj_memory_range(obj()); 139 HeapWord* addr = obj_range.start(); 140 size_t size = obj_range.word_size(); 141 if (CheckMemoryInitialization && ZapUnusedHeapArea) { 142 for (size_t slot = 0; slot < size; slot += 1) { 143 assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal), 144 "Found badHeapWordValue in post-allocation check"); 145 } 146 } 147 } 148 149 #ifdef ASSERT 150 void MemAllocator::Allocation::check_for_valid_allocation_state() const { 151 // How to choose between a pending exception and a potential 152 // OutOfMemoryError? Don't allow pending exceptions. 153 // This is a VM policy failure, so how do we exhaustively test it? 154 assert(!_thread->has_pending_exception(), 155 "shouldn't be allocating with pending exception"); 156 if (StrictSafepointChecks) { 157 assert(_thread->allow_allocation(), 158 "Allocation done by thread for which allocation is blocked " 159 "by No_Allocation_Verifier!"); 160 // Allocation of an oop can always invoke a safepoint, 161 // hence, the true argument 162 _thread->check_for_valid_safepoint_state(true); 163 } 164 } 165 #endif 166 167 void MemAllocator::Allocation::notify_allocation_jvmti_sampler() { 168 // support for JVMTI VMObjectAlloc event (no-op if not enabled) 169 JvmtiExport::vm_object_alloc_event_collector(obj()); 170 171 if (!ThreadHeapSampler::enabled()) { 172 // Sampling disabled 173 return; 174 } 175 176 if (!_allocated_outside_tlab && _allocated_tlab_size == 0 && !_tlab_end_reset_for_sample) { 177 // Sample if it's a non-TLAB allocation, or a TLAB allocation that either refills the TLAB 178 // or expands it due to taking a sampler induced slow path. 179 return; 180 } 181 182 assert(JavaThread::current()->heap_sampler().add_sampling_collector(), 183 "Should never return false."); 184 185 // Only check if the sampler could actually sample something in this path. 186 assert(!JvmtiExport::should_post_sampled_object_alloc() || 187 !JvmtiSampledObjectAllocEventCollector::object_alloc_is_safe_to_sample() || 188 _thread->heap_sampler().sampling_collector_present(), 189 "Sampling collector not present."); 190 191 // If we want to be sampling, protect the allocated object with a Handle 192 // before doing the callback. The callback is done in the destructor of 193 // the JvmtiSampledObjectAllocEventCollector. 194 HandleMark hm; 195 Handle obj_h(_thread, obj()); 196 set_obj(NULL); 197 if (JvmtiExport::should_post_sampled_object_alloc()) { 198 JvmtiSampledObjectAllocEventCollector collector; 199 oop obj = obj_h(); 200 HeapWord* mem = (HeapWord*)obj; 201 size_t size_in_bytes = _allocator._word_size * HeapWordSize; 202 ThreadLocalAllocBuffer& tlab = _thread->tlab(); 203 size_t bytes_since_last = _allocated_outside_tlab ? 0 : tlab.bytes_since_last_sample_point(); 204 _thread->heap_sampler().check_for_sampling(mem, size_in_bytes, bytes_since_last); 205 } 206 set_obj(obj_h()); 207 208 assert(JavaThread::current()->heap_sampler().remove_sampling_collector(), "Should never return false."); 209 210 if (_tlab_end_reset_for_sample || _allocated_tlab_size != 0) { 211 _thread->tlab().set_sample_end(); 212 } 213 } 214 215 void MemAllocator::Allocation::notify_allocation_low_memory_detector() { 216 // support low memory notifications (no-op if not enabled) 217 LowMemoryDetector::detect_low_memory_for_collected_pools(); 218 } 219 220 void MemAllocator::Allocation::notify_allocation_jfr_sampler() { 221 HeapWord* mem = (HeapWord*)obj(); 222 size_t size_in_bytes = _allocator._word_size * HeapWordSize; 223 224 if (_allocated_outside_tlab) { 225 AllocTracer::send_allocation_outside_tlab(_allocator._klass, mem, size_in_bytes, _thread); 226 } else if (_allocated_tlab_size != 0) { 227 // TLAB was refilled 228 AllocTracer::send_allocation_in_new_tlab(_allocator._klass, mem, _allocated_tlab_size * HeapWordSize, 229 size_in_bytes, _thread); 230 } 231 } 232 233 void MemAllocator::Allocation::notify_allocation_dtrace_sampler() { 234 if (DTraceAllocProbes) { 235 // support for Dtrace object alloc event (no-op most of the time) 236 Klass* klass = _allocator._klass; 237 size_t word_size = _allocator._word_size; 238 if (klass != NULL && klass->name() != NULL) { 239 SharedRuntime::dtrace_object_alloc(obj(), (int)word_size); 240 } 241 } 242 } 243 244 void MemAllocator::Allocation::notify_allocation() { 245 notify_allocation_low_memory_detector(); 246 notify_allocation_jfr_sampler(); 247 notify_allocation_dtrace_sampler(); 248 notify_allocation_jvmti_sampler(); 249 } 250 251 HeapWord* MemAllocator::allocate_outside_tlab(Allocation& allocation) const { 252 allocation._allocated_outside_tlab = true; 253 HeapWord* mem = _heap->mem_allocate(_word_size, &allocation._overhead_limit_exceeded); 254 if (mem == NULL) { 255 return mem; 256 } 257 258 NOT_PRODUCT(_heap->check_for_non_bad_heap_word_value(mem, _word_size)); 259 size_t size_in_bytes = _word_size * HeapWordSize; 260 _thread->incr_allocated_bytes(size_in_bytes); 261 262 return mem; 263 } 264 265 HeapWord* MemAllocator::allocate_from_tlab(Allocation& allocation) const { 266 assert(UseTLAB, "should use UseTLAB"); 267 268 // Try allocating from an existing TLAB. 269 HeapWord* mem = _thread->tlab().allocate(_word_size); 270 if (mem != NULL) { 271 return mem; 272 } 273 274 // 3. Try refilling the TLAB and allocating the object in it. 275 return allocate_from_tlab_slow(allocation); 276 } 277 278 HeapWord* MemAllocator::allocate_from_tlab_slow(Allocation& allocation) const { 279 HeapWord* mem = NULL; 280 ThreadLocalAllocBuffer& tlab = _thread->tlab(); 281 282 if (ThreadHeapSampler::enabled()) { 283 // Try to allocate the sampled object from TLAB, it is possible a sample 284 // point was put and the TLAB still has space. 285 tlab.set_back_allocation_end(); 286 mem = tlab.allocate(_word_size); 287 if (mem != NULL) { 288 allocation._tlab_end_reset_for_sample = true; 289 return mem; 290 } 291 } 292 293 // Retain tlab and allocate object in shared space if 294 // the amount free in the tlab is too large to discard. 295 if (tlab.free() > tlab.refill_waste_limit()) { 296 tlab.record_slow_allocation(_word_size); 297 return NULL; 298 } 299 300 // Discard tlab and allocate a new one. 301 // To minimize fragmentation, the last TLAB may be smaller than the rest. 302 size_t new_tlab_size = tlab.compute_size(_word_size); 303 304 tlab.clear_before_allocation(); 305 306 if (new_tlab_size == 0) { 307 return NULL; 308 } 309 310 // Allocate a new TLAB requesting new_tlab_size. Any size 311 // between minimal and new_tlab_size is accepted. 312 size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(_word_size); 313 mem = _heap->allocate_new_tlab(min_tlab_size, new_tlab_size, &allocation._allocated_tlab_size); 314 if (mem == NULL) { 315 assert(allocation._allocated_tlab_size == 0, 316 "Allocation failed, but actual size was updated. min: " SIZE_FORMAT 317 ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT, 318 min_tlab_size, new_tlab_size, allocation._allocated_tlab_size); 319 return NULL; 320 } 321 assert(allocation._allocated_tlab_size != 0, "Allocation succeeded but actual size not updated. mem at: " 322 PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT, 323 p2i(mem), min_tlab_size, new_tlab_size); 324 325 if (ZeroTLAB) { 326 // ..and clear it. 327 Copy::zero_to_words(mem, allocation._allocated_tlab_size); 328 } else { 329 // ...and zap just allocated object. 330 #ifdef ASSERT 331 // Skip mangling the space corresponding to the object header to 332 // ensure that the returned space is not considered parsable by 333 // any concurrent GC thread. 334 size_t hdr_size = oopDesc::header_size(); 335 Copy::fill_to_words(mem + hdr_size, allocation._allocated_tlab_size - hdr_size, badHeapWordVal); 336 #endif // ASSERT 337 } 338 339 tlab.fill(mem, mem + _word_size, allocation._allocated_tlab_size); 340 return mem; 341 } 342 343 HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const { 344 if (UseTLAB) { 345 HeapWord* result = allocate_from_tlab(allocation); 346 if (result != NULL) { 347 return result; 348 } 349 } 350 351 return allocate_outside_tlab(allocation); 352 } 353 354 oop MemAllocator::allocate() const { 355 oop obj = NULL; 356 { 357 Allocation allocation(*this, &obj); 358 HeapWord* mem = mem_allocate(allocation); 359 if (mem != NULL) { 360 obj = init_obj(mem); 361 } 362 } 363 return obj; 364 } 365 366 void MemAllocator::mem_clear(HeapWord* mem) const { 367 assert(mem != NULL, "cannot initialize NULL object"); 368 const size_t hs = oopDesc::header_size(); 369 assert(_word_size >= hs, "unexpected object size"); 370 oopDesc::set_klass_gap(mem, 0); 371 Copy::fill_to_aligned_words(mem + hs, _word_size - hs); 372 } 373 374 oop MemAllocator::init_obj(HeapWord* mem) const { 375 assert(mem != NULL, "NULL object pointer"); 376 if (UseBiasedLocking) { 377 oopDesc::set_mark_raw(mem, _klass->prototype_header()); 378 } else { 379 // May be bootstrapping 380 oopDesc::set_mark_raw(mem, markOopDesc::prototype()); 381 } 382 // Need a release store to ensure array/class length, mark word, and 383 // object zeroing are visible before setting the klass non-NULL, for 384 // concurrent collectors. 385 oopDesc::release_set_klass(mem, _klass); 386 return oop(mem); 387 } 388 389 oop ObjAllocator::init_obj(HeapWord* mem) const { 390 mem_clear(mem); 391 oop obj = MemAllocator::init_obj(mem); 392 assert(Universe::is_bootstrapping() || !obj->is_array(), "must not be an array"); 393 return obj; 394 } 395 396 oop ObjArrayAllocator::init_obj(HeapWord* mem) const { 397 // Set array length before setting the _klass field because a 398 // non-NULL klass field indicates that the object is parsable by 399 // concurrent GC. 400 assert(_length >= 0, "length should be non-negative"); 401 if (_do_zero) { 402 mem_clear(mem); 403 } 404 arrayOopDesc::set_length(mem, _length); 405 oop obj = MemAllocator::init_obj(mem); 406 assert(obj->is_array(), "must be an array"); 407 return obj; 408 } 409 410 oop ClassAllocator::init_obj(HeapWord* mem) const { 411 // Set oop_size field before setting the _klass field because a 412 // non-NULL _klass field indicates that the object is parsable by 413 // concurrent GC. 414 assert(_word_size > 0, "oop_size must be positive."); 415 mem_clear(mem); 416 java_lang_Class::set_oop_size(mem, (int)_word_size); 417 oop obj = MemAllocator::init_obj(mem); 418 assert(Universe::is_bootstrapping() || !obj->is_array(), "must not be an array"); 419 return obj; 420 }