/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

#define ARENA_ALIGN_M1   (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x)   ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)


// noinline attribute
#ifdef _WINDOWS
#define _NOINLINE_ __declspec(noinline)
#else
#if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
#define _NOINLINE_
#else
#define _NOINLINE_ __attribute__ ((noinline))
#endif
#endif

class AllocFailStrategy {
public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// All classes in the virtual machine must be subclassed
// by one of the following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by: free & malloc).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// For classes in Metaspace (class data).
// - MetaspaceObj
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that avoid allocating the
// vtbl entries in the objects should therefore not be the printable
// subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap. The _OBJ variants
// of the NEW/FREE_C_HEAP macros are used to allocate and deallocate
// simple objects that do not inherit from CHeapObj; note that the
// constructor and destructor are not called. The preferable way to
// allocate objects is using the new operator.
//
// WARNING: The array variants must only be used for a homogeneous array
// where all objects are of the exact type specified. If subtypes are
// stored in the array, you must pay attention to calling destructors
// as needed.
//
// NEW_RESOURCE_ARRAY(type, size)
// NEW_RESOURCE_OBJ(type)
// NEW_C_HEAP_ARRAY(type, size, memflags)
// NEW_C_HEAP_OBJ(type, memflags)
// FREE_C_HEAP_ARRAY(type, old)
// FREE_C_HEAP_OBJ(objname)
// char* AllocateHeap(size_t size, const char* name);
// void  FreeHeap(void* p);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.
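// Illustrative sketch (not part of the original header): typical use of the
// paired allocate/free macros defined below. 'mtInternal' is one of the
// MEMFLAGS declared in this file; Foo is a hypothetical plain struct that
// does not inherit from CHeapObj. No constructors or destructors run.
//
//   int* counts = NEW_C_HEAP_ARRAY(int, 16, mtInternal);
//   ...
//   FREE_C_HEAP_ARRAY(int, counts);
//
//   Foo* f = NEW_C_HEAP_OBJ(Foo, mtInternal);   // raw storage, no ctor call
//   ...
//   FREE_C_HEAP_OBJ(f);                         // raw free, no dtor call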
// Base class for objects allocated in the C-heap.

// In non-product mode we introduce a super class for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif


/*
 * Memory types
 */
enum MemoryType {
  // Memory types by subsystem. They occupy the lower byte.
  mtJavaHeap          = 0x00,  // Java heap
  mtClass             = 0x01,  // memory class for Java classes
  mtThread            = 0x02,  // memory for thread objects
  mtThreadStack       = 0x03,
  mtCode              = 0x04,  // memory for generated code
  mtGC                = 0x05,  // memory for GC
  mtCompiler          = 0x06,  // memory for compiler
  mtInternal          = 0x07,  // memory used by VM, but does not belong to
                               // any of the above categories, and is not used
                               // for native memory tracking
  mtOther             = 0x08,  // memory not used by VM
  mtSymbol            = 0x09,  // symbol
  mtNMT               = 0x0A,  // memory used by native memory tracking
  mtClassShared       = 0x0B,  // class data sharing
  mtChunk             = 0x0C,  // chunk that holds content of arenas
  mtTest              = 0x0D,  // test type for verifying NMT
  mtTracing           = 0x0E,  // memory used for Tracing
  mtLogging           = 0x0F,  // memory for logging
  mtNone              = 0x10,  // undefined
  mt_number_of_types  = 0x11   // number of memory types (mtDontTrack
                               // is not included as a valid type)
};

typedef MemoryType MEMFLAGS;


#if INCLUDE_NMT

extern bool NMT_track_callsite;

#else

const bool NMT_track_callsite = false;

#endif // INCLUDE_NMT

class NativeCallStack;


template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new(size_t size) throw();
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
                                 const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant)
                                 throw();
  _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new [](size_t size) throw();
  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
                                   const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant)
                                   throw();
  void  operator delete(void* p);
  void  operator delete [] (void* p);
};
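// Illustrative sketch (not part of the original header): a VM-internal class
// picks its C-heap memory category by instantiating CHeapObj with one of the
// MEMFLAGS above. MyTable is a hypothetical name.
//
//   class MyTable : public CHeapObj<mtInternal> {
//     ...
//   };
//
//   MyTable* t = new MyTable();  // goes through CHeapObj<mtInternal>::operator new,
//                                // so the memory is attributed to mtInternal
//   delete t;                    // goes through CHeapObj<mtInternal>::operator delete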
// Base class for objects allocated on the stack only.
// Calling new or delete will result in a fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 private:
  void* operator new(size_t size) throw();
  void* operator new [](size_t size) throw();
#ifdef __IBMCPP__
 public:
#endif
  void  operator delete(void* p);
  void  operator delete [](void* p);
};

// Base class for objects used as value objects.
// Calling new or delete will result in a fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make a class bigger if it has a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not happening. So never make a ValueObj class a direct subclass
// of this object; use the VALUE_OBJ_CLASS_SPEC macro instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 private:
  void* operator new(size_t size) throw();
  void  operator delete(void* p);
  void* operator new [](size_t size) throw();
  void  operator delete [](void* p);
};


// Base class for objects stored in Metaspace.
// Calling delete will result in a fatal error.
//
// Do not inherit from something with a vptr because this class does
// not introduce one. This class is used to allocate both shared read-only
// and shared read-write classes.
//

class ClassLoaderData;

class MetaspaceObj {
 public:
  bool is_metaspace_object() const;
  bool is_shared() const;
  void print_address_on(outputStream* st) const;  // nonvirtual address printing

#define METASPACE_OBJ_TYPES_DO(f) \
  f(Unknown) \
  f(Class) \
  f(Symbol) \
  f(TypeArrayU1) \
  f(TypeArrayU2) \
  f(TypeArrayU4) \
  f(TypeArrayU8) \
  f(TypeArrayOther) \
  f(Method) \
  f(ConstMethod) \
  f(MethodData) \
  f(ConstantPool) \
  f(ConstantPoolCache) \
  f(Annotation) \
  f(MethodCounters) \
  f(Deallocated)

#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
      METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
      default:
        ShouldNotReachHere();
        return NULL;
    }
  }

  static MetaspaceObj::Type array_type(size_t elem_size) {
    switch (elem_size) {
      case 1: return TypeArrayU1Type;
      case 2: return TypeArrayU2Type;
      case 4: return TypeArrayU4Type;
      case 8: return TypeArrayU8Type;
      default:
        return TypeArrayOtherType;
    }
  }

  void* operator new(size_t size, ClassLoaderData* loader_data,
                     size_t word_size, bool read_only,
                     Type type, Thread* thread) throw();
                     // can't use TRAPS from this header file.
  void operator delete(void* p) { ShouldNotCallThis(); }
};
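// Illustrative note (not part of the original header): METASPACE_OBJ_TYPES_DO
// is an X-macro. Applying METASPACE_OBJ_TYPE_DECLARE to it expands the Type
// enum to, roughly:
//
//   enum Type { UnknownType, ClassType, SymbolType, ..., DeallocatedType,
//               _number_of_types };
//
// and the METASPACE_OBJ_TYPE_NAME_CASE expansion makes, e.g.,
// type_name(ClassType) return the string "Class". Adding a new metadata kind
// therefore only requires adding one f(...) line to the list above.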
// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};


//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;   // Next Chunk in list
  const size_t _len;    // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    tiny_size  =  256  - slack, // Size of first chunk (tiny)
    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of the above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len; }
  Chunk* next()   const         { return _next; }
  void set_next(Chunk* n)       { _next = n; }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};
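// Illustrative layout (not part of the original header): a Chunk is a header
// immediately followed by its data area, so the accessors above reduce to
// pointer arithmetic on 'this':
//
//   +--------------------+ <- this
//   | _next, _len, pad   |    header, padded to ARENA_ALIGN(sizeof(Chunk))
//   +--------------------+ <- bottom() == (char*)this + aligned_overhead_size()
//   | data area          |    _len bytes, parceled out by the owning Arena
//   +--------------------+ <- top() == bottom() + _len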
//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  MEMFLAGS    _flags;           // Memory tracking flags

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // Current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
  }

 public:
  Arena(MEMFLAGS memflag);
  Arena(MEMFLAGS memflag, size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size) throw();
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
  void  operator delete(void* p);

  // Fast allocate in the arena. Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in arena. Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         { return _size_in_bytes; };
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
  static void free_all(char** start, char** end)                                     PRODUCT_RETURN;

private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};
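// Illustrative sketch (not part of the original header) of the Amalloc fast
// path: each allocation is a bump of _hwm within the current chunk, and the
// arena's memory is reclaimed all at once (cf. destruct_contents()) rather
// than per allocation. mtInternal is chosen arbitrarily for the example.
//
//   Arena arena(mtInternal);
//   char*  buf  = (char*)  arena.Amalloc(13);            // rounded up by ARENA_ALIGN
//   jlong* vals = (jlong*) arena.Amalloc_D(8 * sizeof(jlong));
//   ...
//   // no per-allocation frees required; everything dies with 'arena'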
// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)


//%note allocation_1
extern char* resource_allocate_bytes(size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_allocate_bytes(Thread* thread, size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern void resource_free_bytes( char *old, size_t size );
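// Illustrative sketch (not part of the original header): raw resource-area
// allocation is scoped by a ResourceMark (see resourceArea.hpp); the bytes
// are reclaimed when the mark goes out of scope:
//
//   ResourceMark rm;
//   char* buf = resource_allocate_bytes(256);
//   int*  tmp = NEW_RESOURCE_ARRAY(int, 32);   // macro defined below
//   ...   // buf and tmp die with 'rm'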
//----------------------------------------------------------------------
// Base class for objects allocated in the resource area by default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP) Foo(...) or in an Arena with new (&arena).
// ResourceObjs can be allocated within other objects, but don't use
// new or delete (allocation_type is unknown). If new is used to allocate,
// use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on the stack the new() operator is not
  // called but garbage on the stack may look like a valid allocation_type.
  // Store the negated 'this' pointer when new() is called to distinguish cases.
  // Use the second array element for a verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj();                      // default constructor
  ResourceObj(const ResourceObj& r);  // default copy constructor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw();
  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();
  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();

  void* operator new(size_t size, Arena *arena) throw() {
    address res = (address)arena->Amalloc(size);
    DEBUG_ONLY(set_allocation_type(res, ARENA);)
    return res;
  }

  void* operator new [](size_t size, Arena *arena) throw() {
    address res = (address)arena->Amalloc(size);
    DEBUG_ONLY(set_allocation_type(res, ARENA);)
    return res;
  }

  void* operator new(size_t size) throw() {
    address res = (address)resource_allocate_bytes(size);
    DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
    return res;
  }

  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
    address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
    return res;
  }

  void* operator new [](size_t size) throw() {
    address res = (address)resource_allocate_bytes(size);
    DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
    return res;
  }

  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
    address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
    return res;
  }

  void  operator delete(void* p);
  void  operator delete [](void* p);
};
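// Illustrative sketch (not part of the original header): Foo is a
// hypothetical ResourceObj subclass; the placement-new overloads above select
// where an instance lives, and the debug-only allocation_type tracking lets
// asserts verify that choice later:
//
//   Foo* a = new Foo();                                    // resource area (default)
//   Foo* b = new (ResourceObj::C_HEAP, mtInternal) Foo();  // C heap, tagged mtInternal
//   Foo* c = new (&arena) Foo();                           // some existing Arena
//   delete b;    // the C-heap instance is deleted; 'a' and 'c'
//                // are reclaimed with their pools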
// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\
                                    (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
  /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
  NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)

#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)

#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags))

#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)

#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))

#define FREE_C_HEAP_ARRAY(type, old) \
  FreeHeap((char*)(old))

// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
  NEW_C_HEAP_ARRAY(type, 1, memflags)

#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
  NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)

// deallocate obj of type in heap without calling dtor
#define FREE_C_HEAP_OBJ(objname)\
  FreeHeap((char*)objname);

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();  // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif
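// Illustrative sketch (not part of the original header): in non-product
// builds, an AllocStats on the stack brackets a region of code and reports
// allocation activity since its construction:
//
//   AllocStats stats;
//   ...              // code under measurement
//   stats.print();   // mallocs/frees/resource bytes since 'stats' was created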
//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer. Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
 protected:
  NOT_PRODUCT(int _nesting;)

 public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};

// Helper class to allocate arrays that may become large.
// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
// and uses mapped memory for larger allocations.
// Most OS mallocs do something similar but Solaris malloc does not revert
// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
template <class E, MEMFLAGS F>
class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
  char* _addr;
  bool _use_malloc;
  size_t _size;
  bool _free_in_destructor;

  static bool should_use_malloc(size_t size) {
    return size < ArrayAllocatorMallocLimit;
  }

  static char* allocate_inner(size_t& size, bool& use_malloc);
 public:
  ArrayAllocator(bool free_in_destructor = true) :
    _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }

  ~ArrayAllocator() {
    if (_free_in_destructor) {
      free();
    }
  }

  E* allocate(size_t length);
  E* reallocate(size_t new_length);
  void free();
};

#endif // SHARE_VM_MEMORY_ALLOCATION_HPP