/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_GENERATION_HPP
#define SHARE_VM_GC_SHARED_GENERATION_HPP

#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                        - abstract base class
// - DefNewGeneration                - allocation area (copy collected)
//   - ParNewGeneration              - a DefNewGeneration that is collected by
//                                     several threads
// - CardGeneration                  - abstract class adding offset array behavior
//   - TenuredGeneration             - tenured (old object) space (markSweepCompact)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                     Boehm-Demers-Schenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration
//
//   ParNewGeneration + ConcurrentMarkSweepGeneration
//

class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
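
// Illustrative sketch (not part of this interface): walking a chain of
// contributed scratch blocks.  The helper below is hypothetical; it assumes,
// per the comment above, that the two header fields together occupy two heap
// words, so the usable payload is "num_words - 2" words starting at
// "scratch_space".
//
//   size_t usable_scratch_words(const ScratchBlock* sb) {
//     return sb->num_words - 2;               // payload after the two header words
//   }
//
//   for (ScratchBlock* sb = list; sb != NULL; sb = sb->next) {
//     HeapWord* payload = sb->scratch_space;
//     // ... use up to usable_scratch_words(sb) words at payload ...
//   }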
class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc;       // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region;  // for collectors that want to "remember" a value for
                                // used region at some specific point during collection.

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code.  Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iterations; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

 public:
  // The set of possible generation kinds.
  enum Name {
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM32_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };

  // Allocate and initialize ("weak") refs processing support.
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true;  }
  virtual bool refs_discovery_is_mt()     const { return false; }

  // Space inquiries (results in bytes)
  size_t initial_size();
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion.  (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
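
  // Illustrative sketch (assumption about the caller, not defined here): a
  // young generation typically asks its older counterpart this question
  // before deciding whether a minor collection can be attempted, roughly:
  //
  //   // worst case: every used byte in the young generation survives
  //   bool safe = _old_gen->promotion_attempt_is_safe(young_gen->used());
  //
  // If the answer is false, the heap may instead schedule a full collection.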
  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC.  Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region()    { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into the committed areas in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product JVMs, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  /* Returns "TRUE" iff "p" points into the reserved area of the generation. */
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;
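
  // Worked example (illustrative): the "overflow_limit" in should_allocate()
  // rejects requests whose size in bytes would not fit in a size_t.  On a
  // 64-bit platform, where BitsPerSize_t == 64 and LogHeapWordSize == 3,
  //
  //   overflow_limit = (size_t)1 << (64 - 3) = 2^61 words,
  //
  // so any word_size >= 2^61 would overflow when converted to bytes
  // (word_size << LogHeapWordSize) and is therefore refused up front.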
  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments "*top_addr()" with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock-free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t tlab_used() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}
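
  // Illustrative sketch (assumption about the caller, not a quote of any
  // collector): a copying young-generation collector that decides an object
  // should be tenured typically hands it to the next generation roughly as
  // follows, falling back to its own promotion-failure handling when NULL
  // comes back ("handle_promotion_failure" is a hypothetical helper here):
  //
  //   oop new_obj = _old_gen->promote(old_obj, old_obj->size());
  //   if (new_obj == NULL) {
  //     new_obj = handle_promotion_failure(old_obj);
  //   }
  //
  // The parallel variant, par_promote(), additionally receives the original
  // mark word because the copying thread may already have installed a
  // forwarding pointer in "old_obj".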
  // Returns "true" iff collect() should subsequently be called on this
  // generation.  See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed.  Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If full is true attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool   is_tlab,
                                        bool   parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {}

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {}

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {}

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again.  The default is to do nothing.
  virtual void ensure_parsability() {}

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // Both _time_of_last_gc and now are set using a time source
    // that guarantees monotonically non-decreasing values provided
    // the underlying platform provides such a source.  So we still
    // have to guard against non-monotonicity.
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection.  This method
  // updates those statistics.  current_generation is the generation
  // that was most recently collected.  This allows the generation to
  // decide what statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data if
  // the collection of the young generation has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(Generation* current_generation, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }
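
  // Illustrative sketch (assumption about the caller, not defined in this
  // file): a full mark-sweep-compact collection drives the hooks above in
  // phase order, after live objects have been marked, roughly:
  //
  //   for (each generation)  gen->prepare_for_compaction(&cp);  // phase2: compute new addresses
  //   for (each generation)  gen->adjust_pointers();            // phase3: update references
  //   for (each generation)  gen->compact();                    // phase4: slide objects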
  // Support for CMS's rescan.  In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors.  A NULL value indicates to the client that
  // no data recording is expected by the provider.  The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  virtual void sample_eden_chunk() {}

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {}

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->apply" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)          \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                   \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}
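
  // Illustrative sketch (assumption about the caller, not defined here):
  // before a collection, the heap may gather scratch space on behalf of the
  // collecting generation, roughly:
  //
  //   ScratchBlock* list = NULL;
  //   other_gen->contribute_scratch(list, requestor_gen, max_promotion_words);
  //   // "list" now heads a chain of ScratchBlocks usable by requestor_gen.
  //
  // Each generation then gets a reset_scratch() call (see below) once the
  // collection is finished with the contributed space.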
  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {}

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each.  An object is safe if its references point to other objects in
  // the heap.  This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations.  Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;

  void print_heap_change(size_t prev_used) const;

  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify() = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info();

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

#endif // SHARE_VM_GC_SHARED_GENERATION_HPP