/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/mutexLocker.hpp"

ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::ZCollectedHeap, "Not a ZCollectedHeap");
  return (ZCollectedHeap*)heap;
}

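// The ZDirector, ZDriver and ZStat threads are created here; they are later
// visited through gc_threads_do() and stopped in stop().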
ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
    _policy(policy),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _stat(new ZStat()) {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::ZCollectedHeap;
}

const char* ZCollectedHeap::name() const {
  return ZGCName;
}

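// Most of the heap setup is done by the ZHeap constructor. Here we only check
// that the heap reservation succeeded (returning JNI_ENOMEM otherwise), record
// the reserved address range and install the barrier set.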
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
                             (HeapWord*)ZAddressReservedEnd());

  set_barrier_set(&_barrier);

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _stat->stop();
}

CollectorPolicy* ZCollectedHeap::collector_policy() const {
  return _policy;
}

AdaptiveSizePolicy* ZCollectedHeap::size_policy() {
  Unimplemented();
  return NULL;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_scavengable(oop obj) {
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
}

bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
  return is_in(p);
}

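// TLAB and object allocation sizes arrive in HeapWords and are converted to
// bytes before being passed on to ZHeap.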
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t size) {
  size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_tlab(size_in_bytes);
}

HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

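// Explicit collection requests are forwarded to the ZDriver thread, which
// coordinates the GC cycle.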
void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be the heap dumper and the heap
  // inspector. Neither of them really needs a GC to happen, but without one
  // the result of their heap iterations might be less accurate, since the
  // iterations might include objects that would otherwise have been collected
  // by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

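// ZGC reports a single memory manager and a single memory pool to the
// serviceability framework.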
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

size_t ZCollectedHeap::block_size(const HeapWord* addr) const {
  size_t size_in_bytes = _heap.block_size((uintptr_t)addr);
  return ZUtils::bytes_to_words(size_in_bytes);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

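// Registered nmethods are tracked by the ZNMethodTable so that the oops they
// contain can be visited during root processing.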
void ZCollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  ZNMethodTable::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  ZNMethodTable::unregister_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

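// ZStatCycle tracks the time since the last GC cycle; convert it to
// milliseconds here.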
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
}

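// The space summary is reported relative to the start of the reserved region,
// using the current capacity as the committed end and the maximum capacity as
// the reserved end.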
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr("     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr("     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr("     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr("Heap");
  st->print_cr("     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr("     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr("     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr("     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr("     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr("Metadata Bits");
  st->print_cr("     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr("     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr("     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr("     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr("     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}