/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "jvm.h"

#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/memBaseline.hpp"
#include "services/memReporter.hpp"
#include "services/mallocTracker.inline.hpp"
#include "services/memTracker.hpp"
#include "services/threadStackTracker.hpp"
#include "utilities/debug.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/vmError.hpp"

#ifdef _WINDOWS
#include <windows.h>
#endif

#ifdef SOLARIS
  volatile bool NMT_stack_walkable = false;
#else
  volatile bool NMT_stack_walkable = true;
#endif

volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;

MemBaseline MemTracker::_baseline;
bool MemTracker::_is_nmt_env_valid = true;

static const size_t buffer_size = 64;
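
// The initial tracking level is taken from the per-process NMT_LEVEL_<pid>
// environment variable (set up by the launcher when -XX:NativeMemoryTracking
// is specified), so that tracking can begin before the VM has parsed its
// command-line arguments.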
NMT_TrackingLevel MemTracker::init_tracking_level() {
  // Memory type is encoded into the tracking header as a byte field;
  // make sure that we don't overflow it.
  STATIC_ASSERT(mt_number_of_types <= max_jubyte);

  char nmt_env_variable[buffer_size];
  jio_snprintf(nmt_env_variable, sizeof(nmt_env_variable), "NMT_LEVEL_%d", os::current_process_id());
  const char* nmt_env_value;
#ifdef _WINDOWS
  // Read the NMT environment variable from the PEB instead of the CRT
  char value[buffer_size];
  nmt_env_value = GetEnvironmentVariable(nmt_env_variable, value, (DWORD)sizeof(value)) != 0 ?
                  value : NULL;
#else
  nmt_env_value = ::getenv(nmt_env_variable);
#endif
  NMT_TrackingLevel level = NMT_off;
  if (nmt_env_value != NULL) {
    if (strcmp(nmt_env_value, "summary") == 0) {
      level = NMT_summary;
    } else if (strcmp(nmt_env_value, "detail") == 0) {
      level = NMT_detail;
    } else if (strcmp(nmt_env_value, "off") != 0) {
      // The value of the environment variable is invalid
      _is_nmt_env_valid = false;
    }
    // Remove the environment variable to avoid leaking it to child processes
    os::unsetenv(nmt_env_variable);
  }

  if (!MallocTracker::initialize(level) ||
      !VirtualMemoryTracker::initialize(level)) {
    level = NMT_off;
  }
  return level;
}

void MemTracker::init() {
  NMT_TrackingLevel level = tracking_level();
  if (level >= NMT_summary) {
    if (!VirtualMemoryTracker::late_initialize(level) ||
        !ThreadStackTracker::late_initialize(level)) {
      shutdown();
      return;
    }
  }
}

bool MemTracker::check_launcher_nmt_support(const char* value) {
  if (strcmp(value, "=detail") == 0) {
    if (MemTracker::tracking_level() != NMT_detail) {
      return false;
    }
  } else if (strcmp(value, "=summary") == 0) {
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
  } else if (strcmp(value, "=off") == 0) {
    if (MemTracker::tracking_level() != NMT_off) {
      return false;
    }
  } else {
    _is_nmt_env_valid = false;
  }

  return true;
}

bool MemTracker::verify_nmt_option() {
  return _is_nmt_env_valid;
}

void* MemTracker::malloc_base(void* memblock) {
  return MallocTracker::get_base(memblock);
}

void Tracker::record(address addr, size_t size) {
  if (MemTracker::tracking_level() < NMT_summary) return;
  switch(_type) {
    case uncommit:
      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
      break;
    case release:
      VirtualMemoryTracker::remove_released_region(addr, size);
      break;
    default:
      ShouldNotReachHere();
  }
}


// Shutdown can only be issued via JCmd, and the NMT JCmd is serialized by a lock
void MemTracker::shutdown() {
  // We can only shut NMT down to the minimal tracking level if it was ever turned on.
  if (tracking_level() > NMT_minimal) {
    transition_to(NMT_minimal);
  }
}
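
// The tracking level can only be lowered (e.g. detail -> summary -> minimal): requests to
// raise it are ignored, and transitioning to NMT_off is not allowed. The new (lower) level
// is published with a fence before the individual trackers are downgraded, so that threads
// stop entering the more detailed tracking paths first.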
bool MemTracker::transition_to(NMT_TrackingLevel level) {
  NMT_TrackingLevel current_level = tracking_level();

  assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");

  if (current_level == level) {
    return true;
  } else if (current_level > level) {
    // Downgrade the tracking level: lower _tracking_level first so that no new
    // detailed tracking is started while the trackers themselves transition.
    _tracking_level = level;
    // Make _tracking_level visible immediately.
    OrderAccess::fence();
    VirtualMemoryTracker::transition(current_level, level);
    MallocTracker::transition(current_level, level);
    ThreadStackTracker::transition(current_level, level);
  } else {
    // Upgrading the tracking level has never been supported. Allocating and
    // deallocating malloc tracking structures is not thread safe and leads to
    // inconsistencies unless much coarser locks are added.
  }
  return true;
}


static volatile bool g_final_report_did_run = false;
void MemTracker::final_report(outputStream* output) {
  // This function is called during both error reporting and normal VM exit.
  // However, it should only ever run once. For example, if the VM crashes after
  // printing the final report during normal VM exit, it should not print
  // the final report again. In addition, it should be guarded from
  // recursive calls in case NMT reporting itself crashes.
  if (Atomic::cmpxchg(true, &g_final_report_did_run, false) == false) {
    NMT_TrackingLevel level = tracking_level();
    if (level >= NMT_summary) {
      report(level == NMT_summary, output);
    }
  }
}

void MemTracker::report(bool summary_only, outputStream* output) {
  assert(output != NULL, "No output stream");
  MemBaseline baseline;
  if (baseline.baseline(summary_only)) {
    if (summary_only) {
      MemSummaryReporter rpt(baseline, output);
      rpt.report();
    } else {
      MemDetailReporter rpt(baseline, output);
      rpt.report();
      output->print("Metaspace:");
      // The basic metaspace report avoids any locking and should be safe to
      // be called at any time.
      MetaspaceUtils::print_basic_report(output, K);
    }
  }
}

// This is a walker to gather malloc site hashtable statistics;
// the result is used for tuning.
class StatisticsWalker : public MallocSiteWalker {
 private:
  enum Threshold {
    // Aggregate statistics over this threshold into one line item.
    report_threshold = 20
  };

 private:
  // Number of allocation sites that have all memory freed
  int _empty_entries;
  // Total number of allocation sites, including empty sites
  int _total_entries;
  // Distribution of captured call stack depths
  int _stack_depth_distribution[NMT_TrackingStackDepth];
  // Hash distribution
  int _hash_distribution[report_threshold];
  // Number of hash buckets that have entries over the threshold
  int _bucket_over_threshold;

  // The hash bucket that the walker is currently walking
  int _current_hash_bucket;
  // The length of the current hash bucket
  int _current_bucket_length;
  // Number of hash buckets that are not empty
  int _used_buckets;
  // Longest hash bucket length
  int _longest_bucket_length;

 public:
  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
    int index = 0;
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      _stack_depth_distribution[index] = 0;
    }
    for (index = 0; index < report_threshold; index ++) {
      _hash_distribution[index] = 0;
    }
    _bucket_over_threshold = 0;
    _longest_bucket_length = 0;
    _current_hash_bucket = -1;
    _current_bucket_length = 0;
    _used_buckets = 0;
  }

  virtual bool do_malloc_site(const MallocSite* e) {
    if (e->size() == 0) _empty_entries ++;
    _total_entries ++;

    // stack depth distribution
    int frames = e->call_stack()->frames();
    _stack_depth_distribution[frames - 1] ++;

    // hash distribution: the table is walked bucket by bucket, so a change of
    // bucket index marks the end of the previous bucket
    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
    if (_current_hash_bucket == -1) {
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    } else if (_current_hash_bucket == hash_bucket) {
      _current_bucket_length ++;
    } else {
      record_bucket_length(_current_bucket_length);
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    }
    return true;
  }

  // walk completed
  void completed() {
    record_bucket_length(_current_bucket_length);
  }
out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries); 292 out->print_cr(" "); 293 out->print_cr("Hash distribution:"); 294 if (_used_buckets < MallocSiteTable::hash_buckets()) { 295 out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets)); 296 } 297 for (index = 0; index < report_threshold; index ++) { 298 if (_hash_distribution[index] != 0) { 299 if (index == 0) { 300 out->print_cr(" %d entry: %d", 1, _hash_distribution[0]); 301 } else if (index < 9) { // single digit 302 out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]); 303 } else { 304 out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]); 305 } 306 } 307 } 308 if (_bucket_over_threshold > 0) { 309 out->print_cr(" >%d entries: %d", report_threshold, _bucket_over_threshold); 310 } 311 out->print_cr("most entries: %d", _longest_bucket_length); 312 out->print_cr(" "); 313 out->print_cr("Call stack depth distribution:"); 314 for (index = 0; index < NMT_TrackingStackDepth; index ++) { 315 if (_stack_depth_distribution[index] > 0) { 316 out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]); 317 } 318 } 319 } 320 321 private: 322 void record_bucket_length(int length) { 323 _used_buckets ++; 324 if (length <= report_threshold) { 325 _hash_distribution[length - 1] ++; 326 } else { 327 _bucket_over_threshold ++; 328 } 329 _longest_bucket_length = MAX2(_longest_bucket_length, length); 330 } 331 }; 332 333 334 void MemTracker::tuning_statistics(outputStream* out) { 335 // NMT statistics 336 StatisticsWalker walker; 337 MallocSiteTable::walk_malloc_site(&walker); 338 walker.completed(); 339 340 out->print_cr("Native Memory Tracking Statistics:"); 341 out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets()); 342 out->print_cr(" Tracking stack depth: %d", NMT_TrackingStackDepth); 343 NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());) 344 out->print_cr(" "); 345 walker.report_statistics(out); 346 }