1 /* 2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_RUNTIME_GLOBALS_HPP 26 #define SHARE_VM_RUNTIME_GLOBALS_HPP 27 28 #include "utilities/debug.hpp" 29 #include <float.h> // for DBL_MAX 30 31 // use this for flags that are true per default in the tiered build 32 // but false in non-tiered builds, and vice versa 33 #ifdef TIERED 34 #define trueInTiered true 35 #define falseInTiered false 36 #else 37 #define trueInTiered false 38 #define falseInTiered true 39 #endif 40 41 #ifdef TARGET_ARCH_x86 42 # include "globals_x86.hpp" 43 #endif 44 #ifdef TARGET_ARCH_sparc 45 # include "globals_sparc.hpp" 46 #endif 47 #ifdef TARGET_ARCH_zero 48 # include "globals_zero.hpp" 49 #endif 50 #ifdef TARGET_ARCH_arm 51 # include "globals_arm.hpp" 52 #endif 53 #ifdef TARGET_ARCH_ppc 54 # include "globals_ppc.hpp" 55 #endif 56 #ifdef TARGET_ARCH_aarch64 57 # include "globals_aarch64.hpp" 58 #endif 59 #ifdef TARGET_OS_FAMILY_linux 60 # include "globals_linux.hpp" 61 #endif 62 #ifdef TARGET_OS_FAMILY_solaris 63 # include "globals_solaris.hpp" 64 #endif 65 #ifdef TARGET_OS_FAMILY_windows 66 # include "globals_windows.hpp" 67 #endif 68 #ifdef TARGET_OS_FAMILY_aix 69 # include "globals_aix.hpp" 70 #endif 71 #ifdef TARGET_OS_FAMILY_bsd 72 # include "globals_bsd.hpp" 73 #endif 74 #ifdef TARGET_OS_ARCH_linux_x86 75 # include "globals_linux_x86.hpp" 76 #endif 77 #ifdef TARGET_OS_ARCH_linux_sparc 78 # include "globals_linux_sparc.hpp" 79 #endif 80 #ifdef TARGET_OS_ARCH_linux_zero 81 # include "globals_linux_zero.hpp" 82 #endif 83 #ifdef TARGET_OS_ARCH_solaris_x86 84 # include "globals_solaris_x86.hpp" 85 #endif 86 #ifdef TARGET_OS_ARCH_solaris_sparc 87 # include "globals_solaris_sparc.hpp" 88 #endif 89 #ifdef TARGET_OS_ARCH_windows_x86 90 # include "globals_windows_x86.hpp" 91 #endif 92 #ifdef TARGET_OS_ARCH_linux_arm 93 # include "globals_linux_arm.hpp" 94 #endif 95 #ifdef TARGET_OS_ARCH_linux_ppc 96 # include "globals_linux_ppc.hpp" 97 #endif 98 #ifdef TARGET_OS_ARCH_linux_aarch64 99 # include "globals_linux_aarch64.hpp" 100 #endif 101 #ifdef TARGET_OS_ARCH_aix_ppc 102 # include "globals_aix_ppc.hpp" 103 #endif 104 #ifdef TARGET_OS_ARCH_bsd_x86 105 # include "globals_bsd_x86.hpp" 106 #endif 107 #ifdef TARGET_OS_ARCH_bsd_zero 108 # include "globals_bsd_zero.hpp" 109 #endif 110 #ifdef COMPILER1 111 #ifdef TARGET_ARCH_x86 112 # include "c1_globals_x86.hpp" 113 #endif 114 #ifdef TARGET_ARCH_sparc 115 # include "c1_globals_sparc.hpp" 116 #endif 117 #ifdef TARGET_ARCH_arm 118 # 
include "c1_globals_arm.hpp" 119 #endif 120 #ifdef TARGET_ARCH_aarch64 121 # include "c1_globals_aarch64.hpp" 122 #endif 123 #ifdef TARGET_OS_FAMILY_linux 124 # include "c1_globals_linux.hpp" 125 #endif 126 #ifdef TARGET_OS_FAMILY_solaris 127 # include "c1_globals_solaris.hpp" 128 #endif 129 #ifdef TARGET_OS_FAMILY_windows 130 # include "c1_globals_windows.hpp" 131 #endif 132 #ifdef TARGET_OS_FAMILY_aix 133 # include "c1_globals_aix.hpp" 134 #endif 135 #ifdef TARGET_OS_FAMILY_bsd 136 # include "c1_globals_bsd.hpp" 137 #endif 138 #ifdef TARGET_ARCH_ppc 139 # include "c1_globals_ppc.hpp" 140 #endif 141 #endif 142 #ifdef COMPILER2 143 #ifdef TARGET_ARCH_x86 144 # include "c2_globals_x86.hpp" 145 #endif 146 #ifdef TARGET_ARCH_sparc 147 # include "c2_globals_sparc.hpp" 148 #endif 149 #ifdef TARGET_ARCH_arm 150 # include "c2_globals_arm.hpp" 151 #endif 152 #ifdef TARGET_ARCH_ppc 153 # include "c2_globals_ppc.hpp" 154 #endif 155 #ifdef TARGET_ARCH_aarch64 156 # include "c2_globals_aarch64.hpp" 157 #endif 158 #ifdef TARGET_OS_FAMILY_linux 159 # include "c2_globals_linux.hpp" 160 #endif 161 #ifdef TARGET_OS_FAMILY_solaris 162 # include "c2_globals_solaris.hpp" 163 #endif 164 #ifdef TARGET_OS_FAMILY_windows 165 # include "c2_globals_windows.hpp" 166 #endif 167 #ifdef TARGET_OS_FAMILY_aix 168 # include "c2_globals_aix.hpp" 169 #endif 170 #ifdef TARGET_OS_FAMILY_bsd 171 # include "c2_globals_bsd.hpp" 172 #endif 173 #endif 174 #ifdef SHARK 175 #ifdef TARGET_ARCH_zero 176 # include "shark_globals_zero.hpp" 177 #endif 178 #endif 179 180 #if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !INCLUDE_JVMCI 181 define_pd_global(bool, BackgroundCompilation, false); 182 define_pd_global(bool, UseTLAB, false); 183 define_pd_global(bool, CICompileOSR, false); 184 define_pd_global(bool, UseTypeProfile, false); 185 define_pd_global(bool, UseOnStackReplacement, false); 186 define_pd_global(bool, InlineIntrinsics, false); 187 define_pd_global(bool, PreferInterpreterNativeStubs, true); 188 define_pd_global(bool, ProfileInterpreter, false); 189 define_pd_global(bool, ProfileTraps, false); 190 define_pd_global(bool, TieredCompilation, false); 191 192 define_pd_global(intx, CompileThreshold, 0); 193 194 define_pd_global(intx, OnStackReplacePercentage, 0); 195 define_pd_global(bool, ResizeTLAB, false); 196 define_pd_global(intx, FreqInlineSize, 0); 197 define_pd_global(size_t, NewSizeThreadIncrease, 4*K); 198 define_pd_global(intx, InlineClassNatives, true); 199 define_pd_global(intx, InlineUnsafeOps, true); 200 define_pd_global(intx, InitialCodeCacheSize, 160*K); 201 define_pd_global(intx, ReservedCodeCacheSize, 32*M); 202 define_pd_global(intx, NonProfiledCodeHeapSize, 0); 203 define_pd_global(intx, ProfiledCodeHeapSize, 0); 204 define_pd_global(intx, NonNMethodCodeHeapSize, 32*M); 205 206 define_pd_global(intx, CodeCacheExpansionSize, 32*K); 207 define_pd_global(intx, CodeCacheMinBlockLength, 1); 208 define_pd_global(intx, CodeCacheMinimumUseSpace, 200*K); 209 define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(4*M)); 210 define_pd_global(bool, NeverActAsServerClassMachine, true); 211 define_pd_global(uint64_t,MaxRAM, 1ULL*G); 212 #define CI_COMPILER_COUNT 0 213 #else 214 215 #if defined(COMPILER2) || INCLUDE_JVMCI 216 #define CI_COMPILER_COUNT 2 217 #else 218 #define CI_COMPILER_COUNT 1 219 #endif // COMPILER2 || INCLUDE_JVMCI 220 221 #endif // no compilers 222 223 // string type aliases used only in this file 224 typedef const char* ccstr; 225 typedef const char* ccstrlist; // represents string 
arguments which accumulate 226 227 struct Flag { 228 enum Flags { 229 // value origin 230 DEFAULT = 0, 231 COMMAND_LINE = 1, 232 ENVIRON_VAR = 2, 233 CONFIG_FILE = 3, 234 MANAGEMENT = 4, 235 ERGONOMIC = 5, 236 ATTACH_ON_DEMAND = 6, 237 INTERNAL = 7, 238 239 LAST_VALUE_ORIGIN = INTERNAL, 240 VALUE_ORIGIN_BITS = 4, 241 VALUE_ORIGIN_MASK = right_n_bits(VALUE_ORIGIN_BITS), 242 243 // flag kind 244 KIND_PRODUCT = 1 << 4, 245 KIND_MANAGEABLE = 1 << 5, 246 KIND_DIAGNOSTIC = 1 << 6, 247 KIND_EXPERIMENTAL = 1 << 7, 248 KIND_NOT_PRODUCT = 1 << 8, 249 KIND_DEVELOP = 1 << 9, 250 KIND_PLATFORM_DEPENDENT = 1 << 10, 251 KIND_READ_WRITE = 1 << 11, 252 KIND_C1 = 1 << 12, 253 KIND_C2 = 1 << 13, 254 KIND_ARCH = 1 << 14, 255 KIND_SHARK = 1 << 15, 256 KIND_LP64_PRODUCT = 1 << 16, 257 KIND_COMMERCIAL = 1 << 17, 258 KIND_JVMCI = 1 << 18, 259 260 KIND_MASK = ~VALUE_ORIGIN_MASK 261 }; 262 263 enum Error { 264 // no error 265 SUCCESS = 0, 266 // flag name is missing 267 MISSING_NAME, 268 // flag value is missing 269 MISSING_VALUE, 270 // error parsing the textual form of the value 271 WRONG_FORMAT, 272 // flag is not writeable 273 NON_WRITABLE, 274 // flag value is outside of its bounds 275 OUT_OF_BOUNDS, 276 // flag value violates its constraint 277 VIOLATES_CONSTRAINT, 278 // there is no flag with the given name 279 INVALID_FLAG, 280 // other, unspecified error related to setting the flag 281 ERR_OTHER 282 }; 283 284 enum MsgType { 285 NONE = 0, 286 DIAGNOSTIC_FLAG_BUT_LOCKED, 287 EXPERIMENTAL_FLAG_BUT_LOCKED, 288 DEVELOPER_FLAG_BUT_PRODUCT_BUILD, 289 NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD 290 }; 291 292 const char* _type; 293 const char* _name; 294 void* _addr; 295 NOT_PRODUCT(const char* _doc;) 296 Flags _flags; 297 298 // points to all Flags static array 299 static Flag* flags; 300 301 // number of flags 302 static size_t numFlags; 303 304 static Flag* find_flag(const char* name) { return find_flag(name, strlen(name), true, true); }; 305 static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false); 306 static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false); 307 308 void check_writable(); 309 310 bool is_bool() const; 311 bool get_bool() const; 312 void set_bool(bool value); 313 314 bool is_int() const; 315 int get_int() const; 316 void set_int(int value); 317 318 bool is_uint() const; 319 uint get_uint() const; 320 void set_uint(uint value); 321 322 bool is_intx() const; 323 intx get_intx() const; 324 void set_intx(intx value); 325 326 bool is_uintx() const; 327 uintx get_uintx() const; 328 void set_uintx(uintx value); 329 330 bool is_uint64_t() const; 331 uint64_t get_uint64_t() const; 332 void set_uint64_t(uint64_t value); 333 334 bool is_size_t() const; 335 size_t get_size_t() const; 336 void set_size_t(size_t value); 337 338 bool is_double() const; 339 double get_double() const; 340 void set_double(double value); 341 342 bool is_ccstr() const; 343 bool ccstr_accumulates() const; 344 ccstr get_ccstr() const; 345 void set_ccstr(ccstr value); 346 347 Flags get_origin(); 348 void set_origin(Flags origin); 349 350 bool is_default(); 351 bool is_ergonomic(); 352 bool is_command_line(); 353 354 bool is_product() const; 355 bool is_manageable() const; 356 bool is_diagnostic() const; 357 bool is_experimental() const; 358 bool is_notproduct() const; 359 bool is_develop() const; 360 bool is_read_write() const; 361 bool is_commercial() const; 362 363 bool is_constant_in_binary() const; 364 365 bool is_unlocker() const; 366 bool is_unlocked() 
const; 367 bool is_writeable() const; 368 bool is_external() const; 369 370 bool is_unlocker_ext() const; 371 bool is_unlocked_ext() const; 372 bool is_writeable_ext() const; 373 bool is_external_ext() const; 374 375 void unlock_diagnostic(); 376 377 Flag::MsgType get_locked_message(char*, int) const; 378 void get_locked_message_ext(char*, int) const; 379 380 // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges 381 void print_on(outputStream* st, bool withComments = false, bool printRanges = false); 382 void print_kind(outputStream* st); 383 void print_as_flag(outputStream* st); 384 385 static const char* flag_error_str(Flag::Error error); 386 }; 387 388 // debug flags control various aspects of the VM and are global accessible 389 390 // use FlagSetting to temporarily change some debug flag 391 // e.g. FlagSetting fs(DebugThisAndThat, true); 392 // restored to previous value upon leaving scope 393 class FlagSetting { 394 bool val; 395 bool* flag; 396 public: 397 FlagSetting(bool& fl, bool newValue) { flag = &fl; val = fl; fl = newValue; } 398 ~FlagSetting() { *flag = val; } 399 }; 400 401 402 class CounterSetting { 403 intx* counter; 404 public: 405 CounterSetting(intx* cnt) { counter = cnt; (*counter)++; } 406 ~CounterSetting() { (*counter)--; } 407 }; 408 409 class IntFlagSetting { 410 int val; 411 int* flag; 412 public: 413 IntFlagSetting(int& fl, int newValue) { flag = &fl; val = fl; fl = newValue; } 414 ~IntFlagSetting() { *flag = val; } 415 }; 416 417 class UIntFlagSetting { 418 uint val; 419 uint* flag; 420 public: 421 UIntFlagSetting(uint& fl, uint newValue) { flag = &fl; val = fl; fl = newValue; } 422 ~UIntFlagSetting() { *flag = val; } 423 }; 424 425 class UIntXFlagSetting { 426 uintx val; 427 uintx* flag; 428 public: 429 UIntXFlagSetting(uintx& fl, uintx newValue) { flag = &fl; val = fl; fl = newValue; } 430 ~UIntXFlagSetting() { *flag = val; } 431 }; 432 433 class DoubleFlagSetting { 434 double val; 435 double* flag; 436 public: 437 DoubleFlagSetting(double& fl, double newValue) { flag = &fl; val = fl; fl = newValue; } 438 ~DoubleFlagSetting() { *flag = val; } 439 }; 440 441 class SizeTFlagSetting { 442 size_t val; 443 size_t* flag; 444 public: 445 SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; } 446 ~SizeTFlagSetting() { *flag = val; } 447 }; 448 449 450 class CommandLineFlags { 451 public: 452 static Flag::Error boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false); 453 static Flag::Error boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); } 454 static Flag::Error boolAtPut(Flag* flag, bool* value, Flag::Flags origin); 455 static Flag::Error boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin); 456 static Flag::Error boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); } 457 458 static Flag::Error intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false); 459 static Flag::Error intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); } 460 static Flag::Error intAtPut(Flag* flag, int* value, Flag::Flags origin); 461 static Flag::Error intAtPut(const char* name, size_t len, int* value, Flag::Flags origin); 462 
static Flag::Error intAtPut(const char* name, int* value, Flag::Flags origin) { return intAtPut(name, strlen(name), value, origin); } 463 464 static Flag::Error uintAt(const char* name, size_t len, uint* value, bool allow_locked = false, bool return_flag = false); 465 static Flag::Error uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); } 466 static Flag::Error uintAtPut(Flag* flag, uint* value, Flag::Flags origin); 467 static Flag::Error uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin); 468 static Flag::Error uintAtPut(const char* name, uint* value, Flag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); } 469 470 static Flag::Error intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false); 471 static Flag::Error intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); } 472 static Flag::Error intxAtPut(Flag* flag, intx* value, Flag::Flags origin); 473 static Flag::Error intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin); 474 static Flag::Error intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); } 475 476 static Flag::Error uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false); 477 static Flag::Error uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); } 478 static Flag::Error uintxAtPut(Flag* flag, uintx* value, Flag::Flags origin); 479 static Flag::Error uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin); 480 static Flag::Error uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); } 481 482 static Flag::Error size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false); 483 static Flag::Error size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); } 484 static Flag::Error size_tAtPut(Flag* flag, size_t* value, Flag::Flags origin); 485 static Flag::Error size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin); 486 static Flag::Error size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); } 487 488 static Flag::Error uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false); 489 static Flag::Error uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); } 490 static Flag::Error uint64_tAtPut(Flag* flag, uint64_t* value, Flag::Flags origin); 491 static Flag::Error uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin); 492 static Flag::Error uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); } 493 494 static Flag::Error doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false); 495 static Flag::Error 
doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); } 496 static Flag::Error doubleAtPut(Flag* flag, double* value, Flag::Flags origin); 497 static Flag::Error doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin); 498 static Flag::Error doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); } 499 500 static Flag::Error ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false); 501 static Flag::Error ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); } 502 // Contract: Flag will make private copy of the incoming value. 503 // Outgoing value is always malloc-ed, and caller MUST call free. 504 static Flag::Error ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin); 505 static Flag::Error ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); } 506 507 // Returns false if name is not a command line flag. 508 static bool wasSetOnCmdline(const char* name, bool* value); 509 static void printSetFlags(outputStream* out); 510 511 // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges 512 static void printFlags(outputStream* out, bool withComments, bool printRanges = false); 513 514 static void verify() PRODUCT_RETURN; 515 }; 516 517 // use this for flags that are true by default in the debug version but 518 // false in the optimized version, and vice versa 519 #ifdef ASSERT 520 #define trueInDebug true 521 #define falseInDebug false 522 #else 523 #define trueInDebug false 524 #define falseInDebug true 525 #endif 526 527 // use this for flags that are true per default in the product build 528 // but false in development builds, and vice versa 529 #ifdef PRODUCT 530 #define trueInProduct true 531 #define falseInProduct false 532 #else 533 #define trueInProduct false 534 #define falseInProduct true 535 #endif 536 537 #ifdef JAVASE_EMBEDDED 538 #define falseInEmbedded false 539 #else 540 #define falseInEmbedded true 541 #endif 542 543 // develop flags are settable / visible only during development and are constant in the PRODUCT version 544 // product flags are always settable / visible 545 // notproduct flags are settable / visible only during development and are not declared in the PRODUCT version 546 547 // A flag must be declared with one of the following types: 548 // bool, int, uint, intx, uintx, size_t, ccstr, double, or uint64_t. 549 // The type "ccstr" is an alias for "const char*" and is used 550 // only in this file, because the macrology requires single-token type names. 551 552 // Note: Diagnostic options not meant for VM tuning or for product modes. 553 // They are to be used for VM quality assurance or field diagnosis 554 // of VM bugs. They are hidden so that users will not be encouraged to 555 // try them as if they were VM ordinary execution options. However, they 556 // are available in the product version of the VM. Under instruction 557 // from support engineers, VM customers can turn them on to collect 558 // diagnostic information about VM problems. To use a VM diagnostic 559 // option, you must first specify +UnlockDiagnosticVMOptions. 
560 // (This master switch also affects the behavior of -Xprintflags.) 561 // 562 // experimental flags are in support of features that are not 563 // part of the officially supported product, but are available 564 // for experimenting with. They could, for example, be performance 565 // features that may not have undergone full or rigorous QA, but which may 566 // help performance in some cases and released for experimentation 567 // by the community of users and developers. This flag also allows one to 568 // be able to build a fully supported product that nonetheless also 569 // ships with some unsupported, lightly tested, experimental features. 570 // Like the UnlockDiagnosticVMOptions flag above, there is a corresponding 571 // UnlockExperimentalVMOptions flag, which allows the control and 572 // modification of the experimental flags. 573 // 574 // Nota bene: neither diagnostic nor experimental options should be used casually, 575 // and they are not supported on production loads, except under explicit 576 // direction from support engineers. 577 // 578 // manageable flags are writeable external product flags. 579 // They are dynamically writeable through the JDK management interface 580 // (com.sun.management.HotSpotDiagnosticMXBean API) and also through JConsole. 581 // These flags are external exported interface (see CCC). The list of 582 // manageable flags can be queried programmatically through the management 583 // interface. 584 // 585 // A flag can be made as "manageable" only if 586 // - the flag is defined in a CCC as an external exported interface. 587 // - the VM implementation supports dynamic setting of the flag. 588 // This implies that the VM must *always* query the flag variable 589 // and not reuse state related to the flag state at any given time. 590 // - you want the flag to be queried programmatically by the customers. 591 // 592 // product_rw flags are writeable internal product flags. 593 // They are like "manageable" flags but for internal/private use. 594 // The list of product_rw flags are internal/private flags which 595 // may be changed/removed in a future release. It can be set 596 // through the management interface to get/set value 597 // when the name of flag is supplied. 598 // 599 // A flag can be made as "product_rw" only if 600 // - the VM implementation supports dynamic setting of the flag. 601 // This implies that the VM must *always* query the flag variable 602 // and not reuse state related to the flag state at any given time. 603 // 604 // Note that when there is a need to support develop flags to be writeable, 605 // it can be done in the same way as product_rw. 606 // 607 // range is a macro that will expand to min and max arguments for range 608 // checking code if provided - see commandLineFlagRangeList.hpp 609 // 610 // constraint is a macro that will expand to custom function call 611 // for constraint checking if provided - see commandLineFlagConstraintList.hpp 612 // 613 614 #define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product, range, constraint) \ 615 \ 616 lp64_product(bool, UseCompressedOops, false, \ 617 "Use 32-bit object references in 64-bit VM. " \ 618 "lp64_product means flag is always constant in 32 bit VM") \ 619 \ 620 lp64_product(bool, UseCompressedClassPointers, false, \ 621 "Use 32-bit class pointers in 64-bit VM. 
" \ 622 "lp64_product means flag is always constant in 32 bit VM") \ 623 \ 624 notproduct(bool, CheckCompressedOops, true, \ 625 "Generate checks in encoding/decoding code in debug VM") \ 626 \ 627 product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17), \ 628 "Heap allocation steps through preferred address regions to find" \ 629 " where it can allocate the heap. Number of steps to take per " \ 630 "region.") \ 631 range(1, max_uintx) \ 632 \ 633 diagnostic(bool, PrintCompressedOopsMode, false, \ 634 "Print compressed oops base address and encoding mode") \ 635 \ 636 lp64_product(intx, ObjectAlignmentInBytes, 8, \ 637 "Default object alignment in bytes, 8 is minimum") \ 638 range(8, 256) \ 639 constraint(ObjectAlignmentInBytesConstraintFunc,AtParse) \ 640 \ 641 product(bool, AssumeMP, false, \ 642 "Instruct the VM to assume multiple processors are available") \ 643 \ 644 /* UseMembar is theoretically a temp flag used for memory barrier */ \ 645 /* removal testing. It was supposed to be removed before FCS but has */ \ 646 /* been re-added (see 6401008) */ \ 647 product_pd(bool, UseMembar, \ 648 "(Unstable) Issues membars on thread state transitions") \ 649 \ 650 develop(bool, CleanChunkPoolAsync, falseInEmbedded, \ 651 "Clean the chunk pool asynchronously") \ 652 \ 653 experimental(bool, AlwaysSafeConstructors, false, \ 654 "Force safe construction, as if all fields are final.") \ 655 \ 656 diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \ 657 "Enable normal processing of flags relating to field diagnostics")\ 658 \ 659 experimental(bool, UnlockExperimentalVMOptions, false, \ 660 "Enable normal processing of flags relating to experimental " \ 661 "features") \ 662 \ 663 product(bool, JavaMonitorsInStackTrace, true, \ 664 "Print information about Java monitor locks when the stacks are" \ 665 "dumped") \ 666 \ 667 product_pd(bool, UseLargePages, \ 668 "Use large page memory") \ 669 \ 670 product_pd(bool, UseLargePagesIndividualAllocation, \ 671 "Allocate large pages individually for better affinity") \ 672 \ 673 develop(bool, LargePagesIndividualAllocationInjectError, false, \ 674 "Fail large pages individual allocation") \ 675 \ 676 product(bool, UseLargePagesInMetaspace, false, \ 677 "Use large page memory in metaspace. 
" \ 678 "Only used if UseLargePages is enabled.") \ 679 \ 680 develop(bool, TracePageSizes, false, \ 681 "Trace page size selection and usage") \ 682 \ 683 product(bool, UseNUMA, false, \ 684 "Use NUMA if available") \ 685 \ 686 product(bool, UseNUMAInterleaving, false, \ 687 "Interleave memory across NUMA nodes if available") \ 688 \ 689 product(size_t, NUMAInterleaveGranularity, 2*M, \ 690 "Granularity to use for NUMA interleaving on Windows OS") \ 691 range(os::vm_allocation_granularity(), NOT_LP64(2*G) LP64_ONLY(8192*G)) \ 692 \ 693 product(bool, ForceNUMA, false, \ 694 "Force NUMA optimizations on single-node/UMA systems") \ 695 \ 696 product(uintx, NUMAChunkResizeWeight, 20, \ 697 "Percentage (0-100) used to weight the current sample when " \ 698 "computing exponentially decaying average for " \ 699 "AdaptiveNUMAChunkSizing") \ 700 range(0, 100) \ 701 \ 702 product(size_t, NUMASpaceResizeRate, 1*G, \ 703 "Do not reallocate more than this amount per collection") \ 704 range(0, max_uintx) \ 705 \ 706 product(bool, UseAdaptiveNUMAChunkSizing, true, \ 707 "Enable adaptive chunk sizing for NUMA") \ 708 \ 709 product(bool, NUMAStats, false, \ 710 "Print NUMA stats in detailed heap information") \ 711 \ 712 product(uintx, NUMAPageScanRate, 256, \ 713 "Maximum number of pages to include in the page scan procedure") \ 714 range(0, max_uintx) \ 715 \ 716 product_pd(bool, NeedsDeoptSuspend, \ 717 "True for register window machines (sparc/ia64)") \ 718 \ 719 product(intx, UseSSE, 99, \ 720 "Highest supported SSE instructions set on x86/x64") \ 721 range(0, 99) \ 722 \ 723 product(bool, UseAES, false, \ 724 "Control whether AES instructions can be used on x86/x64") \ 725 \ 726 product(bool, UseSHA, false, \ 727 "Control whether SHA instructions can be used " \ 728 "on SPARC and on ARM") \ 729 \ 730 product(bool, UseGHASHIntrinsics, false, \ 731 "Use intrinsics for GHASH versions of crypto") \ 732 \ 733 product(size_t, LargePageSizeInBytes, 0, \ 734 "Large page size (0 to let VM choose the page size)") \ 735 range(0, max_uintx) \ 736 \ 737 product(size_t, LargePageHeapSizeThreshold, 128*M, \ 738 "Use large pages if maximum heap is at least this big") \ 739 range(0, max_uintx) \ 740 \ 741 product(bool, ForceTimeHighResolution, false, \ 742 "Using high time resolution (for Win32 only)") \ 743 \ 744 develop(bool, TracePcPatching, false, \ 745 "Trace usage of frame::patch_pc") \ 746 \ 747 develop(bool, TraceJumps, false, \ 748 "Trace assembly jumps in thread ring buffer") \ 749 \ 750 develop(bool, TraceRelocator, false, \ 751 "Trace the bytecode relocator") \ 752 \ 753 develop(bool, TraceLongCompiles, false, \ 754 "Print out every time compilation is longer than " \ 755 "a given threshold") \ 756 \ 757 develop(bool, SafepointALot, false, \ 758 "Generate a lot of safepoints. 
This works with " \ 759 "GuaranteedSafepointInterval") \ 760 \ 761 product_pd(bool, BackgroundCompilation, \ 762 "A thread requesting compilation is not blocked during " \ 763 "compilation") \ 764 \ 765 product(bool, PrintVMQWaitTime, false, \ 766 "Print out the waiting time in VM operation queue") \ 767 \ 768 develop(bool, TraceOopMapGeneration, false, \ 769 "Show OopMapGeneration") \ 770 \ 771 product(bool, MethodFlushing, true, \ 772 "Reclamation of zombie and not-entrant methods") \ 773 \ 774 develop(bool, VerifyStack, false, \ 775 "Verify stack of each thread when it is entering a runtime call") \ 776 \ 777 diagnostic(bool, ForceUnreachable, false, \ 778 "Make all non code cache addresses to be unreachable by " \ 779 "forcing use of 64bit literal fixups") \ 780 \ 781 notproduct(bool, StressDerivedPointers, false, \ 782 "Force scavenge when a derived pointer is detected on stack " \ 783 "after rtm call") \ 784 \ 785 develop(bool, TraceDerivedPointers, false, \ 786 "Trace traversal of derived pointers on stack") \ 787 \ 788 notproduct(bool, TraceCodeBlobStacks, false, \ 789 "Trace stack-walk of codeblobs") \ 790 \ 791 product(bool, PrintJNIResolving, false, \ 792 "Used to implement -v:jni") \ 793 \ 794 notproduct(bool, PrintRewrites, false, \ 795 "Print methods that are being rewritten") \ 796 \ 797 product(bool, UseInlineCaches, true, \ 798 "Use Inline Caches for virtual calls ") \ 799 \ 800 develop(bool, InlineArrayCopy, true, \ 801 "Inline arraycopy native that is known to be part of " \ 802 "base library DLL") \ 803 \ 804 develop(bool, InlineObjectHash, true, \ 805 "Inline Object::hashCode() native that is known to be part " \ 806 "of base library DLL") \ 807 \ 808 develop(bool, InlineNatives, true, \ 809 "Inline natives that are known to be part of base library DLL") \ 810 \ 811 develop(bool, InlineMathNatives, true, \ 812 "Inline SinD, CosD, etc.") \ 813 \ 814 develop(bool, InlineClassNatives, true, \ 815 "Inline Class.isInstance, etc") \ 816 \ 817 develop(bool, InlineThreadNatives, true, \ 818 "Inline Thread.currentThread, etc") \ 819 \ 820 develop(bool, InlineUnsafeOps, true, \ 821 "Inline memory ops (native methods) from Unsafe") \ 822 \ 823 product(bool, CriticalJNINatives, true, \ 824 "Check for critical JNI entry points") \ 825 \ 826 notproduct(bool, StressCriticalJNINatives, false, \ 827 "Exercise register saving code in critical natives") \ 828 \ 829 product(bool, UseAESIntrinsics, false, \ 830 "Use intrinsics for AES versions of crypto") \ 831 \ 832 product(bool, UseAESCTRIntrinsics, false, \ 833 "Use intrinsics for the paralleled version of AES/CTR crypto") \ 834 \ 835 product(bool, UseSHA1Intrinsics, false, \ 836 "Use intrinsics for SHA-1 crypto hash function. " \ 837 "Requires that UseSHA is enabled.") \ 838 \ 839 product(bool, UseSHA256Intrinsics, false, \ 840 "Use intrinsics for SHA-224 and SHA-256 crypto hash functions. " \ 841 "Requires that UseSHA is enabled.") \ 842 \ 843 product(bool, UseSHA512Intrinsics, false, \ 844 "Use intrinsics for SHA-384 and SHA-512 crypto hash functions. 
" \ 845 "Requires that UseSHA is enabled.") \ 846 \ 847 product(bool, UseCRC32Intrinsics, false, \ 848 "use intrinsics for java.util.zip.CRC32") \ 849 \ 850 product(bool, UseCRC32CIntrinsics, false, \ 851 "use intrinsics for java.util.zip.CRC32C") \ 852 \ 853 product(bool, UseAdler32Intrinsics, false, \ 854 "use intrinsics for java.util.zip.Adler32") \ 855 \ 856 product(bool, UseVectorizedMismatchIntrinsic, false, \ 857 "Enables intrinsification of ArraysSupport.vectorizedMismatch()") \ 858 \ 859 diagnostic(ccstrlist, DisableIntrinsic, "", \ 860 "do not expand intrinsics whose (internal) names appear here") \ 861 \ 862 develop(bool, TraceCallFixup, false, \ 863 "Trace all call fixups") \ 864 \ 865 develop(bool, DeoptimizeALot, false, \ 866 "Deoptimize at every exit from the runtime system") \ 867 \ 868 notproduct(ccstrlist, DeoptimizeOnlyAt, "", \ 869 "A comma separated list of bcis to deoptimize at") \ 870 \ 871 product(bool, DeoptimizeRandom, false, \ 872 "Deoptimize random frames on random exit from the runtime system")\ 873 \ 874 notproduct(bool, ZombieALot, false, \ 875 "Create zombies (non-entrant) at exit from the runtime system") \ 876 \ 877 product(bool, UnlinkSymbolsALot, false, \ 878 "Unlink unreferenced symbols from the symbol table at safepoints")\ 879 \ 880 notproduct(bool, WalkStackALot, false, \ 881 "Trace stack (no print) at every exit from the runtime system") \ 882 \ 883 product(bool, Debugging, false, \ 884 "Set when executing debug methods in debug.cpp " \ 885 "(to prevent triggering assertions)") \ 886 \ 887 notproduct(bool, StrictSafepointChecks, trueInDebug, \ 888 "Enable strict checks that safepoints cannot happen for threads " \ 889 "that use NoSafepointVerifier") \ 890 \ 891 notproduct(bool, VerifyLastFrame, false, \ 892 "Verify oops on last frame on entry to VM") \ 893 \ 894 develop(bool, TraceHandleAllocation, false, \ 895 "Print out warnings when suspiciously many handles are allocated")\ 896 \ 897 product(bool, FailOverToOldVerifier, true, \ 898 "Fail over to old verifier when split verifier fails") \ 899 \ 900 develop(bool, ShowSafepointMsgs, false, \ 901 "Show message about safepoint synchronization") \ 902 \ 903 product(bool, SafepointTimeout, false, \ 904 "Time out and warn or fail after SafepointTimeoutDelay " \ 905 "milliseconds if failed to reach safepoint") \ 906 \ 907 develop(bool, DieOnSafepointTimeout, false, \ 908 "Die upon failure to reach safepoint (see SafepointTimeout)") \ 909 \ 910 /* 50 retries * (5 * current_retry_count) millis = ~6.375 seconds */ \ 911 /* typically, at most a few retries are needed */ \ 912 product(intx, SuspendRetryCount, 50, \ 913 "Maximum retry count for an external suspend request") \ 914 range(0, max_intx) \ 915 \ 916 product(intx, SuspendRetryDelay, 5, \ 917 "Milliseconds to delay per retry (* current_retry_count)") \ 918 range(0, max_intx) \ 919 \ 920 product(bool, AssertOnSuspendWaitFailure, false, \ 921 "Assert/Guarantee on external suspend wait failure") \ 922 \ 923 product(bool, TraceSuspendWaitFailures, false, \ 924 "Trace external suspend wait failures") \ 925 \ 926 product(bool, MaxFDLimit, true, \ 927 "Bump the number of file descriptors to maximum in Solaris") \ 928 \ 929 diagnostic(bool, LogEvents, true, \ 930 "Enable the various ring buffer event logs") \ 931 \ 932 diagnostic(uintx, LogEventsBufferEntries, 10, \ 933 "Number of ring buffer event logs") \ 934 range(1, NOT_LP64(1*K) LP64_ONLY(1*M)) \ 935 \ 936 product(bool, BytecodeVerificationRemote, true, \ 937 "Enable the Java bytecode verifier for remote 
classes") \ 938 \ 939 product(bool, BytecodeVerificationLocal, false, \ 940 "Enable the Java bytecode verifier for local classes") \ 941 \ 942 develop(bool, ForceFloatExceptions, trueInDebug, \ 943 "Force exceptions on FP stack under/overflow") \ 944 \ 945 develop(bool, VerifyStackAtCalls, false, \ 946 "Verify that the stack pointer is unchanged after calls") \ 947 \ 948 develop(bool, TraceJavaAssertions, false, \ 949 "Trace java language assertions") \ 950 \ 951 notproduct(bool, CheckAssertionStatusDirectives, false, \ 952 "Temporary - see javaClasses.cpp") \ 953 \ 954 notproduct(bool, PrintMallocFree, false, \ 955 "Trace calls to C heap malloc/free allocation") \ 956 \ 957 product(bool, PrintOopAddress, false, \ 958 "Always print the location of the oop") \ 959 \ 960 notproduct(bool, VerifyCodeCache, false, \ 961 "Verify code cache on memory allocation/deallocation") \ 962 \ 963 develop(bool, UseMallocOnly, false, \ 964 "Use only malloc/free for allocation (no resource area/arena)") \ 965 \ 966 develop(bool, PrintMalloc, false, \ 967 "Print all malloc/free calls") \ 968 \ 969 develop(bool, PrintMallocStatistics, false, \ 970 "Print malloc/free statistics") \ 971 \ 972 develop(bool, ZapResourceArea, trueInDebug, \ 973 "Zap freed resource/arena space with 0xABABABAB") \ 974 \ 975 notproduct(bool, ZapVMHandleArea, trueInDebug, \ 976 "Zap freed VM handle space with 0xBCBCBCBC") \ 977 \ 978 develop(bool, ZapJNIHandleArea, trueInDebug, \ 979 "Zap freed JNI handle space with 0xFEFEFEFE") \ 980 \ 981 notproduct(bool, ZapStackSegments, trueInDebug, \ 982 "Zap allocated/freed stack segments with 0xFADFADED") \ 983 \ 984 develop(bool, ZapUnusedHeapArea, trueInDebug, \ 985 "Zap unused heap space with 0xBAADBABE") \ 986 \ 987 develop(bool, CheckZapUnusedHeapArea, false, \ 988 "Check zapping of unused heap space") \ 989 \ 990 develop(bool, ZapFillerObjects, trueInDebug, \ 991 "Zap filler objects with 0xDEAFBABE") \ 992 \ 993 develop(bool, PrintVMMessages, true, \ 994 "Print VM messages on console") \ 995 \ 996 diagnostic(bool, VerboseVerification, false, \ 997 "Display detailed verification details") \ 998 \ 999 notproduct(uintx, ErrorHandlerTest, 0, \ 1000 "If > 0, provokes an error after VM initialization; the value " \ 1001 "determines which error to provoke. See test_error_handler() " \ 1002 "in debug.cpp.") \ 1003 \ 1004 notproduct(uintx, TestCrashInErrorHandler, 0, \ 1005 "If > 0, provokes an error inside VM error handler (a secondary " \ 1006 "crash). see test_error_handler() in debug.cpp.") \ 1007 \ 1008 notproduct(bool, TestSafeFetchInErrorHandler, false, \ 1009 "If true, tests SafeFetch inside error handler.") \ 1010 \ 1011 develop(bool, Verbose, false, \ 1012 "Print additional debugging information from other modes") \ 1013 \ 1014 develop(bool, PrintMiscellaneous, false, \ 1015 "Print uncategorized debugging information (requires +Verbose)") \ 1016 \ 1017 develop(bool, WizardMode, false, \ 1018 "Print much more debugging information") \ 1019 \ 1020 product(bool, ShowMessageBoxOnError, false, \ 1021 "Keep process alive on VM fatal error") \ 1022 \ 1023 product(bool, CreateCoredumpOnCrash, true, \ 1024 "Create core/mini dump on VM fatal error") \ 1025 \ 1026 product(uint64_t, ErrorLogTimeout, 2 * 60, \ 1027 "Timeout, in seconds, to limit the time spent on writing an " \ 1028 "error log in case of a crash.") \ 1029 range(0, (uint64_t)max_jlong/1000) \ 1030 \ 1031 product_pd(bool, UseOSErrorReporting, \ 1032 "Let VM fatal error propagate to the OS (ie. 
WER on Windows)") \ 1033 \ 1034 product(bool, SuppressFatalErrorMessage, false, \ 1035 "Report NO fatal error message (avoid deadlock)") \ 1036 \ 1037 product(ccstrlist, OnError, "", \ 1038 "Run user-defined commands on fatal error; see VMError.cpp " \ 1039 "for examples") \ 1040 \ 1041 product(ccstrlist, OnOutOfMemoryError, "", \ 1042 "Run user-defined commands on first java.lang.OutOfMemoryError") \ 1043 \ 1044 manageable(bool, HeapDumpBeforeFullGC, false, \ 1045 "Dump heap to file before any major stop-the-world GC") \ 1046 \ 1047 manageable(bool, HeapDumpAfterFullGC, false, \ 1048 "Dump heap to file after any major stop-the-world GC") \ 1049 \ 1050 manageable(bool, HeapDumpOnOutOfMemoryError, false, \ 1051 "Dump heap to file when java.lang.OutOfMemoryError is thrown") \ 1052 \ 1053 manageable(ccstr, HeapDumpPath, NULL, \ 1054 "When HeapDumpOnOutOfMemoryError is on, the path (filename or " \ 1055 "directory) of the dump file (defaults to java_pid<pid>.hprof " \ 1056 "in the working directory)") \ 1057 \ 1058 develop(size_t, SegmentedHeapDumpThreshold, 2*G, \ 1059 "Generate a segmented heap dump (JAVA PROFILE 1.0.2 format) " \ 1060 "when the heap usage is larger than this") \ 1061 \ 1062 develop(size_t, HeapDumpSegmentSize, 1*G, \ 1063 "Approximate segment size when generating a segmented heap dump") \ 1064 \ 1065 develop(bool, BreakAtWarning, false, \ 1066 "Execute breakpoint upon encountering VM warning") \ 1067 \ 1068 develop(bool, UseFakeTimers, false, \ 1069 "Tell whether the VM should use system time or a fake timer") \ 1070 \ 1071 product(ccstr, NativeMemoryTracking, "off", \ 1072 "Native memory tracking options") \ 1073 \ 1074 diagnostic(bool, PrintNMTStatistics, false, \ 1075 "Print native memory tracking summary data if it is on") \ 1076 \ 1077 diagnostic(bool, LogCompilation, false, \ 1078 "Log compilation activity in detail to LogFile") \ 1079 \ 1080 product(bool, PrintCompilation, false, \ 1081 "Print compilations") \ 1082 \ 1083 diagnostic(bool, TraceNMethodInstalls, false, \ 1084 "Trace nmethod installation") \ 1085 \ 1086 diagnostic(intx, ScavengeRootsInCode, 2, \ 1087 "0: do not allow scavengable oops in the code cache; " \ 1088 "1: allow scavenging from the code cache; " \ 1089 "2: emit as many constants as the compiler can see") \ 1090 range(0, 2) \ 1091 \ 1092 product(bool, AlwaysRestoreFPU, false, \ 1093 "Restore the FPU control word after every JNI call (expensive)") \ 1094 \ 1095 diagnostic(bool, PrintCompilation2, false, \ 1096 "Print additional statistics per compilation") \ 1097 \ 1098 diagnostic(bool, PrintAdapterHandlers, false, \ 1099 "Print code generated for i2c/c2i adapters") \ 1100 \ 1101 diagnostic(bool, VerifyAdapterCalls, trueInDebug, \ 1102 "Verify that i2c/c2i adapters are called properly") \ 1103 \ 1104 develop(bool, VerifyAdapterSharing, false, \ 1105 "Verify that the code for shared adapters is the equivalent") \ 1106 \ 1107 diagnostic(bool, PrintAssembly, false, \ 1108 "Print assembly code (using external disassembler.so)") \ 1109 \ 1110 diagnostic(ccstr, PrintAssemblyOptions, NULL, \ 1111 "Print options string passed to disassembler.so") \ 1112 \ 1113 notproduct(bool, PrintNMethodStatistics, false, \ 1114 "Print a summary statistic for the generated nmethods") \ 1115 \ 1116 diagnostic(bool, PrintNMethods, false, \ 1117 "Print assembly code for nmethods when generated") \ 1118 \ 1119 diagnostic(bool, PrintNativeNMethods, false, \ 1120 "Print assembly code for native nmethods when generated") \ 1121 \ 1122 develop(bool, PrintDebugInfo, false, \ 
1123 "Print debug information for all nmethods when generated") \ 1124 \ 1125 develop(bool, PrintRelocations, false, \ 1126 "Print relocation information for all nmethods when generated") \ 1127 \ 1128 develop(bool, PrintDependencies, false, \ 1129 "Print dependency information for all nmethods when generated") \ 1130 \ 1131 develop(bool, PrintExceptionHandlers, false, \ 1132 "Print exception handler tables for all nmethods when generated") \ 1133 \ 1134 develop(bool, StressCompiledExceptionHandlers, false, \ 1135 "Exercise compiled exception handlers") \ 1136 \ 1137 develop(bool, InterceptOSException, false, \ 1138 "Start debugger when an implicit OS (e.g. NULL) " \ 1139 "exception happens") \ 1140 \ 1141 product(bool, PrintCodeCache, false, \ 1142 "Print the code cache memory usage when exiting") \ 1143 \ 1144 develop(bool, PrintCodeCache2, false, \ 1145 "Print detailed usage information on the code cache when exiting")\ 1146 \ 1147 product(bool, PrintCodeCacheOnCompilation, false, \ 1148 "Print the code cache memory usage each time a method is " \ 1149 "compiled") \ 1150 \ 1151 diagnostic(bool, PrintStubCode, false, \ 1152 "Print generated stub code") \ 1153 \ 1154 product(bool, StackTraceInThrowable, true, \ 1155 "Collect backtrace in throwable when exception happens") \ 1156 \ 1157 product(bool, OmitStackTraceInFastThrow, true, \ 1158 "Omit backtraces for some 'hot' exceptions in optimized code") \ 1159 \ 1160 product(bool, ProfilerPrintByteCodeStatistics, false, \ 1161 "Print bytecode statistics when dumping profiler output") \ 1162 \ 1163 product(bool, ProfilerRecordPC, false, \ 1164 "Collect ticks for each 16 byte interval of compiled code") \ 1165 \ 1166 product(bool, ProfileVM, false, \ 1167 "Profile ticks that fall within VM (either in the VM Thread " \ 1168 "or VM code called through stubs)") \ 1169 \ 1170 product(bool, ProfileIntervals, false, \ 1171 "Print profiles for each interval (see ProfileIntervalsTicks)") \ 1172 \ 1173 notproduct(bool, ProfilerCheckIntervals, false, \ 1174 "Collect and print information on spacing of profiler ticks") \ 1175 \ 1176 product(bool, PrintWarnings, true, \ 1177 "Print JVM warnings to output stream") \ 1178 \ 1179 notproduct(uintx, WarnOnStalledSpinLock, 0, \ 1180 "Print warnings for stalled SpinLocks") \ 1181 \ 1182 product(bool, RegisterFinalizersAtInit, true, \ 1183 "Register finalizable objects at end of Object.<init> or " \ 1184 "after allocation") \ 1185 \ 1186 develop(bool, RegisterReferences, true, \ 1187 "Tell whether the VM should register soft/weak/final/phantom " \ 1188 "references") \ 1189 \ 1190 develop(bool, IgnoreRewrites, false, \ 1191 "Suppress rewrites of bytecodes in the oopmap generator. 
" \ 1192 "This is unsafe!") \ 1193 \ 1194 develop(bool, PrintCodeCacheExtension, false, \ 1195 "Print extension of code cache") \ 1196 \ 1197 develop(bool, UsePrivilegedStack, true, \ 1198 "Enable the security JVM functions") \ 1199 \ 1200 develop(bool, ProtectionDomainVerification, true, \ 1201 "Verify protection domain before resolution in system dictionary")\ 1202 \ 1203 product(bool, ClassUnloading, true, \ 1204 "Do unloading of classes") \ 1205 \ 1206 product(bool, ClassUnloadingWithConcurrentMark, true, \ 1207 "Do unloading of classes with a concurrent marking cycle") \ 1208 \ 1209 develop(bool, DisableStartThread, false, \ 1210 "Disable starting of additional Java threads " \ 1211 "(for debugging only)") \ 1212 \ 1213 develop(bool, MemProfiling, false, \ 1214 "Write memory usage profiling to log file") \ 1215 \ 1216 notproduct(bool, PrintSystemDictionaryAtExit, false, \ 1217 "Print the system dictionary at exit") \ 1218 \ 1219 experimental(intx, PredictedLoadedClassCount, 0, \ 1220 "Experimental: Tune loaded class cache starting size") \ 1221 \ 1222 diagnostic(bool, UnsyncloadClass, false, \ 1223 "Unstable: VM calls loadClass unsynchronized. Custom " \ 1224 "class loader must call VM synchronized for findClass " \ 1225 "and defineClass.") \ 1226 \ 1227 product(bool, AlwaysLockClassLoader, false, \ 1228 "Require the VM to acquire the class loader lock before calling " \ 1229 "loadClass() even for class loaders registering " \ 1230 "as parallel capable") \ 1231 \ 1232 product(bool, AllowParallelDefineClass, false, \ 1233 "Allow parallel defineClass requests for class loaders " \ 1234 "registering as parallel capable") \ 1235 \ 1236 product(bool, MustCallLoadClassInternal, false, \ 1237 "Call loadClassInternal() rather than loadClass()") \ 1238 \ 1239 product_pd(bool, DontYieldALot, \ 1240 "Throw away obvious excess yield calls") \ 1241 \ 1242 product(bool, ConvertSleepToYield, true, \ 1243 "Convert sleep(0) to thread yield ") \ 1244 \ 1245 product(bool, ConvertYieldToSleep, false, \ 1246 "Convert yield to a sleep of MinSleepInterval to simulate Win32 " \ 1247 "behavior") \ 1248 \ 1249 develop(bool, UseDetachedThreads, true, \ 1250 "Use detached threads that are recycled upon termination " \ 1251 "(for Solaris only)") \ 1252 \ 1253 product(bool, UseLWPSynchronization, true, \ 1254 "Use LWP-based instead of libthread-based synchronization " \ 1255 "(SPARC only)") \ 1256 \ 1257 experimental(ccstr, SyncKnobs, NULL, \ 1258 "(Unstable) Various monitor synchronization tunables") \ 1259 \ 1260 experimental(intx, EmitSync, 0, \ 1261 "(Unsafe, Unstable) " \ 1262 "Control emission of inline sync fast-path code") \ 1263 \ 1264 product(intx, MonitorBound, 0, "Bound Monitor population") \ 1265 range(0, max_jint) \ 1266 \ 1267 product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \ 1268 \ 1269 experimental(intx, SyncFlags, 0, "(Unsafe, Unstable) " \ 1270 "Experimental Sync flags") \ 1271 \ 1272 experimental(intx, SyncVerbose, 0, "(Unstable)") \ 1273 \ 1274 diagnostic(bool, InlineNotify, true, "intrinsify subset of notify") \ 1275 \ 1276 experimental(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)") \ 1277 \ 1278 experimental(intx, hashCode, 5, \ 1279 "(Unstable) select hashCode generation algorithm") \ 1280 \ 1281 product(bool, FilterSpuriousWakeups, true, \ 1282 "When true prevents OS-level spurious, or premature, wakeups " \ 1283 "from Object.wait (Ignored for Windows)") \ 1284 \ 1285 experimental(intx, NativeMonitorTimeout, -1, "(Unstable)") \ 1286 \ 1287 experimental(intx, 
NativeMonitorFlags, 0, "(Unstable)") \ 1288 \ 1289 experimental(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \ 1290 \ 1291 develop(bool, UsePthreads, false, \ 1292 "Use pthread-based instead of libthread-based synchronization " \ 1293 "(SPARC only)") \ 1294 \ 1295 product(bool, ReduceSignalUsage, false, \ 1296 "Reduce the use of OS signals in Java and/or the VM") \ 1297 \ 1298 develop_pd(bool, ShareVtableStubs, \ 1299 "Share vtable stubs (smaller code but worse branch prediction") \ 1300 \ 1301 develop(bool, LoadLineNumberTables, true, \ 1302 "Tell whether the class file parser loads line number tables") \ 1303 \ 1304 develop(bool, LoadLocalVariableTables, true, \ 1305 "Tell whether the class file parser loads local variable tables") \ 1306 \ 1307 develop(bool, LoadLocalVariableTypeTables, true, \ 1308 "Tell whether the class file parser loads local variable type" \ 1309 "tables") \ 1310 \ 1311 product(bool, AllowUserSignalHandlers, false, \ 1312 "Do not complain if the application installs signal handlers " \ 1313 "(Solaris & Linux only)") \ 1314 \ 1315 product(bool, UseSignalChaining, true, \ 1316 "Use signal-chaining to invoke signal handlers installed " \ 1317 "by the application (Solaris & Linux only)") \ 1318 \ 1319 product(bool, AllowJNIEnvProxy, false, \ 1320 "Allow JNIEnv proxies for jdbx") \ 1321 \ 1322 product(bool, RestoreMXCSROnJNICalls, false, \ 1323 "Restore MXCSR when returning from JNI calls") \ 1324 \ 1325 product(bool, CheckJNICalls, false, \ 1326 "Verify all arguments to JNI calls") \ 1327 \ 1328 product(bool, CheckEndorsedAndExtDirs, false, \ 1329 "Verify the endorsed and extension directories are not used") \ 1330 \ 1331 product(bool, UseFastJNIAccessors, true, \ 1332 "Use optimized versions of Get<Primitive>Field") \ 1333 \ 1334 product(intx, MaxJNILocalCapacity, 65536, \ 1335 "Maximum allowable local JNI handle capacity to " \ 1336 "EnsureLocalCapacity() and PushLocalFrame(), " \ 1337 "where <= 0 is unlimited, default: 65536") \ 1338 range(min_intx, max_intx) \ 1339 \ 1340 product(bool, EagerXrunInit, false, \ 1341 "Eagerly initialize -Xrun libraries; allows startup profiling, " \ 1342 "but not all -Xrun libraries may support the state of the VM " \ 1343 "at this time") \ 1344 \ 1345 product(bool, PreserveAllAnnotations, false, \ 1346 "Preserve RuntimeInvisibleAnnotations as well " \ 1347 "as RuntimeVisibleAnnotations") \ 1348 \ 1349 develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \ 1350 "Number of OutOfMemoryErrors preallocated with backtrace") \ 1351 \ 1352 product(bool, UseXMMForArrayCopy, false, \ 1353 "Use SSE2 MOVQ instruction for Arraycopy") \ 1354 \ 1355 product(intx, FieldsAllocationStyle, 1, \ 1356 "0 - type based with oops first, " \ 1357 "1 - with oops last, " \ 1358 "2 - oops in super and sub classes are together") \ 1359 range(0, 2) \ 1360 \ 1361 product(bool, CompactFields, true, \ 1362 "Allocate nonstatic fields in gaps between previous fields") \ 1363 \ 1364 notproduct(bool, PrintFieldLayout, false, \ 1365 "Print field layout for each class") \ 1366 \ 1367 /* Need to limit the extent of the padding to reasonable size. */\ 1368 /* 8K is well beyond the reasonable HW cache line size, even with */\ 1369 /* aggressive prefetching, while still leaving the room for segregating */\ 1370 /* among the distinct pages. 
*/\ 1371 product(intx, ContendedPaddingWidth, 128, \ 1372 "How many bytes to pad the fields/classes marked @Contended with")\ 1373 range(0, 8192) \ 1374 constraint(ContendedPaddingWidthConstraintFunc,AfterErgo) \ 1375 \ 1376 product(bool, EnableContended, true, \ 1377 "Enable @Contended annotation support") \ 1378 \ 1379 product(bool, RestrictContended, true, \ 1380 "Restrict @Contended to trusted classes") \ 1381 \ 1382 product(bool, UseBiasedLocking, true, \ 1383 "Enable biased locking in JVM") \ 1384 \ 1385 product(intx, BiasedLockingStartupDelay, 4000, \ 1386 "Number of milliseconds to wait before enabling biased locking") \ 1387 range(0, (intx)(max_jint-(max_jint%PeriodicTask::interval_gran))) \ 1388 constraint(BiasedLockingStartupDelayFunc,AfterErgo) \ 1389 \ 1390 diagnostic(bool, PrintBiasedLockingStatistics, false, \ 1391 "Print statistics of biased locking in JVM") \ 1392 \ 1393 product(intx, BiasedLockingBulkRebiasThreshold, 20, \ 1394 "Threshold of number of revocations per type to try to " \ 1395 "rebias all objects in the heap of that type") \ 1396 range(0, max_intx) \ 1397 constraint(BiasedLockingBulkRebiasThresholdFunc,AfterErgo) \ 1398 \ 1399 product(intx, BiasedLockingBulkRevokeThreshold, 40, \ 1400 "Threshold of number of revocations per type to permanently " \ 1401 "revoke biases of all objects in the heap of that type") \ 1402 range(0, max_intx) \ 1403 constraint(BiasedLockingBulkRevokeThresholdFunc,AfterErgo) \ 1404 \ 1405 product(intx, BiasedLockingDecayTime, 25000, \ 1406 "Decay time (in milliseconds) to re-enable bulk rebiasing of a " \ 1407 "type after previous bulk rebias") \ 1408 range(500, max_intx) \ 1409 constraint(BiasedLockingDecayTimeFunc,AfterErgo) \ 1410 \ 1411 product(bool, ExitOnOutOfMemoryError, false, \ 1412 "JVM exits on the first occurrence of an out-of-memory error") \ 1413 \ 1414 product(bool, CrashOnOutOfMemoryError, false, \ 1415 "JVM aborts, producing an error log and core/mini dump, on the " \ 1416 "first occurrence of an out-of-memory error") \ 1417 \ 1418 /* tracing */ \ 1419 \ 1420 develop(bool, StressRewriter, false, \ 1421 "Stress linktime bytecode rewriting") \ 1422 \ 1423 product(ccstr, TraceJVMTI, NULL, \ 1424 "Trace flags for JVMTI functions and events") \ 1425 \ 1426 /* This option can change an EMCP method into an obsolete method. */ \ 1427 /* This can affect tests that except specific methods to be EMCP. */ \ 1428 /* This option should be used with caution. 
*/ \ 1429 product(bool, StressLdcRewrite, false, \ 1430 "Force ldc -> ldc_w rewrite during RedefineClasses") \ 1431 \ 1432 product(uintx, TraceRedefineClasses, 0, \ 1433 "Trace level for JVMTI RedefineClasses") \ 1434 range(0, 0xFFFFFFFF) \ 1435 \ 1436 /* change to false by default sometime after Mustang */ \ 1437 product(bool, VerifyMergedCPBytecodes, true, \ 1438 "Verify bytecodes after RedefineClasses constant pool merging") \ 1439 \ 1440 develop(bool, TraceJNIHandleAllocation, false, \ 1441 "Trace allocation/deallocation of JNI handle blocks") \ 1442 \ 1443 develop(bool, TraceBytecodes, false, \ 1444 "Trace bytecode execution") \ 1445 \ 1446 develop(bool, TraceICs, false, \ 1447 "Trace inline cache changes") \ 1448 \ 1449 notproduct(bool, TraceInvocationCounterOverflow, false, \ 1450 "Trace method invocation counter overflow") \ 1451 \ 1452 develop(bool, TraceInlineCacheClearing, false, \ 1453 "Trace clearing of inline caches in nmethods") \ 1454 \ 1455 develop(bool, TraceDependencies, false, \ 1456 "Trace dependencies") \ 1457 \ 1458 develop(bool, VerifyDependencies, trueInDebug, \ 1459 "Exercise and verify the compilation dependency mechanism") \ 1460 \ 1461 develop(bool, TraceNewOopMapGeneration, false, \ 1462 "Trace OopMapGeneration") \ 1463 \ 1464 develop(bool, TraceNewOopMapGenerationDetailed, false, \ 1465 "Trace OopMapGeneration: print detailed cell states") \ 1466 \ 1467 develop(bool, TimeOopMap, false, \ 1468 "Time calls to GenerateOopMap::compute_map() in sum") \ 1469 \ 1470 develop(bool, TimeOopMap2, false, \ 1471 "Time calls to GenerateOopMap::compute_map() individually") \ 1472 \ 1473 develop(bool, TraceMonitorMismatch, false, \ 1474 "Trace monitor matching failures during OopMapGeneration") \ 1475 \ 1476 develop(bool, TraceOopMapRewrites, false, \ 1477 "Trace rewriting of method oops during oop map generation") \ 1478 \ 1479 develop(bool, TraceICBuffer, false, \ 1480 "Trace usage of IC buffer") \ 1481 \ 1482 develop(bool, TraceCompiledIC, false, \ 1483 "Trace changes of compiled IC") \ 1484 \ 1485 develop(bool, TraceStartupTime, false, \ 1486 "Trace setup time") \ 1487 \ 1488 develop(bool, TraceProtectionDomainVerification, false, \ 1489 "Trace protection domain verification") \ 1490 \ 1491 develop(bool, TraceClearedExceptions, false, \ 1492 "Print when an exception is forcibly cleared") \ 1493 \ 1494 product(bool, TraceBiasedLocking, false, \ 1495 "Trace biased locking in JVM") \ 1496 \ 1497 /* gc */ \ 1498 \ 1499 product(bool, UseSerialGC, false, \ 1500 "Use the Serial garbage collector") \ 1501 \ 1502 product(bool, UseG1GC, false, \ 1503 "Use the Garbage-First garbage collector") \ 1504 \ 1505 product(bool, UseParallelGC, false, \ 1506 "Use the Parallel Scavenge garbage collector") \ 1507 \ 1508 product(bool, UseParallelOldGC, false, \ 1509 "Use the Parallel Old garbage collector") \ 1510 \ 1511 product(uintx, HeapMaximumCompactionInterval, 20, \ 1512 "How often should we maximally compact the heap (not allowing " \ 1513 "any dead space)") \ 1514 range(0, max_uintx) \ 1515 \ 1516 product(uintx, HeapFirstMaximumCompactionCount, 3, \ 1517 "The collection count for the first maximum compaction") \ 1518 range(0, max_uintx) \ 1519 \ 1520 product(bool, UseMaximumCompactionOnSystemGC, true, \ 1521 "Use maximum compaction in the Parallel Old garbage collector " \ 1522 "for a system GC") \ 1523 \ 1524 product(uintx, ParallelOldDeadWoodLimiterMean, 50, \ 1525 "The mean used by the parallel compact dead wood " \ 1526 "limiter (a number between 0-100)") \ 1527 range(0, 100) \ 
1528 \ 1529 product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \ 1530 "The standard deviation used by the parallel compact dead wood " \ 1531 "limiter (a number between 0-100)") \ 1532 range(0, 100) \ 1533 \ 1534 product(uint, ParallelGCThreads, 0, \ 1535 "Number of parallel threads parallel gc will use") \ 1536 constraint(ParallelGCThreadsConstraintFunc,AfterErgo) \ 1537 \ 1538 diagnostic(bool, UseSemaphoreGCThreadsSynchronization, true, \ 1539 "Use semaphore synchronization for the GC Threads, " \ 1540 "instead of synchronization based on mutexes") \ 1541 \ 1542 product(bool, UseDynamicNumberOfGCThreads, false, \ 1543 "Dynamically choose the number of parallel threads " \ 1544 "parallel gc will use") \ 1545 \ 1546 diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \ 1547 "Force dynamic selection of the number of " \ 1548 "parallel threads parallel gc will use to aid debugging") \ 1549 \ 1550 product(size_t, HeapSizePerGCThread, ScaleForWordSize(64*M), \ 1551 "Size of heap (bytes) per GC thread used in calculating the " \ 1552 "number of GC threads") \ 1553 range((size_t)os::vm_page_size(), (size_t)max_uintx) \ 1554 \ 1555 product(uint, ConcGCThreads, 0, \ 1556 "Number of threads concurrent gc will use") \ 1557 constraint(ConcGCThreadsConstraintFunc,AfterErgo) \ 1558 \ 1559 product(uintx, GCTaskTimeStampEntries, 200, \ 1560 "Number of time stamp entries per gc worker thread") \ 1561 range(1, max_uintx) \ 1562 \ 1563 product(bool, AlwaysTenure, false, \ 1564 "Always tenure objects in eden (ParallelGC only)") \ 1565 \ 1566 product(bool, NeverTenure, false, \ 1567 "Never tenure objects in eden, may tenure on overflow " \ 1568 "(ParallelGC only)") \ 1569 \ 1570 product(bool, ScavengeBeforeFullGC, true, \ 1571 "Scavenge youngest generation before each full GC.") \ 1572 \ 1573 product(bool, UseConcMarkSweepGC, false, \ 1574 "Use Concurrent Mark-Sweep GC in the old generation") \ 1575 \ 1576 product(bool, ExplicitGCInvokesConcurrent, false, \ 1577 "A System.gc() request invokes a concurrent collection; " \ 1578 "(effective only when using concurrent collectors)") \ 1579 \ 1580 product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \ 1581 "A System.gc() request invokes a concurrent collection and " \ 1582 "also unloads classes during such a concurrent gc cycle " \ 1583 "(effective only when UseConcMarkSweepGC)") \ 1584 \ 1585 product(bool, GCLockerInvokesConcurrent, false, \ 1586 "The exit of a JNI critical section necessitating a scavenge, " \ 1587 "also kicks off a background concurrent collection") \ 1588 \ 1589 product(uintx, GCLockerEdenExpansionPercent, 5, \ 1590 "How much the GC can expand the eden by while the GC locker " \ 1591 "is active (as a percentage)") \ 1592 range(0, 100) \ 1593 \ 1594 diagnostic(uintx, GCLockerRetryAllocationCount, 2, \ 1595 "Number of times to retry allocations when " \ 1596 "blocked by the GC locker") \ 1597 range(0, max_uintx) \ 1598 \ 1599 product(bool, UseCMSBestFit, true, \ 1600 "Use CMS best fit allocation strategy") \ 1601 \ 1602 product(bool, UseParNewGC, false, \ 1603 "Use parallel threads in the new generation") \ 1604 \ 1605 product(uintx, ParallelGCBufferWastePct, 10, \ 1606 "Wasted fraction of parallel allocation buffer") \ 1607 range(0, 100) \ 1608 \ 1609 product(uintx, TargetPLABWastePct, 10, \ 1610 "Target wasted space in last buffer as percent of overall " \ 1611 "allocation") \ 1612 range(1, 100) \ 1613 \ 1614 product(uintx, PLABWeight, 75, \ 1615 "Percentage (0-100) used to weight the current sample when " \ 1616 "computing 
exponentially decaying average for ResizePLAB") \ 1617 range(0, 100) \ 1618 \ 1619 product(bool, ResizePLAB, true, \ 1620 "Dynamically resize (survivor space) promotion LAB's") \ 1621 \ 1622 product(intx, ParGCArrayScanChunk, 50, \ 1623 "Scan a subset of object array and push remainder, if array is " \ 1624 "bigger than this") \ 1625 range(1, max_intx) \ 1626 \ 1627 product(bool, ParGCUseLocalOverflow, false, \ 1628 "Instead of a global overflow list, use local overflow stacks") \ 1629 \ 1630 product(bool, ParGCTrimOverflow, true, \ 1631 "Eagerly trim the local overflow lists " \ 1632 "(when ParGCUseLocalOverflow)") \ 1633 \ 1634 notproduct(bool, ParGCWorkQueueOverflowALot, false, \ 1635 "Simulate work queue overflow in ParNew") \ 1636 \ 1637 notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \ 1638 "An `interval' counter that determines how frequently " \ 1639 "we simulate overflow; a smaller number increases frequency") \ 1640 \ 1641 product(uintx, ParGCDesiredObjsFromOverflowList, 20, \ 1642 "The desired number of objects to claim from the overflow list") \ 1643 range(0, max_uintx) \ 1644 \ 1645 diagnostic(uintx, ParGCStridesPerThread, 2, \ 1646 "The number of strides per worker thread that we divide up the " \ 1647 "card table scanning work into") \ 1648 range(1, max_uintx) \ 1649 constraint(ParGCStridesPerThreadConstraintFunc,AfterErgo) \ 1650 \ 1651 diagnostic(intx, ParGCCardsPerStrideChunk, 256, \ 1652 "The number of cards in each chunk of the parallel chunks used " \ 1653 "during card table scanning") \ 1654 range(1, max_intx) \ 1655 \ 1656 product(uintx, OldPLABWeight, 50, \ 1657 "Percentage (0-100) used to weight the current sample when " \ 1658 "computing exponentially decaying average for resizing " \ 1659 "OldPLABSize") \ 1660 range(0, 100) \ 1661 \ 1662 product(bool, ResizeOldPLAB, true, \ 1663 "Dynamically resize (old gen) promotion LAB's") \ 1664 \ 1665 product(size_t, CMSOldPLABMax, 1024, \ 1666 "Maximum size of CMS gen promotion LAB caches per worker " \ 1667 "per block size") \ 1668 range(1, max_uintx) \ 1669 constraint(CMSOldPLABMaxConstraintFunc,AfterMemoryInit) \ 1670 \ 1671 product(size_t, CMSOldPLABMin, 16, \ 1672 "Minimum size of CMS gen promotion LAB caches per worker " \ 1673 "per block size") \ 1674 range(1, max_uintx) \ 1675 constraint(CMSOldPLABMinConstraintFunc,AfterMemoryInit) \ 1676 \ 1677 product(uintx, CMSOldPLABNumRefills, 4, \ 1678 "Nominal number of refills of CMS gen promotion LAB cache " \ 1679 "per worker per block size") \ 1680 range(1, max_uintx) \ 1681 \ 1682 product(bool, CMSOldPLABResizeQuicker, false, \ 1683 "React on-the-fly during a scavenge to a sudden " \ 1684 "change in block demand rate") \ 1685 \ 1686 product(uintx, CMSOldPLABToleranceFactor, 4, \ 1687 "The tolerance of the phase-change detector for on-the-fly " \ 1688 "PLAB resizing during a scavenge") \ 1689 range(1, max_uintx) \ 1690 \ 1691 product(uintx, CMSOldPLABReactivityFactor, 2, \ 1692 "The gain in the feedback loop for on-the-fly PLAB resizing " \ 1693 "during a scavenge") \ 1694 range(1, max_uintx) \ 1695 \ 1696 product(bool, AlwaysPreTouch, false, \ 1697 "Force all freshly committed pages to be pre-touched") \ 1698 \ 1699 product_pd(size_t, CMSYoungGenPerWorker, \ 1700 "The maximum size of young gen chosen by default per GC worker " \ 1701 "thread available") \ 1702 range(1, max_uintx) \ 1703 \ 1704 product(uintx, CMSIncrementalSafetyFactor, 10, \ 1705 "Percentage (0-100) used to add conservatism when computing the " \ 1706 "duty cycle") \ 1707 range(0, 100) \ 1708 \ 
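/* Illustrative aside, not from the original source: the various "weight"    */ \
/* flags (e.g. PLABWeight and OldPLABWeight above, CMSExpAvgFactor below)    */ \
/* give the current sample a share of weight/100 in an exponentially         */ \
/* decaying average; with illustrative names, roughly:                       */ \
/*                                                                            */ \
/*   new_avg = (weight / 100.0) * sample + (1.0 - weight / 100.0) * old_avg  */ \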
1709 product(uintx, CMSExpAvgFactor, 50, \ 1710 "Percentage (0-100) used to weight the current sample when " \ 1711 "computing exponential averages for CMS statistics") \ 1712 range(0, 100) \ 1713 \ 1714 product(uintx, CMS_FLSWeight, 75, \ 1715 "Percentage (0-100) used to weight the current sample when " \ 1716 "computing exponentially decaying averages for CMS FLS " \ 1717 "statistics") \ 1718 range(0, 100) \ 1719 \ 1720 product(uintx, CMS_FLSPadding, 1, \ 1721 "The multiple of deviation from mean to use for buffering " \ 1722 "against volatility in free list demand") \ 1723 range(0, max_juint) \ 1724 \ 1725 product(uintx, FLSCoalescePolicy, 2, \ 1726 "CMS: aggressiveness level for coalescing, increasing " \ 1727 "from 0 to 4") \ 1728 range(0, 4) \ 1729 \ 1730 product(bool, FLSAlwaysCoalesceLarge, false, \ 1731 "CMS: larger free blocks are always available for coalescing") \ 1732 \ 1733 product(double, FLSLargestBlockCoalesceProximity, 0.99, \ 1734 "CMS: the smaller the percentage the greater the coalescing " \ 1735 "force") \ 1736 range(0.0, 1.0) \ 1737 \ 1738 product(double, CMSSmallCoalSurplusPercent, 1.05, \ 1739 "CMS: the factor by which to inflate estimated demand of small " \ 1740 "block sizes to prevent coalescing with an adjoining block") \ 1741 range(0.0, DBL_MAX) \ 1742 \ 1743 product(double, CMSLargeCoalSurplusPercent, 0.95, \ 1744 "CMS: the factor by which to inflate estimated demand of large " \ 1745 "block sizes to prevent coalescing with an adjoining block") \ 1746 range(0.0, DBL_MAX) \ 1747 \ 1748 product(double, CMSSmallSplitSurplusPercent, 1.10, \ 1749 "CMS: the factor by which to inflate estimated demand of small " \ 1750 "block sizes to prevent splitting to supply demand for smaller " \ 1751 "blocks") \ 1752 range(0.0, DBL_MAX) \ 1753 \ 1754 product(double, CMSLargeSplitSurplusPercent, 1.00, \ 1755 "CMS: the factor by which to inflate estimated demand of large " \ 1756 "block sizes to prevent splitting to supply demand for smaller " \ 1757 "blocks") \ 1758 range(0.0, DBL_MAX) \ 1759 \ 1760 product(bool, CMSExtrapolateSweep, false, \ 1761 "CMS: cushion for block demand during sweep") \ 1762 \ 1763 product(uintx, CMS_SweepWeight, 75, \ 1764 "Percentage (0-100) used to weight the current sample when " \ 1765 "computing exponentially decaying average for inter-sweep " \ 1766 "duration") \ 1767 range(0, 100) \ 1768 \ 1769 product(uintx, CMS_SweepPadding, 1, \ 1770 "The multiple of deviation from mean to use for buffering " \ 1771 "against volatility in inter-sweep duration") \ 1772 range(0, max_juint) \ 1773 \ 1774 product(uintx, CMS_SweepTimerThresholdMillis, 10, \ 1775 "Skip block flux-rate sampling for an epoch unless inter-sweep " \ 1776 "duration exceeds this threshold in milliseconds") \ 1777 range(0, max_uintx) \ 1778 \ 1779 product(bool, CMSClassUnloadingEnabled, true, \ 1780 "Whether class unloading enabled when using CMS GC") \ 1781 \ 1782 product(uintx, CMSClassUnloadingMaxInterval, 0, \ 1783 "When CMS class unloading is enabled, the maximum CMS cycle " \ 1784 "count for which classes may not be unloaded") \ 1785 range(0, max_uintx) \ 1786 \ 1787 product(uintx, CMSIndexedFreeListReplenish, 4, \ 1788 "Replenish an indexed free list with this number of chunks") \ 1789 range(1, max_uintx) \ 1790 \ 1791 product(bool, CMSReplenishIntermediate, true, \ 1792 "Replenish all intermediate free-list caches") \ 1793 \ 1794 product(bool, CMSSplitIndexedFreeListBlocks, true, \ 1795 "When satisfying batched demand, split blocks from the " \ 1796 "IndexedFreeList whose size is 
a multiple of requested size") \ 1797 \ 1798 product(bool, CMSLoopWarn, false, \ 1799 "Warn in case of excessive CMS looping") \ 1800 \ 1801 /* where does the range max value of (max_jint - 1) come from? */ \ 1802 product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \ 1803 "Maximum size of marking stack") \ 1804 range(1, (max_jint - 1)) \ 1805 \ 1806 product(size_t, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \ 1807 "Size of marking stack") \ 1808 constraint(MarkStackSizeConstraintFunc,AfterErgo) \ 1809 \ 1810 notproduct(bool, CMSMarkStackOverflowALot, false, \ 1811 "Simulate frequent marking stack / work queue overflow") \ 1812 \ 1813 notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \ 1814 "An \"interval\" counter that determines how frequently " \ 1815 "to simulate overflow; a smaller number increases frequency") \ 1816 \ 1817 product(uintx, CMSMaxAbortablePrecleanLoops, 0, \ 1818 "Maximum number of abortable preclean iterations, if > 0") \ 1819 range(0, max_uintx) \ 1820 \ 1821 product(intx, CMSMaxAbortablePrecleanTime, 5000, \ 1822 "Maximum time in abortable preclean (in milliseconds)") \ 1823 range(0, max_intx) \ 1824 \ 1825 product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \ 1826 "Nominal minimum work per abortable preclean iteration") \ 1827 range(0, max_uintx) \ 1828 \ 1829 manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \ 1830 "Time that we sleep between iterations when not given " \ 1831 "enough work per iteration") \ 1832 range(0, max_intx) \ 1833 \ 1834 product(size_t, CMSRescanMultiple, 32, \ 1835 "Size (in cards) of CMS parallel rescan task") \ 1836 range(1, max_uintx) \ 1837 \ 1838 product(size_t, CMSConcMarkMultiple, 32, \ 1839 "Size (in cards) of CMS concurrent MT marking task") \ 1840 range(1, max_uintx) \ 1841 \ 1842 product(bool, CMSAbortSemantics, false, \ 1843 "Whether abort-on-overflow semantics is implemented") \ 1844 \ 1845 product(bool, CMSParallelInitialMarkEnabled, true, \ 1846 "Use the parallel initial mark.") \ 1847 \ 1848 product(bool, CMSParallelRemarkEnabled, true, \ 1849 "Whether parallel remark enabled (only if ParNewGC)") \ 1850 \ 1851 product(bool, CMSParallelSurvivorRemarkEnabled, true, \ 1852 "Whether parallel remark of survivor space " \ 1853 "enabled (effective only if CMSParallelRemarkEnabled)") \ 1854 \ 1855 product(bool, CMSPLABRecordAlways, true, \ 1856 "Always record survivor space PLAB boundaries (effective only " \ 1857 "if CMSParallelSurvivorRemarkEnabled)") \ 1858 \ 1859 product(bool, CMSEdenChunksRecordAlways, true, \ 1860 "Always record eden chunks used for the parallel initial mark " \ 1861 "or remark of eden") \ 1862 \ 1863 product(bool, CMSConcurrentMTEnabled, true, \ 1864 "Whether multi-threaded concurrent work enabled " \ 1865 "(effective only if ParNewGC)") \ 1866 \ 1867 product(bool, CMSPrecleaningEnabled, true, \ 1868 "Whether concurrent precleaning enabled") \ 1869 \ 1870 product(uintx, CMSPrecleanIter, 3, \ 1871 "Maximum number of precleaning iteration passes") \ 1872 range(0, 9) \ 1873 \ 1874 product(uintx, CMSPrecleanDenominator, 3, \ 1875 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 1876 "ratio") \ 1877 range(1, max_uintx) \ 1878 constraint(CMSPrecleanDenominatorConstraintFunc,AfterErgo) \ 1879 \ 1880 product(uintx, CMSPrecleanNumerator, 2, \ 1881 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 1882 "ratio") \ 1883 range(0, max_uintx-1) \ 1884 constraint(CMSPrecleanNumeratorConstraintFunc,AfterErgo) \ 1885 \ 1886 product(bool, 
CMSPrecleanRefLists1, true, \ 1887 "Preclean ref lists during (initial) preclean phase") \ 1888 \ 1889 product(bool, CMSPrecleanRefLists2, false, \ 1890 "Preclean ref lists during abortable preclean phase") \ 1891 \ 1892 product(bool, CMSPrecleanSurvivors1, false, \ 1893 "Preclean survivors during (initial) preclean phase") \ 1894 \ 1895 product(bool, CMSPrecleanSurvivors2, true, \ 1896 "Preclean survivors during abortable preclean phase") \ 1897 \ 1898 product(uintx, CMSPrecleanThreshold, 1000, \ 1899 "Do not iterate again if number of dirty cards is less than this")\ 1900 range(100, max_uintx) \ 1901 \ 1902 product(bool, CMSCleanOnEnter, true, \ 1903 "Clean-on-enter optimization for reducing number of dirty cards") \ 1904 \ 1905 product(uintx, CMSRemarkVerifyVariant, 1, \ 1906 "Choose variant (1,2) of verification following remark") \ 1907 range(1, 2) \ 1908 \ 1909 product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M, \ 1910 "If Eden size is below this, do not try to schedule remark") \ 1911 range(0, max_uintx) \ 1912 \ 1913 product(uintx, CMSScheduleRemarkEdenPenetration, 50, \ 1914 "The Eden occupancy percentage (0-100) at which " \ 1915 "to try and schedule remark pause") \ 1916 range(0, 100) \ 1917 \ 1918 product(uintx, CMSScheduleRemarkSamplingRatio, 5, \ 1919 "Start sampling eden top at least before young gen " \ 1920 "occupancy reaches 1/<ratio> of the size at which " \ 1921 "we plan to schedule remark") \ 1922 range(1, max_uintx) \ 1923 \ 1924 product(uintx, CMSSamplingGrain, 16*K, \ 1925 "The minimum distance between eden samples for CMS (see above)") \ 1926 range(1, max_uintx) \ 1927 \ 1928 product(bool, CMSScavengeBeforeRemark, false, \ 1929 "Attempt scavenge before the CMS remark step") \ 1930 \ 1931 product(uintx, CMSWorkQueueDrainThreshold, 10, \ 1932 "Don't drain below this size per parallel worker/thief") \ 1933 range(1, max_juint) \ 1934 constraint(CMSWorkQueueDrainThresholdConstraintFunc,AfterErgo) \ 1935 \ 1936 manageable(intx, CMSWaitDuration, 2000, \ 1937 "Time in milliseconds that CMS thread waits for young GC") \ 1938 range(min_jint, max_jint) \ 1939 \ 1940 develop(uintx, CMSCheckInterval, 1000, \ 1941 "Interval in milliseconds that CMS thread checks if it " \ 1942 "should start a collection cycle") \ 1943 \ 1944 product(bool, CMSYield, true, \ 1945 "Yield between steps of CMS") \ 1946 \ 1947 product(size_t, CMSBitMapYieldQuantum, 10*M, \ 1948 "Bitmap operations should process at most this many bits " \ 1949 "between yields") \ 1950 range(1, max_uintx) \ 1951 \ 1952 product(bool, CMSPrintChunksInDump, false, \ 1953 "If logging for the \"gc\" and \"promotion\" tags is enabled on" \ 1954 "trace level include more detailed information about the" \ 1955 "free chunks") \ 1956 \ 1957 product(bool, CMSPrintObjectsInDump, false, \ 1958 "If logging for the \"gc\" and \"promotion\" tags is enabled on" \ 1959 "trace level include more detailed information about the" \ 1960 "allocated objects") \ 1961 \ 1962 diagnostic(bool, FLSVerifyAllHeapReferences, false, \ 1963 "Verify that all references across the FLS boundary " \ 1964 "are to valid objects") \ 1965 \ 1966 diagnostic(bool, FLSVerifyLists, false, \ 1967 "Do lots of (expensive) FreeListSpace verification") \ 1968 \ 1969 diagnostic(bool, FLSVerifyIndexTable, false, \ 1970 "Do lots of (expensive) FLS index table verification") \ 1971 \ 1972 develop(bool, FLSVerifyDictionary, false, \ 1973 "Do lots of (expensive) FLS dictionary verification") \ 1974 \ 1975 develop(bool, VerifyBlockOffsetArray, false, \ 1976 "Do 
(expensive) block offset array verification") \ 1977 \ 1978 diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ 1979 "Maintain _unallocated_block in BlockOffsetArray " \ 1980 "(currently applicable only to CMS collector)") \ 1981 \ 1982 product(intx, RefDiscoveryPolicy, 0, \ 1983 "Select type of reference discovery policy: " \ 1984 "reference-based(0) or referent-based(1)") \ 1985 range(ReferenceProcessor::DiscoveryPolicyMin, \ 1986 ReferenceProcessor::DiscoveryPolicyMax) \ 1987 \ 1988 product(bool, ParallelRefProcEnabled, false, \ 1989 "Enable parallel reference processing whenever possible") \ 1990 \ 1991 product(bool, ParallelRefProcBalancingEnabled, true, \ 1992 "Enable balancing of reference processing queues") \ 1993 \ 1994 product(uintx, CMSTriggerRatio, 80, \ 1995 "Percentage of MinHeapFreeRatio in CMS generation that is " \ 1996 "allocated before a CMS collection cycle commences") \ 1997 range(0, 100) \ 1998 \ 1999 product(uintx, CMSBootstrapOccupancy, 50, \ 2000 "Percentage CMS generation occupancy at which to " \ 2001 "initiate CMS collection for bootstrapping collection stats") \ 2002 range(0, 100) \ 2003 \ 2004 product(intx, CMSInitiatingOccupancyFraction, -1, \ 2005 "Percentage CMS generation occupancy to start a CMS collection " \ 2006 "cycle. A negative value means that CMSTriggerRatio is used") \ 2007 range(min_intx, 100) \ 2008 \ 2009 product(uintx, InitiatingHeapOccupancyPercent, 45, \ 2010 "The percent occupancy (IHOP) of the current old generation " \ 2011 "capacity above which a concurrent mark cycle will be initiated. " \ 2012 "Its value may change over time if adaptive IHOP is enabled, " \ 2013 "otherwise the value remains constant. " \ 2014 "In the latter case a value of 0 will result in concurrent " \ 2015 "marking cycles running as frequently as possible. A value of " \ 2016 "100 disables concurrent marking. " \ 2017 "Fragmentation waste in the old generation is not considered " \ 2018 "free space in this calculation. 
(G1 collector only)") \ 2019 range(0, 100) \ 2020 \ 2021 manageable(intx, CMSTriggerInterval, -1, \ 2022 "Commence a CMS collection cycle (at least) every so many " \ 2023 "milliseconds (0 permanently, -1 disabled)") \ 2024 range(-1, max_intx) \ 2025 \ 2026 product(bool, UseCMSInitiatingOccupancyOnly, false, \ 2027 "Only use occupancy as a criterion for starting a CMS collection")\ 2028 \ 2029 product(uintx, CMSIsTooFullPercentage, 98, \ 2030 "An absolute ceiling above which CMS will always consider the " \ 2031 "unloading of classes when class unloading is enabled") \ 2032 range(0, 100) \ 2033 \ 2034 develop(bool, CMSTestInFreeList, false, \ 2035 "Check if the coalesced range is already in the " \ 2036 "free lists as claimed") \ 2037 \ 2038 notproduct(bool, CMSVerifyReturnedBytes, false, \ 2039 "Check that all the garbage collected was returned to the " \ 2040 "free lists") \ 2041 \ 2042 notproduct(bool, ScavengeALot, false, \ 2043 "Force scavenge at every Nth exit from the runtime system " \ 2044 "(N=ScavengeALotInterval)") \ 2045 \ 2046 develop(bool, FullGCALot, false, \ 2047 "Force full gc at every Nth exit from the runtime system " \ 2048 "(N=FullGCALotInterval)") \ 2049 \ 2050 notproduct(bool, GCALotAtAllSafepoints, false, \ 2051 "Enforce ScavengeALot/GCALot at all potential safepoints") \ 2052 \ 2053 notproduct(bool, PromotionFailureALot, false, \ 2054 "Use promotion failure handling on every youngest generation " \ 2055 "collection") \ 2056 \ 2057 develop(uintx, PromotionFailureALotCount, 1000, \ 2058 "Number of promotion failures occurring at PLAB " \ 2059 "refill attempts (ParNew) or promotion attempts " \ 2060 "(other young collectors)") \ 2061 \ 2062 develop(uintx, PromotionFailureALotInterval, 5, \ 2063 "Total collections between promotion failures a lot") \ 2064 \ 2065 experimental(uintx, WorkStealingSleepMillis, 1, \ 2066 "Sleep time when sleep is used for yields") \ 2067 \ 2068 experimental(uintx, WorkStealingYieldsBeforeSleep, 5000, \ 2069 "Number of yields before a sleep is done during work stealing") \ 2070 \ 2071 experimental(uintx, WorkStealingHardSpins, 4096, \ 2072 "Number of iterations in a spin loop between checks on " \ 2073 "time out of hard spin") \ 2074 \ 2075 experimental(uintx, WorkStealingSpinToYieldRatio, 10, \ 2076 "Ratio of hard spins to calls to yield") \ 2077 \ 2078 develop(uintx, ObjArrayMarkingStride, 512, \ 2079 "Number of object array elements to push onto the marking stack " \ 2080 "before pushing a continuation entry") \ 2081 \ 2082 develop(bool, MetadataAllocationFailALot, false, \ 2083 "Fail metadata allocations at intervals controlled by " \ 2084 "MetadataAllocationFailALotInterval") \ 2085 \ 2086 develop(uintx, MetadataAllocationFailALotInterval, 1000, \ 2087 "Metadata allocation failure a lot interval") \ 2088 \ 2089 develop(bool, TraceMetadataChunkAllocation, false, \ 2090 "Trace chunk metadata allocations") \ 2091 \ 2092 notproduct(bool, ExecuteInternalVMTests, false, \ 2093 "Enable execution of internal VM tests") \ 2094 \ 2095 notproduct(bool, VerboseInternalVMTests, false, \ 2096 "Turn on logging for internal VM tests.") \ 2097 \ 2098 product_pd(bool, UseTLAB, "Use thread-local object allocation") \ 2099 \ 2100 product_pd(bool, ResizeTLAB, \ 2101 "Dynamically resize TLAB size for threads") \ 2102 \ 2103 product(bool, ZeroTLAB, false, \ 2104 "Zero out the newly created TLAB") \ 2105 \ 2106 product(bool, FastTLABRefill, true, \ 2107 "Use fast TLAB refill code") \ 2108 \ 2109 product(bool, TLABStats, true, \ 2110 "Provide more detailed and 
expensive TLAB statistics.") \ 2111 \ 2112 product_pd(bool, NeverActAsServerClassMachine, \ 2113 "Never act like a server-class machine") \ 2114 \ 2115 product(bool, AlwaysActAsServerClassMachine, false, \ 2116 "Always act like a server-class machine") \ 2117 \ 2118 product_pd(uint64_t, MaxRAM, \ 2119 "Real memory size (in bytes) used to set maximum heap size") \ 2120 range(0, 0XFFFFFFFFFFFFFFFF) \ 2121 \ 2122 product(size_t, ErgoHeapSizeLimit, 0, \ 2123 "Maximum ergonomically set heap size (in bytes); zero means use " \ 2124 "MaxRAM / MaxRAMFraction") \ 2125 range(0, max_uintx) \ 2126 \ 2127 product(uintx, MaxRAMFraction, 4, \ 2128 "Maximum fraction (1/n) of real memory used for maximum heap " \ 2129 "size") \ 2130 range(1, max_uintx) \ 2131 \ 2132 product(uintx, MinRAMFraction, 2, \ 2133 "Minimum fraction (1/n) of real memory used for maximum heap " \ 2134 "size on systems with small physical memory size") \ 2135 range(1, max_uintx) \ 2136 \ 2137 product(uintx, InitialRAMFraction, 64, \ 2138 "Fraction (1/n) of real memory used for initial heap size") \ 2139 range(1, max_uintx) \ 2140 \ 2141 develop(uintx, MaxVirtMemFraction, 2, \ 2142 "Maximum fraction (1/n) of virtual memory used for ergonomically "\ 2143 "determining maximum heap size") \ 2144 \ 2145 product(bool, UseAutoGCSelectPolicy, false, \ 2146 "Use automatic collection selection policy") \ 2147 \ 2148 product(uintx, AutoGCSelectPauseMillis, 5000, \ 2149 "Automatic GC selection pause threshold in milliseconds") \ 2150 range(0, max_uintx) \ 2151 \ 2152 product(bool, UseAdaptiveSizePolicy, true, \ 2153 "Use adaptive generation sizing policies") \ 2154 \ 2155 product(bool, UsePSAdaptiveSurvivorSizePolicy, true, \ 2156 "Use adaptive survivor sizing policies") \ 2157 \ 2158 product(bool, UseAdaptiveGenerationSizePolicyAtMinorCollection, true, \ 2159 "Use adaptive young-old sizing policies at minor collections") \ 2160 \ 2161 product(bool, UseAdaptiveGenerationSizePolicyAtMajorCollection, true, \ 2162 "Use adaptive young-old sizing policies at major collections") \ 2163 \ 2164 product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \ 2165 "Include statistics from System.gc() for adaptive size policy") \ 2166 \ 2167 product(bool, UseAdaptiveGCBoundary, false, \ 2168 "Allow young-old boundary to move") \ 2169 \ 2170 develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1, \ 2171 "Resize the virtual spaces of the young or old generations") \ 2172 range(-1, 1) \ 2173 \ 2174 product(uintx, AdaptiveSizeThroughPutPolicy, 0, \ 2175 "Policy for changing generation size for throughput goals") \ 2176 range(0, 1) \ 2177 \ 2178 product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \ 2179 "Number of steps where heuristics is used before data is used") \ 2180 range(0, max_uintx) \ 2181 \ 2182 develop(uintx, AdaptiveSizePolicyReadyThreshold, 5, \ 2183 "Number of collections before the adaptive sizing is started") \ 2184 \ 2185 product(uintx, AdaptiveSizePolicyOutputInterval, 0, \ 2186 "Collection interval for printing information; zero means never") \ 2187 range(0, max_uintx) \ 2188 \ 2189 product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \ 2190 "Use adaptive minimum footprint as a goal") \ 2191 \ 2192 product(uintx, AdaptiveSizePolicyWeight, 10, \ 2193 "Weight given to exponential resizing, between 0 and 100") \ 2194 range(0, 100) \ 2195 \ 2196 product(uintx, AdaptiveTimeWeight, 25, \ 2197 "Weight given to time in adaptive policy, between 0 and 100") \ 2198 range(0, 100) \ 2199 \ 2200 product(uintx, PausePadding, 1, \ 2201 "How much 
buffer to keep for pause time") \ 2202 range(0, max_juint) \ 2203 \ 2204 product(uintx, PromotedPadding, 3, \ 2205 "How much buffer to keep for promotion failure") \ 2206 range(0, max_juint) \ 2207 \ 2208 product(uintx, SurvivorPadding, 3, \ 2209 "How much buffer to keep for survivor overflow") \ 2210 range(0, max_juint) \ 2211 \ 2212 product(uintx, ThresholdTolerance, 10, \ 2213 "Allowed collection cost difference between generations") \ 2214 range(0, 100) \ 2215 \ 2216 product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50, \ 2217 "If collection costs are within margin, reduce both by full " \ 2218 "delta") \ 2219 range(0, 100) \ 2220 \ 2221 product(uintx, YoungGenerationSizeIncrement, 20, \ 2222 "Adaptive size percentage change in young generation") \ 2223 range(0, 100) \ 2224 \ 2225 product(uintx, YoungGenerationSizeSupplement, 80, \ 2226 "Supplement to YoungedGenerationSizeIncrement used at startup") \ 2227 range(0, 100) \ 2228 \ 2229 product(uintx, YoungGenerationSizeSupplementDecay, 8, \ 2230 "Decay factor to YoungedGenerationSizeSupplement") \ 2231 range(1, max_uintx) \ 2232 \ 2233 product(uintx, TenuredGenerationSizeIncrement, 20, \ 2234 "Adaptive size percentage change in tenured generation") \ 2235 range(0, 100) \ 2236 \ 2237 product(uintx, TenuredGenerationSizeSupplement, 80, \ 2238 "Supplement to TenuredGenerationSizeIncrement used at startup") \ 2239 range(0, 100) \ 2240 \ 2241 product(uintx, TenuredGenerationSizeSupplementDecay, 2, \ 2242 "Decay factor to TenuredGenerationSizeIncrement") \ 2243 range(1, max_uintx) \ 2244 \ 2245 product(uintx, MaxGCPauseMillis, max_uintx, \ 2246 "Adaptive size policy maximum GC pause time goal in millisecond, "\ 2247 "or (G1 Only) the maximum GC time per MMU time slice") \ 2248 range(1, max_uintx) \ 2249 constraint(MaxGCPauseMillisConstraintFunc,AfterMemoryInit) \ 2250 \ 2251 product(uintx, GCPauseIntervalMillis, 0, \ 2252 "Time slice for MMU specification") \ 2253 constraint(GCPauseIntervalMillisConstraintFunc,AfterMemoryInit) \ 2254 \ 2255 product(uintx, MaxGCMinorPauseMillis, max_uintx, \ 2256 "Adaptive size policy maximum GC minor pause time goal " \ 2257 "in millisecond") \ 2258 range(0, max_uintx) \ 2259 \ 2260 product(uintx, GCTimeRatio, 99, \ 2261 "Adaptive size policy application time to GC time ratio") \ 2262 range(0, max_juint) \ 2263 \ 2264 product(uintx, AdaptiveSizeDecrementScaleFactor, 4, \ 2265 "Adaptive size scale down factor for shrinking") \ 2266 range(1, max_uintx) \ 2267 \ 2268 product(bool, UseAdaptiveSizeDecayMajorGCCost, true, \ 2269 "Adaptive size decays the major cost for long major intervals") \ 2270 \ 2271 product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10, \ 2272 "Time scale over which major costs decay") \ 2273 range(0, max_uintx) \ 2274 \ 2275 product(uintx, MinSurvivorRatio, 3, \ 2276 "Minimum ratio of young generation/survivor space size") \ 2277 range(3, max_uintx) \ 2278 \ 2279 product(uintx, InitialSurvivorRatio, 8, \ 2280 "Initial ratio of young generation/survivor space size") \ 2281 range(0, max_uintx) \ 2282 \ 2283 product(size_t, BaseFootPrintEstimate, 256*M, \ 2284 "Estimate of footprint other than Java Heap") \ 2285 range(0, max_uintx) \ 2286 \ 2287 product(bool, UseGCOverheadLimit, true, \ 2288 "Use policy to limit of proportion of time spent in GC " \ 2289 "before an OutOfMemory error is thrown") \ 2290 \ 2291 product(uintx, GCTimeLimit, 98, \ 2292 "Limit of the proportion of time spent in GC before " \ 2293 "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \ 2294 range(0, 100) \ 
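/* Illustrative aside, not from the original source: GCTimeRatio encodes a   */ \
/* throughput goal of roughly 1/(1+GCTimeRatio) of total time spent in GC,   */ \
/* so the default of 99 targets about 1% GC time. The overhead-limit pair    */ \
/* (GCTimeLimit above, GCHeapFreeLimit just below) leads to OutOfMemoryError */ \
/* when, with the defaults and UseGCOverheadLimit enabled, more than 98% of  */ \
/* the time goes to GC while less than 2% of the heap is recovered.          */ \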
2295 \ 2296 product(uintx, GCHeapFreeLimit, 2, \ 2297 "Minimum percentage of free space after a full GC before an " \ 2298 "OutOfMemoryError is thrown (used with GCTimeLimit)") \ 2299 range(0, 100) \ 2300 \ 2301 develop(uintx, AdaptiveSizePolicyGCTimeLimitThreshold, 5, \ 2302 "Number of consecutive collections before gc time limit fires") \ 2303 range(1, max_uintx) \ 2304 \ 2305 product(intx, PrefetchCopyIntervalInBytes, -1, \ 2306 "How far ahead to prefetch destination area (<= 0 means off)") \ 2307 range(-1, max_jint) \ 2308 \ 2309 product(intx, PrefetchScanIntervalInBytes, -1, \ 2310 "How far ahead to prefetch scan area (<= 0 means off)") \ 2311 range(-1, max_jint) \ 2312 \ 2313 product(intx, PrefetchFieldsAhead, -1, \ 2314 "How many fields ahead to prefetch in oop scan (<= 0 means off)") \ 2315 range(-1, max_jint) \ 2316 \ 2317 diagnostic(bool, VerifyDuringStartup, false, \ 2318 "Verify memory system before executing any Java code " \ 2319 "during VM initialization") \ 2320 \ 2321 diagnostic(bool, VerifyBeforeExit, trueInDebug, \ 2322 "Verify system before exiting") \ 2323 \ 2324 diagnostic(bool, VerifyBeforeGC, false, \ 2325 "Verify memory system before GC") \ 2326 \ 2327 diagnostic(bool, VerifyAfterGC, false, \ 2328 "Verify memory system after GC") \ 2329 \ 2330 diagnostic(bool, VerifyDuringGC, false, \ 2331 "Verify memory system during GC (between phases)") \ 2332 \ 2333 diagnostic(ccstrlist, VerifySubSet, "", \ 2334 "Memory sub-systems to verify when Verify*GC flag(s) " \ 2335 "are enabled. One or more sub-systems can be specified " \ 2336 "in a comma separated string. Sub-systems are: " \ 2337 "threads, heap, symbol_table, string_table, codecache, " \ 2338 "dictionary, classloader_data_graph, metaspace, jni_handles, " \ 2339 "c-heap, codecache_oops") \ 2340 \ 2341 diagnostic(bool, GCParallelVerificationEnabled, true, \ 2342 "Enable parallel memory system verification") \ 2343 \ 2344 diagnostic(bool, DeferInitialCardMark, false, \ 2345 "When +ReduceInitialCardMarks, explicitly defer any that " \ 2346 "may arise from new_pre_store_barrier") \ 2347 \ 2348 product(bool, UseCondCardMark, false, \ 2349 "Check for already marked card before updating card table") \ 2350 \ 2351 diagnostic(bool, VerifyRememberedSets, false, \ 2352 "Verify GC remembered sets") \ 2353 \ 2354 diagnostic(bool, VerifyObjectStartArray, true, \ 2355 "Verify GC object start array if verify before/after") \ 2356 \ 2357 product(bool, DisableExplicitGC, false, \ 2358 "Ignore calls to System.gc()") \ 2359 \ 2360 notproduct(bool, CheckMemoryInitialization, false, \ 2361 "Check memory initialization") \ 2362 \ 2363 diagnostic(bool, BindCMSThreadToCPU, false, \ 2364 "Bind CMS Thread to CPU if possible") \ 2365 \ 2366 diagnostic(uintx, CPUForCMSThread, 0, \ 2367 "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \ 2368 range(0, max_juint) \ 2369 \ 2370 product(bool, BindGCTaskThreadsToCPUs, false, \ 2371 "Bind GCTaskThreads to CPUs if possible") \ 2372 \ 2373 product(bool, UseGCTaskAffinity, false, \ 2374 "Use worker affinity when asking for GCTasks") \ 2375 \ 2376 product(uintx, ProcessDistributionStride, 4, \ 2377 "Stride through processors when distributing processes") \ 2378 range(0, max_juint) \ 2379 \ 2380 product(uintx, CMSCoordinatorYieldSleepCount, 10, \ 2381 "Number of times the coordinator GC thread will sleep while " \ 2382 "yielding before giving up and resuming GC") \ 2383 range(0, max_juint) \ 2384 \ 2385 product(uintx, CMSYieldSleepCount, 0, \ 2386 "Number of times a GC thread (minus the 
coordinator) " \ 2387 "will sleep while yielding before giving up and resuming GC") \ 2388 range(0, max_juint) \ 2389 \ 2390 product(bool, PrintGC, false, \ 2391 "Print message at garbage collection. " \ 2392 "Deprecated, use -Xlog:gc instead.") \ 2393 \ 2394 product(bool, PrintGCDetails, false, \ 2395 "Print more details at garbage collection. " \ 2396 "Deprecated, use -Xlog:gc* instead.") \ 2397 \ 2398 develop(intx, ConcGCYieldTimeout, 0, \ 2399 "If non-zero, assert that GC threads yield within this " \ 2400 "number of milliseconds") \ 2401 range(0, max_intx) \ 2402 \ 2403 develop(bool, TraceFinalizerRegistration, false, \ 2404 "Trace registration of final references") \ 2405 \ 2406 notproduct(bool, TraceScavenge, false, \ 2407 "Trace scavenge") \ 2408 \ 2409 product(bool, IgnoreEmptyClassPaths, false, \ 2410 "Ignore empty path elements in -classpath") \ 2411 \ 2412 product(bool, TraceClassPaths, false, \ 2413 "Trace processing of class paths") \ 2414 \ 2415 product(bool, TraceClassLoadingPreorder, false, \ 2416 "Trace all classes loaded in order referenced (not loaded)") \ 2417 \ 2418 product_rw(bool, TraceLoaderConstraints, false, \ 2419 "Trace loader constraints") \ 2420 \ 2421 product(size_t, InitialBootClassLoaderMetaspaceSize, \ 2422 NOT_LP64(2200*K) LP64_ONLY(4*M), \ 2423 "Initial size of the boot class loader data metaspace") \ 2424 range(30*K, max_uintx/BytesPerWord) \ 2425 constraint(InitialBootClassLoaderMetaspaceSizeConstraintFunc, AfterErgo)\ 2426 \ 2427 product(bool, TraceYoungGenTime, false, \ 2428 "Trace accumulated time for young collection") \ 2429 \ 2430 product(bool, TraceOldGenTime, false, \ 2431 "Trace accumulated time for old collection") \ 2432 \ 2433 product(bool, PrintHeapAtSIGBREAK, true, \ 2434 "Print heap layout in response to SIGBREAK") \ 2435 \ 2436 manageable(bool, PrintClassHistogram, false, \ 2437 "Print a histogram of class instances") \ 2438 \ 2439 develop(bool, TraceWorkGang, false, \ 2440 "Trace activities of work gangs") \ 2441 \ 2442 develop(bool, TraceGCTaskManager, false, \ 2443 "Trace actions of the GC task manager") \ 2444 \ 2445 develop(bool, TraceGCTaskQueue, false, \ 2446 "Trace actions of the GC task queues") \ 2447 \ 2448 diagnostic(bool, TraceGCTaskThread, false, \ 2449 "Trace actions of the GC task threads") \ 2450 \ 2451 develop(bool, TraceParallelOldGCMarkingPhase, false, \ 2452 "Trace marking phase in ParallelOldGC") \ 2453 \ 2454 develop(bool, TraceParallelOldGCDensePrefix, false, \ 2455 "Trace dense prefix computation for ParallelOldGC") \ 2456 \ 2457 develop(bool, IgnoreLibthreadGPFault, false, \ 2458 "Suppress workaround for libthread GP fault") \ 2459 \ 2460 experimental(double, ObjectCountCutOffPercent, 0.5, \ 2461 "The percentage of the used heap that the instances of a class " \ 2462 "must occupy for the class to generate a trace event") \ 2463 range(0.0, 100.0) \ 2464 \ 2465 /* JVMTI heap profiling */ \ 2466 \ 2467 diagnostic(bool, TraceJVMTIObjectTagging, false, \ 2468 "Trace JVMTI object tagging calls") \ 2469 \ 2470 diagnostic(bool, VerifyBeforeIteration, false, \ 2471 "Verify memory system before JVMTI iteration") \ 2472 \ 2473 /* compiler interface */ \ 2474 \ 2475 develop(bool, CIPrintCompilerName, false, \ 2476 "when CIPrint is active, print the name of the active compiler") \ 2477 \ 2478 diagnostic(bool, CIPrintCompileQueue, false, \ 2479 "display the contents of the compile queue whenever a " \ 2480 "compilation is enqueued") \ 2481 \ 2482 develop(bool, CIPrintRequests, false, \ 2483 "display every request for 
compilation") \ 2484 \ 2485 product(bool, CITime, false, \ 2486 "collect timing information for compilation") \ 2487 \ 2488 develop(bool, CITimeVerbose, false, \ 2489 "be more verbose in compilation timings") \ 2490 \ 2491 develop(bool, CITimeEach, false, \ 2492 "display timing information after each successful compilation") \ 2493 \ 2494 develop(bool, CICountOSR, false, \ 2495 "use a separate counter when assigning ids to osr compilations") \ 2496 \ 2497 develop(bool, CICompileNatives, true, \ 2498 "compile native methods if supported by the compiler") \ 2499 \ 2500 develop_pd(bool, CICompileOSR, \ 2501 "compile on stack replacement methods if supported by the " \ 2502 "compiler") \ 2503 \ 2504 develop(bool, CIPrintMethodCodes, false, \ 2505 "print method bytecodes of the compiled code") \ 2506 \ 2507 develop(bool, CIPrintTypeFlow, false, \ 2508 "print the results of ciTypeFlow analysis") \ 2509 \ 2510 develop(bool, CITraceTypeFlow, false, \ 2511 "detailed per-bytecode tracing of ciTypeFlow analysis") \ 2512 \ 2513 develop(intx, OSROnlyBCI, -1, \ 2514 "OSR only at this bci. Negative values mean exclude that bci") \ 2515 \ 2516 /* compiler */ \ 2517 \ 2518 /* notice: the max range value here is max_jint, not max_intx */ \ 2519 /* because of overflow issue */ \ 2520 product(intx, CICompilerCount, CI_COMPILER_COUNT, \ 2521 "Number of compiler threads to run") \ 2522 range(0, max_jint) \ 2523 constraint(CICompilerCountConstraintFunc, AtParse) \ 2524 \ 2525 product(intx, CompilationPolicyChoice, 0, \ 2526 "which compilation policy (0-3)") \ 2527 range(0, 3) \ 2528 \ 2529 develop(bool, UseStackBanging, true, \ 2530 "use stack banging for stack overflow checks (required for " \ 2531 "proper StackOverflow handling; disable only to measure cost " \ 2532 "of stackbanging)") \ 2533 \ 2534 develop(bool, UseStrictFP, true, \ 2535 "use strict fp if modifier strictfp is set") \ 2536 \ 2537 develop(bool, GenerateSynchronizationCode, true, \ 2538 "generate locking/unlocking code for synchronized methods and " \ 2539 "monitors") \ 2540 \ 2541 develop(bool, GenerateCompilerNullChecks, true, \ 2542 "Generate explicit null checks for loads/stores/calls") \ 2543 \ 2544 develop(bool, GenerateRangeChecks, true, \ 2545 "Generate range checks for array accesses") \ 2546 \ 2547 develop_pd(bool, ImplicitNullChecks, \ 2548 "Generate code for implicit null checks") \ 2549 \ 2550 product_pd(bool, TrapBasedNullChecks, \ 2551 "Generate code for null checks that uses a cmp and trap " \ 2552 "instruction raising SIGTRAP. 
This is only used if an access to " \ 2553 "null (+offset) will not raise a SIGSEGV, i.e., " \ 2554 "ImplicitNullChecks don't work (PPC64).") \ 2555 \ 2556 product(bool, PrintSafepointStatistics, false, \ 2557 "Print statistics about safepoint synchronization") \ 2558 \ 2559 product(intx, PrintSafepointStatisticsCount, 300, \ 2560 "Total number of safepoint statistics collected " \ 2561 "before printing them out") \ 2562 range(1, max_intx) \ 2563 \ 2564 product(intx, PrintSafepointStatisticsTimeout, -1, \ 2565 "Print safepoint statistics only when safepoint takes " \ 2566 "more than PrintSafepointStatisticsTimeout in millis") \ 2567 LP64_ONLY(range(-1, max_intx/MICROUNITS)) \ 2568 NOT_LP64(range(-1, max_intx)) \ 2569 \ 2570 product(bool, TraceSafepointCleanupTime, false, \ 2571 "Print the break down of clean up tasks performed during " \ 2572 "safepoint") \ 2573 \ 2574 product(bool, Inline, true, \ 2575 "Enable inlining") \ 2576 \ 2577 product(bool, ClipInlining, true, \ 2578 "Clip inlining if aggregate method exceeds DesiredMethodLimit") \ 2579 \ 2580 develop(bool, UseCHA, true, \ 2581 "Enable CHA") \ 2582 \ 2583 product(bool, UseTypeProfile, true, \ 2584 "Check interpreter profile for historically monomorphic calls") \ 2585 \ 2586 diagnostic(bool, PrintInlining, false, \ 2587 "Print inlining optimizations") \ 2588 \ 2589 product(bool, UsePopCountInstruction, false, \ 2590 "Use population count instruction") \ 2591 \ 2592 develop(bool, EagerInitialization, false, \ 2593 "Eagerly initialize classes if possible") \ 2594 \ 2595 diagnostic(bool, LogTouchedMethods, false, \ 2596 "Log methods which have been ever touched in runtime") \ 2597 \ 2598 diagnostic(bool, PrintTouchedMethodsAtExit, false, \ 2599 "Print all methods that have been ever touched in runtime") \ 2600 \ 2601 develop(bool, TraceMethodReplacement, false, \ 2602 "Print when methods are replaced due to recompilation") \ 2603 \ 2604 develop(bool, PrintMethodFlushing, false, \ 2605 "Print the nmethods being flushed") \ 2606 \ 2607 diagnostic(bool, PrintMethodFlushingStatistics, false, \ 2608 "print statistics about method flushing") \ 2609 \ 2610 diagnostic(intx, HotMethodDetectionLimit, 100000, \ 2611 "Number of compiled code invocations after which " \ 2612 "the method is considered as hot by the flusher") \ 2613 range(1, max_jint) \ 2614 \ 2615 diagnostic(intx, MinPassesBeforeFlush, 10, \ 2616 "Minimum number of sweeper passes before an nmethod " \ 2617 "can be flushed") \ 2618 range(0, max_intx) \ 2619 \ 2620 product(bool, UseCodeAging, true, \ 2621 "Insert counter to detect warm methods") \ 2622 \ 2623 diagnostic(bool, StressCodeAging, false, \ 2624 "Start with counters compiled in") \ 2625 \ 2626 develop(bool, UseRelocIndex, false, \ 2627 "Use an index to speed random access to relocations") \ 2628 \ 2629 develop(bool, StressCodeBuffers, false, \ 2630 "Exercise code buffer expansion and other rare state changes") \ 2631 \ 2632 diagnostic(bool, DebugNonSafepoints, trueInDebug, \ 2633 "Generate extra debugging information for non-safepoints in " \ 2634 "nmethods") \ 2635 \ 2636 product(bool, PrintVMOptions, false, \ 2637 "Print flags that appeared on the command line") \ 2638 \ 2639 product(bool, IgnoreUnrecognizedVMOptions, false, \ 2640 "Ignore unrecognized VM options") \ 2641 \ 2642 product(bool, PrintCommandLineFlags, false, \ 2643 "Print flags specified on command line or set by ergonomics") \ 2644 \ 2645 product(bool, PrintFlagsInitial, false, \ 2646 "Print all VM flags before argument processing and exit VM") \ 2647 \ 2648 
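/* Illustrative aside, not from the original source: the flag-printing       */ \
/* options defined here and PrintFlagsFinal just below are a convenient way  */ \
/* to inspect this table on a running build, e.g.:                           */ \
/*                                                                            */ \
/*   java -XX:+PrintFlagsFinal -version                                      */ \
/*   java -XX:+PrintCommandLineFlags -version                                */ \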
product(bool, PrintFlagsFinal, false, \ 2649 "Print all VM flags after argument and ergonomic processing") \ 2650 \ 2651 notproduct(bool, PrintFlagsWithComments, false, \ 2652 "Print all VM flags with default values and descriptions and " \ 2653 "exit") \ 2654 \ 2655 product(bool, PrintFlagsRanges, false, \ 2656 "Print VM flags and their ranges and exit VM") \ 2657 \ 2658 diagnostic(bool, SerializeVMOutput, true, \ 2659 "Use a mutex to serialize output to tty and LogFile") \ 2660 \ 2661 diagnostic(bool, DisplayVMOutput, true, \ 2662 "Display all VM output on the tty, independently of LogVMOutput") \ 2663 \ 2664 diagnostic(bool, LogVMOutput, false, \ 2665 "Save VM output to LogFile") \ 2666 \ 2667 diagnostic(ccstr, LogFile, NULL, \ 2668 "If LogVMOutput or LogCompilation is on, save VM output to " \ 2669 "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\ 2670 \ 2671 product(ccstr, ErrorFile, NULL, \ 2672 "If an error occurs, save the error data to this file " \ 2673 "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ 2674 \ 2675 product(bool, DisplayVMOutputToStderr, false, \ 2676 "If DisplayVMOutput is true, display all VM output to stderr") \ 2677 \ 2678 product(bool, DisplayVMOutputToStdout, false, \ 2679 "If DisplayVMOutput is true, display all VM output to stdout") \ 2680 \ 2681 product(bool, UseHeavyMonitors, false, \ 2682 "use heavyweight instead of lightweight Java monitors") \ 2683 \ 2684 product(bool, PrintStringTableStatistics, false, \ 2685 "print statistics about the StringTable and SymbolTable") \ 2686 \ 2687 diagnostic(bool, VerifyStringTableAtExit, false, \ 2688 "verify StringTable contents at exit") \ 2689 \ 2690 notproduct(bool, PrintSymbolTableSizeHistogram, false, \ 2691 "print histogram of the symbol table") \ 2692 \ 2693 notproduct(bool, ExitVMOnVerifyError, false, \ 2694 "standard exit from VM if bytecode verify error " \ 2695 "(only in debug mode)") \ 2696 \ 2697 diagnostic(ccstr, AbortVMOnException, NULL, \ 2698 "Call fatal if this exception is thrown. 
Example: " \ 2699 "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \ 2700 \ 2701 diagnostic(ccstr, AbortVMOnExceptionMessage, NULL, \ 2702 "Call fatal if the exception pointed by AbortVMOnException " \ 2703 "has this message") \ 2704 \ 2705 develop(bool, DebugVtables, false, \ 2706 "add debugging code to vtable dispatch") \ 2707 \ 2708 notproduct(bool, PrintVtableStats, false, \ 2709 "print vtables stats at end of run") \ 2710 \ 2711 develop(bool, TraceCreateZombies, false, \ 2712 "trace creation of zombie nmethods") \ 2713 \ 2714 notproduct(bool, IgnoreLockingAssertions, false, \ 2715 "disable locking assertions (for speed)") \ 2716 \ 2717 product(bool, RangeCheckElimination, true, \ 2718 "Eliminate range checks") \ 2719 \ 2720 develop_pd(bool, UncommonNullCast, \ 2721 "track occurrences of null in casts; adjust compiler tactics") \ 2722 \ 2723 develop(bool, TypeProfileCasts, true, \ 2724 "treat casts like calls for purposes of type profiling") \ 2725 \ 2726 develop(bool, DelayCompilationDuringStartup, true, \ 2727 "Delay invoking the compiler until main application class is " \ 2728 "loaded") \ 2729 \ 2730 develop(bool, CompileTheWorld, false, \ 2731 "Compile all methods in all classes in bootstrap class path " \ 2732 "(stress test)") \ 2733 \ 2734 develop(bool, CompileTheWorldPreloadClasses, true, \ 2735 "Preload all classes used by a class before start loading") \ 2736 \ 2737 notproduct(intx, CompileTheWorldSafepointInterval, 100, \ 2738 "Force a safepoint every n compiles so sweeper can keep up") \ 2739 \ 2740 develop(bool, FillDelaySlots, true, \ 2741 "Fill delay slots (on SPARC only)") \ 2742 \ 2743 develop(bool, TimeLivenessAnalysis, false, \ 2744 "Time computation of bytecode liveness analysis") \ 2745 \ 2746 develop(bool, TraceLivenessGen, false, \ 2747 "Trace the generation of liveness analysis information") \ 2748 \ 2749 notproduct(bool, TraceLivenessQuery, false, \ 2750 "Trace queries of liveness analysis information") \ 2751 \ 2752 notproduct(bool, CollectIndexSetStatistics, false, \ 2753 "Collect information about IndexSets") \ 2754 \ 2755 develop(bool, UseLoopSafepoints, true, \ 2756 "Generate Safepoint nodes in every loop") \ 2757 \ 2758 develop(intx, FastAllocateSizeLimit, 128*K, \ 2759 /* Note: This value is zero mod 1<<13 for a cheap sparc set. 
*/ \ 2760 "Inline allocations larger than this in doublewords must go slow")\ 2761 \ 2762 product(bool, AggressiveOpts, false, \ 2763 "Enable aggressive optimizations - see arguments.cpp") \ 2764 \ 2765 product_pd(bool, CompactStrings, \ 2766 "Enable Strings to use single byte chars in backing store") \ 2767 \ 2768 product_pd(uintx, TypeProfileLevel, \ 2769 "=XYZ, with Z: Type profiling of arguments at call; " \ 2770 "Y: Type profiling of return value at call; " \ 2771 "X: Type profiling of parameters to methods; " \ 2772 "X, Y and Z in 0=off ; 1=jsr292 only; 2=all methods") \ 2773 constraint(TypeProfileLevelConstraintFunc, AfterErgo) \ 2774 \ 2775 product(intx, TypeProfileArgsLimit, 2, \ 2776 "max number of call arguments to consider for type profiling") \ 2777 range(0, 16) \ 2778 \ 2779 product(intx, TypeProfileParmsLimit, 2, \ 2780 "max number of incoming parameters to consider for type profiling"\ 2781 ", -1 for all") \ 2782 range(-1, 64) \ 2783 \ 2784 /* statistics */ \ 2785 develop(bool, CountCompiledCalls, false, \ 2786 "Count method invocations") \ 2787 \ 2788 notproduct(bool, CountRuntimeCalls, false, \ 2789 "Count VM runtime calls") \ 2790 \ 2791 develop(bool, CountJNICalls, false, \ 2792 "Count jni method invocations") \ 2793 \ 2794 notproduct(bool, CountJVMCalls, false, \ 2795 "Count jvm method invocations") \ 2796 \ 2797 notproduct(bool, CountRemovableExceptions, false, \ 2798 "Count exceptions that could be replaced by branches due to " \ 2799 "inlining") \ 2800 \ 2801 notproduct(bool, ICMissHistogram, false, \ 2802 "Produce histogram of IC misses") \ 2803 \ 2804 /* interpreter */ \ 2805 develop(bool, ClearInterpreterLocals, false, \ 2806 "Always clear local variables of interpreter activations upon " \ 2807 "entry") \ 2808 \ 2809 product_pd(bool, RewriteBytecodes, \ 2810 "Allow rewriting of bytecodes (bytecodes are not immutable)") \ 2811 \ 2812 product_pd(bool, RewriteFrequentPairs, \ 2813 "Rewrite frequently used bytecode pairs into a single bytecode") \ 2814 \ 2815 diagnostic(bool, PrintInterpreter, false, \ 2816 "Print the generated interpreter code") \ 2817 \ 2818 product(bool, UseInterpreter, true, \ 2819 "Use interpreter for non-compiled methods") \ 2820 \ 2821 develop(bool, UseFastSignatureHandlers, true, \ 2822 "Use fast signature handlers for native calls") \ 2823 \ 2824 product(bool, UseLoopCounter, true, \ 2825 "Increment invocation counter on backward branch") \ 2826 \ 2827 product_pd(bool, UseOnStackReplacement, \ 2828 "Use on stack replacement, calls runtime if invoc. 
counter " \ 2829 "overflows in loop") \ 2830 \ 2831 notproduct(bool, TraceOnStackReplacement, false, \ 2832 "Trace on stack replacement") \ 2833 \ 2834 product_pd(bool, PreferInterpreterNativeStubs, \ 2835 "Use always interpreter stubs for native methods invoked via " \ 2836 "interpreter") \ 2837 \ 2838 develop(bool, CountBytecodes, false, \ 2839 "Count number of bytecodes executed") \ 2840 \ 2841 develop(bool, PrintBytecodeHistogram, false, \ 2842 "Print histogram of the executed bytecodes") \ 2843 \ 2844 develop(bool, PrintBytecodePairHistogram, false, \ 2845 "Print histogram of the executed bytecode pairs") \ 2846 \ 2847 diagnostic(bool, PrintSignatureHandlers, false, \ 2848 "Print code generated for native method signature handlers") \ 2849 \ 2850 develop(bool, VerifyOops, false, \ 2851 "Do plausibility checks for oops") \ 2852 \ 2853 develop(bool, CheckUnhandledOops, false, \ 2854 "Check for unhandled oops in VM code") \ 2855 \ 2856 develop(bool, VerifyJNIFields, trueInDebug, \ 2857 "Verify jfieldIDs for instance fields") \ 2858 \ 2859 notproduct(bool, VerifyJNIEnvThread, false, \ 2860 "Verify JNIEnv.thread == Thread::current() when entering VM " \ 2861 "from JNI") \ 2862 \ 2863 develop(bool, VerifyFPU, false, \ 2864 "Verify FPU state (check for NaN's, etc.)") \ 2865 \ 2866 develop(bool, VerifyThread, false, \ 2867 "Watch the thread register for corruption (SPARC only)") \ 2868 \ 2869 develop(bool, VerifyActivationFrameSize, false, \ 2870 "Verify that activation frame didn't become smaller than its " \ 2871 "minimal size") \ 2872 \ 2873 develop(bool, TraceFrequencyInlining, false, \ 2874 "Trace frequency based inlining") \ 2875 \ 2876 develop_pd(bool, InlineIntrinsics, \ 2877 "Inline intrinsics that can be statically resolved") \ 2878 \ 2879 product_pd(bool, ProfileInterpreter, \ 2880 "Profile at the bytecode level during interpretation") \ 2881 \ 2882 develop(bool, TraceProfileInterpreter, false, \ 2883 "Trace profiling at the bytecode level during interpretation. 
" \ 2884 "This outputs the profiling information collected to improve " \ 2885 "jit compilation.") \ 2886 \ 2887 develop_pd(bool, ProfileTraps, \ 2888 "Profile deoptimization traps at the bytecode level") \ 2889 \ 2890 product(intx, ProfileMaturityPercentage, 20, \ 2891 "number of method invocations/branches (expressed as % of " \ 2892 "CompileThreshold) before using the method's profile") \ 2893 range(0, 100) \ 2894 \ 2895 diagnostic(bool, PrintMethodData, false, \ 2896 "Print the results of +ProfileInterpreter at end of run") \ 2897 \ 2898 develop(bool, VerifyDataPointer, trueInDebug, \ 2899 "Verify the method data pointer during interpreter profiling") \ 2900 \ 2901 develop(bool, VerifyCompiledCode, false, \ 2902 "Include miscellaneous runtime verifications in nmethod code; " \ 2903 "default off because it disturbs nmethod size heuristics") \ 2904 \ 2905 notproduct(bool, CrashGCForDumpingJavaThread, false, \ 2906 "Manually make GC thread crash then dump java stack trace; " \ 2907 "Test only") \ 2908 \ 2909 /* compilation */ \ 2910 product(bool, UseCompiler, true, \ 2911 "Use Just-In-Time compilation") \ 2912 \ 2913 develop(bool, TraceCompilationPolicy, false, \ 2914 "Trace compilation policy") \ 2915 \ 2916 develop(bool, TimeCompilationPolicy, false, \ 2917 "Time the compilation policy") \ 2918 \ 2919 product(bool, UseCounterDecay, true, \ 2920 "Adjust recompilation counters") \ 2921 \ 2922 develop(intx, CounterHalfLifeTime, 30, \ 2923 "Half-life time of invocation counters (in seconds)") \ 2924 \ 2925 develop(intx, CounterDecayMinIntervalLength, 500, \ 2926 "The minimum interval (in milliseconds) between invocation of " \ 2927 "CounterDecay") \ 2928 \ 2929 product(bool, AlwaysCompileLoopMethods, false, \ 2930 "When using recompilation, never interpret methods " \ 2931 "containing loops") \ 2932 \ 2933 product(bool, DontCompileHugeMethods, true, \ 2934 "Do not compile methods > HugeMethodLimit") \ 2935 \ 2936 /* Bytecode escape analysis estimation. */ \ 2937 product(bool, EstimateArgEscape, true, \ 2938 "Analyze bytecodes to estimate escape state of arguments") \ 2939 \ 2940 product(intx, BCEATraceLevel, 0, \ 2941 "How much tracing to do of bytecode escape analysis estimates " \ 2942 "(0-3)") \ 2943 range(0, 3) \ 2944 \ 2945 product(intx, MaxBCEAEstimateLevel, 5, \ 2946 "Maximum number of nested calls that are analyzed by BC EA") \ 2947 range(0, max_jint) \ 2948 \ 2949 product(intx, MaxBCEAEstimateSize, 150, \ 2950 "Maximum bytecode size of a method to be analyzed by BC EA") \ 2951 range(0, max_jint) \ 2952 \ 2953 product(intx, AllocatePrefetchStyle, 1, \ 2954 "0 = no prefetch, " \ 2955 "1 = prefetch instructions for each allocation, " \ 2956 "2 = use TLAB watermark to gate allocation prefetch, " \ 2957 "3 = use BIS instruction on Sparc for allocation prefetch") \ 2958 range(0, 3) \ 2959 \ 2960 product(intx, AllocatePrefetchDistance, -1, \ 2961 "Distance to prefetch ahead of allocation pointer. 
" \ 2962 "-1: use system-specific value (automatically determined") \ 2963 constraint(AllocatePrefetchDistanceConstraintFunc, AfterMemoryInit)\ 2964 \ 2965 product(intx, AllocatePrefetchLines, 3, \ 2966 "Number of lines to prefetch ahead of array allocation pointer") \ 2967 range(1, max_jint / 2) \ 2968 \ 2969 product(intx, AllocateInstancePrefetchLines, 1, \ 2970 "Number of lines to prefetch ahead of instance allocation " \ 2971 "pointer") \ 2972 range(1, max_jint / 2) \ 2973 \ 2974 product(intx, AllocatePrefetchStepSize, 16, \ 2975 "Step size in bytes of sequential prefetch instructions") \ 2976 range(1, max_jint) \ 2977 constraint(AllocatePrefetchStepSizeConstraintFunc,AfterMemoryInit)\ 2978 \ 2979 product(intx, AllocatePrefetchInstr, 0, \ 2980 "Prefetch instruction to prefetch ahead of allocation pointer") \ 2981 constraint(AllocatePrefetchInstrConstraintFunc, AfterErgo) \ 2982 \ 2983 /* deoptimization */ \ 2984 develop(bool, TraceDeoptimization, false, \ 2985 "Trace deoptimization") \ 2986 \ 2987 develop(bool, PrintDeoptimizationDetails, false, \ 2988 "Print more information about deoptimization") \ 2989 \ 2990 develop(bool, DebugDeoptimization, false, \ 2991 "Tracing various information while debugging deoptimization") \ 2992 \ 2993 product(intx, SelfDestructTimer, 0, \ 2994 "Will cause VM to terminate after a given time (in minutes) " \ 2995 "(0 means off)") \ 2996 range(0, max_intx) \ 2997 \ 2998 product(intx, MaxJavaStackTraceDepth, 1024, \ 2999 "The maximum number of lines in the stack trace for Java " \ 3000 "exceptions (0 means all)") \ 3001 range(0, max_jint/2) \ 3002 \ 3003 develop(bool, TraceStackWalk, false, \ 3004 "Trace stack walking") \ 3005 \ 3006 product(bool, MemberNameInStackFrame, true, \ 3007 "Use MemberName in StackFrame") \ 3008 \ 3009 /* notice: the max range value here is max_jint, not max_intx */ \ 3010 /* because of overflow issue */ \ 3011 NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \ 3012 "Guarantee a safepoint (at least) every so many milliseconds " \ 3013 "(0 means none)")) \ 3014 NOT_EMBEDDED(range(0, max_jint)) \ 3015 \ 3016 EMBEDDED_ONLY(product(intx, GuaranteedSafepointInterval, 0, \ 3017 "Guarantee a safepoint (at least) every so many milliseconds " \ 3018 "(0 means none)")) \ 3019 EMBEDDED_ONLY(range(0, max_jint)) \ 3020 \ 3021 product(intx, SafepointTimeoutDelay, 10000, \ 3022 "Delay in milliseconds for option SafepointTimeout") \ 3023 LP64_ONLY(range(0, max_intx/MICROUNITS)) \ 3024 NOT_LP64(range(0, max_intx)) \ 3025 \ 3026 product(intx, NmethodSweepActivity, 10, \ 3027 "Removes cold nmethods from code cache if > 0. 
Higher values " \ 3028 "result in more aggressive sweeping") \ 3029 range(0, 2000) \ 3030 \ 3031 notproduct(bool, LogSweeper, false, \ 3032 "Keep a ring buffer of sweeper activity") \ 3033 \ 3034 notproduct(intx, SweeperLogEntries, 1024, \ 3035 "Number of records in the ring buffer of sweeper activity") \ 3036 \ 3037 notproduct(intx, MemProfilingInterval, 500, \ 3038 "Time between each invocation of the MemProfiler") \ 3039 \ 3040 develop(intx, MallocCatchPtr, -1, \ 3041 "Hit breakpoint when mallocing/freeing this pointer") \ 3042 \ 3043 notproduct(ccstrlist, SuppressErrorAt, "", \ 3044 "List of assertions (file:line) to muzzle") \ 3045 \ 3046 notproduct(size_t, HandleAllocationLimit, 1024, \ 3047 "Threshold for HandleMark allocation when +TraceHandleAllocation "\ 3048 "is used") \ 3049 \ 3050 develop(size_t, TotalHandleAllocationLimit, 1024, \ 3051 "Threshold for total handle allocation when " \ 3052 "+TraceHandleAllocation is used") \ 3053 \ 3054 develop(intx, StackPrintLimit, 100, \ 3055 "number of stack frames to print in VM-level stack dump") \ 3056 \ 3057 notproduct(intx, MaxElementPrintSize, 256, \ 3058 "maximum number of elements to print") \ 3059 \ 3060 notproduct(intx, MaxSubklassPrintSize, 4, \ 3061 "maximum number of subklasses to print when printing klass") \ 3062 \ 3063 product(intx, MaxInlineLevel, 9, \ 3064 "maximum number of nested calls that are inlined") \ 3065 range(0, max_jint) \ 3066 \ 3067 product(intx, MaxRecursiveInlineLevel, 1, \ 3068 "maximum number of nested recursive calls that are inlined") \ 3069 range(0, max_jint) \ 3070 \ 3071 develop(intx, MaxForceInlineLevel, 100, \ 3072 "maximum number of nested calls that are forced for inlining " \ 3073 "(using CompileCommand or marked w/ @ForceInline)") \ 3074 range(0, max_jint) \ 3075 \ 3076 product_pd(intx, InlineSmallCode, \ 3077 "Only inline already compiled methods if their code size is " \ 3078 "less than this") \ 3079 range(0, max_jint) \ 3080 \ 3081 product(intx, MaxInlineSize, 35, \ 3082 "The maximum bytecode size of a method to be inlined") \ 3083 range(0, max_jint) \ 3084 \ 3085 product_pd(intx, FreqInlineSize, \ 3086 "The maximum bytecode size of a frequent method to be inlined") \ 3087 range(0, max_jint) \ 3088 \ 3089 product(intx, MaxTrivialSize, 6, \ 3090 "The maximum bytecode size of a trivial method to be inlined") \ 3091 range(0, max_jint) \ 3092 \ 3093 product(intx, MinInliningThreshold, 250, \ 3094 "The minimum invocation count a method needs to have to be " \ 3095 "inlined") \ 3096 range(0, max_jint) \ 3097 \ 3098 develop(intx, MethodHistogramCutoff, 100, \ 3099 "The cutoff value for method invocation histogram (+CountCalls)") \ 3100 \ 3101 develop(intx, ProfilerNumberOfInterpretedMethods, 25, \ 3102 "Number of interpreted methods to show in profile") \ 3103 \ 3104 develop(intx, ProfilerNumberOfCompiledMethods, 25, \ 3105 "Number of compiled methods to show in profile") \ 3106 \ 3107 develop(intx, ProfilerNumberOfStubMethods, 25, \ 3108 "Number of stub methods to show in profile") \ 3109 \ 3110 develop(intx, ProfilerNumberOfRuntimeStubNodes, 25, \ 3111 "Number of runtime stub nodes to show in profile") \ 3112 \ 3113 product(intx, ProfileIntervalsTicks, 100, \ 3114 "Number of ticks between printing of interval profile " \ 3115 "(+ProfileIntervals)") \ 3116 range(0, max_intx) \ 3117 \ 3118 notproduct(intx, ScavengeALotInterval, 1, \ 3119 "Interval between which scavenge will occur with +ScavengeALot") \ 3120 \ 3121 notproduct(intx, FullGCALotInterval, 1, \ 3122 "Interval between which full gc will 
occur with +FullGCALot") \ 3123 \ 3124 notproduct(intx, FullGCALotStart, 0, \ 3125 "For which invocation to start FullGCAlot") \ 3126 \ 3127 notproduct(intx, FullGCALotDummies, 32*K, \ 3128 "Dummy object allocated with +FullGCALot, forcing all objects " \ 3129 "to move") \ 3130 \ 3131 develop(intx, DontYieldALotInterval, 10, \ 3132 "Interval between which yields will be dropped (milliseconds)") \ 3133 \ 3134 develop(intx, MinSleepInterval, 1, \ 3135 "Minimum sleep() interval (milliseconds) when " \ 3136 "ConvertSleepToYield is off (used for Solaris)") \ 3137 \ 3138 develop(intx, ProfilerPCTickThreshold, 15, \ 3139 "Number of ticks in a PC buckets to be a hotspot") \ 3140 \ 3141 notproduct(intx, DeoptimizeALotInterval, 5, \ 3142 "Number of exits until DeoptimizeALot kicks in") \ 3143 \ 3144 notproduct(intx, ZombieALotInterval, 5, \ 3145 "Number of exits until ZombieALot kicks in") \ 3146 \ 3147 diagnostic(intx, MallocVerifyInterval, 0, \ 3148 "If non-zero, verify C heap after every N calls to " \ 3149 "malloc/realloc/free") \ 3150 range(0, max_intx) \ 3151 \ 3152 diagnostic(intx, MallocVerifyStart, 0, \ 3153 "If non-zero, start verifying C heap after Nth call to " \ 3154 "malloc/realloc/free") \ 3155 range(0, max_intx) \ 3156 \ 3157 diagnostic(uintx, MallocMaxTestWords, 0, \ 3158 "If non-zero, maximum number of words that malloc/realloc can " \ 3159 "allocate (for testing only)") \ 3160 range(0, max_uintx) \ 3161 \ 3162 product(intx, TypeProfileWidth, 2, \ 3163 "Number of receiver types to record in call/cast profile") \ 3164 range(0, 8) \ 3165 \ 3166 experimental(intx, MethodProfileWidth, 0, \ 3167 "Number of methods to record in call profile") \ 3168 \ 3169 develop(intx, BciProfileWidth, 2, \ 3170 "Number of return bci's to record in ret profile") \ 3171 \ 3172 product(intx, PerMethodRecompilationCutoff, 400, \ 3173 "After recompiling N times, stay in the interpreter (-1=>'Inf')") \ 3174 range(-1, max_intx) \ 3175 \ 3176 product(intx, PerBytecodeRecompilationCutoff, 200, \ 3177 "Per-BCI limit on repeated recompilation (-1=>'Inf')") \ 3178 range(-1, max_intx) \ 3179 \ 3180 product(intx, PerMethodTrapLimit, 100, \ 3181 "Limit on traps (of one kind) in a method (includes inlines)") \ 3182 range(0, max_jint) \ 3183 \ 3184 experimental(intx, PerMethodSpecTrapLimit, 5000, \ 3185 "Limit on speculative traps (of one kind) in a method " \ 3186 "(includes inlines)") \ 3187 range(0, max_jint) \ 3188 \ 3189 product(intx, PerBytecodeTrapLimit, 4, \ 3190 "Limit on traps (of one kind) at a particular BCI") \ 3191 range(0, max_jint) \ 3192 \ 3193 experimental(intx, SpecTrapLimitExtraEntries, 3, \ 3194 "Extra method data trap entries for speculation") \ 3195 \ 3196 develop(intx, InlineFrequencyRatio, 20, \ 3197 "Ratio of call site execution to caller method invocation") \ 3198 range(0, max_jint) \ 3199 \ 3200 develop_pd(intx, InlineFrequencyCount, \ 3201 "Count of call site execution necessary to trigger frequent " \ 3202 "inlining") \ 3203 range(0, max_jint) \ 3204 \ 3205 develop(intx, InlineThrowCount, 50, \ 3206 "Force inlining of interpreted methods that throw this often") \ 3207 range(0, max_jint) \ 3208 \ 3209 develop(intx, InlineThrowMaxSize, 200, \ 3210 "Force inlining of throwing methods smaller than this") \ 3211 range(0, max_jint) \ 3212 \ 3213 develop(intx, ProfilerNodeSize, 1024, \ 3214 "Size in K to allocate for the Profile Nodes of each thread") \ 3215 range(0, 1024) \ 3216 \ 3217 /* gc parameters */ \ 3218 product(size_t, InitialHeapSize, 0, \ 3219 "Initial heap size (in bytes); zero means 
use ergonomics") \ 3220 constraint(InitialHeapSizeConstraintFunc,AfterErgo) \ 3221 \ 3222 product(size_t, MaxHeapSize, ScaleForWordSize(96*M), \ 3223 "Maximum heap size (in bytes)") \ 3224 constraint(MaxHeapSizeConstraintFunc,AfterErgo) \ 3225 \ 3226 product(size_t, OldSize, ScaleForWordSize(4*M), \ 3227 "Initial tenured generation size (in bytes)") \ 3228 range(0, max_uintx) \ 3229 \ 3230 product(size_t, NewSize, ScaleForWordSize(1*M), \ 3231 "Initial new generation size (in bytes)") \ 3232 constraint(NewSizeConstraintFunc,AfterErgo) \ 3233 \ 3234 product(size_t, MaxNewSize, max_uintx, \ 3235 "Maximum new generation size (in bytes), max_uintx means set " \ 3236 "ergonomically") \ 3237 range(0, max_uintx) \ 3238 \ 3239 product_pd(size_t, HeapBaseMinAddress, \ 3240 "OS specific low limit for heap base address") \ 3241 constraint(HeapBaseMinAddressConstraintFunc,AfterErgo) \ 3242 \ 3243 product(size_t, PretenureSizeThreshold, 0, \ 3244 "Maximum size in bytes of objects allocated in DefNew " \ 3245 "generation; zero means no maximum") \ 3246 range(0, max_uintx) \ 3247 \ 3248 product(size_t, MinTLABSize, 2*K, \ 3249 "Minimum allowed TLAB size (in bytes)") \ 3250 range(1, max_uintx/2) \ 3251 constraint(MinTLABSizeConstraintFunc,AfterMemoryInit) \ 3252 \ 3253 product(size_t, TLABSize, 0, \ 3254 "Starting TLAB size (in bytes); zero means set ergonomically") \ 3255 constraint(TLABSizeConstraintFunc,AfterMemoryInit) \ 3256 \ 3257 product(size_t, YoungPLABSize, 4096, \ 3258 "Size of young gen promotion LAB's (in HeapWords)") \ 3259 constraint(YoungPLABSizeConstraintFunc,AfterMemoryInit) \ 3260 \ 3261 product(size_t, OldPLABSize, 1024, \ 3262 "Size of old gen promotion LAB's (in HeapWords), or Number " \ 3263 "of blocks to attempt to claim when refilling CMS LAB's") \ 3264 constraint(OldPLABSizeConstraintFunc,AfterMemoryInit) \ 3265 \ 3266 product(uintx, TLABAllocationWeight, 35, \ 3267 "Allocation averaging weight") \ 3268 range(0, 100) \ 3269 \ 3270 /* Limit the lower bound of this flag to 1 as it is used */ \ 3271 /* in a division expression. 
*/ \ 3272 product(uintx, TLABWasteTargetPercent, 1, \ 3273 "Percentage of Eden that can be wasted") \ 3274 range(1, 100) \ 3275 \ 3276 product(uintx, TLABRefillWasteFraction, 64, \ 3277 "Maximum TLAB waste at a refill (internal fragmentation)") \ 3278 range(1, max_juint) \ 3279 \ 3280 product(uintx, TLABWasteIncrement, 4, \ 3281 "Increment allowed waste at slow allocation") \ 3282 range(0, max_jint) \ 3283 constraint(TLABWasteIncrementConstraintFunc,AfterMemoryInit) \ 3284 \ 3285 product(uintx, SurvivorRatio, 8, \ 3286 "Ratio of eden/survivor space size") \ 3287 range(1, max_uintx-2) \ 3288 constraint(SurvivorRatioConstraintFunc,AfterMemoryInit) \ 3289 \ 3290 product(uintx, NewRatio, 2, \ 3291 "Ratio of old/new generation sizes") \ 3292 range(0, max_uintx-1) \ 3293 \ 3294 product_pd(size_t, NewSizeThreadIncrease, \ 3295 "Additional size added to desired new generation size per " \ 3296 "non-daemon thread (in bytes)") \ 3297 range(0, max_uintx) \ 3298 \ 3299 product_pd(size_t, MetaspaceSize, \ 3300 "Initial size of Metaspaces (in bytes)") \ 3301 constraint(MetaspaceSizeConstraintFunc,AfterErgo) \ 3302 \ 3303 product(size_t, MaxMetaspaceSize, max_uintx, \ 3304 "Maximum size of Metaspaces (in bytes)") \ 3305 constraint(MaxMetaspaceSizeConstraintFunc,AfterErgo) \ 3306 \ 3307 product(size_t, CompressedClassSpaceSize, 1*G, \ 3308 "Maximum size of class area in Metaspace when compressed " \ 3309 "class pointers are used") \ 3310 range(1*M, 3*G) \ 3311 \ 3312 manageable(uintx, MinHeapFreeRatio, 40, \ 3313 "The minimum percentage of heap free after GC to avoid expansion."\ 3314 " For most GCs this applies to the old generation. In G1 and" \ 3315 " ParallelGC it applies to the whole heap.") \ 3316 range(0, 100) \ 3317 constraint(MinHeapFreeRatioConstraintFunc,AfterErgo) \ 3318 \ 3319 manageable(uintx, MaxHeapFreeRatio, 70, \ 3320 "The maximum percentage of heap free after GC to avoid shrinking."\ 3321 " For most GCs this applies to the old generation. 
In G1 and" \ 3322 " ParallelGC it applies to the whole heap.") \ 3323 range(0, 100) \ 3324 constraint(MaxHeapFreeRatioConstraintFunc,AfterErgo) \ 3325 \ 3326 product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ 3327 "Number of milliseconds per MB of free space in the heap") \ 3328 range(0, max_intx) \ 3329 constraint(SoftRefLRUPolicyMSPerMBConstraintFunc,AfterMemoryInit) \ 3330 \ 3331 product(size_t, MinHeapDeltaBytes, ScaleForWordSize(128*K), \ 3332 "The minimum change in heap space due to GC (in bytes)") \ 3333 range(0, max_uintx) \ 3334 \ 3335 product(size_t, MinMetaspaceExpansion, ScaleForWordSize(256*K), \ 3336 "The minimum expansion of Metaspace (in bytes)") \ 3337 range(0, max_uintx) \ 3338 \ 3339 product(uintx, MaxMetaspaceFreeRatio, 70, \ 3340 "The maximum percentage of Metaspace free after GC to avoid " \ 3341 "shrinking") \ 3342 range(0, 100) \ 3343 constraint(MaxMetaspaceFreeRatioConstraintFunc,AfterErgo) \ 3344 \ 3345 product(uintx, MinMetaspaceFreeRatio, 40, \ 3346 "The minimum percentage of Metaspace free after GC to avoid " \ 3347 "expansion") \ 3348 range(0, 99) \ 3349 constraint(MinMetaspaceFreeRatioConstraintFunc,AfterErgo) \ 3350 \ 3351 product(size_t, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \ 3352 "The maximum expansion of Metaspace without full GC (in bytes)") \ 3353 range(0, max_uintx) \ 3354 \ 3355 product(uintx, QueuedAllocationWarningCount, 0, \ 3356 "Number of times an allocation that queues behind a GC " \ 3357 "will retry before printing a warning") \ 3358 range(0, max_uintx) \ 3359 \ 3360 diagnostic(uintx, VerifyGCStartAt, 0, \ 3361 "GC invoke count where +VerifyBefore/AfterGC kicks in") \ 3362 range(0, max_uintx) \ 3363 \ 3364 diagnostic(intx, VerifyGCLevel, 0, \ 3365 "Generation level at which to start +VerifyBefore/AfterGC") \ 3366 range(0, 1) \ 3367 \ 3368 product(uintx, MaxTenuringThreshold, 15, \ 3369 "Maximum value for tenuring threshold") \ 3370 range(0, markOopDesc::max_age + 1) \ 3371 constraint(MaxTenuringThresholdConstraintFunc,AfterErgo) \ 3372 \ 3373 product(uintx, InitialTenuringThreshold, 7, \ 3374 "Initial value for tenuring threshold") \ 3375 range(0, markOopDesc::max_age + 1) \ 3376 constraint(InitialTenuringThresholdConstraintFunc,AfterErgo) \ 3377 \ 3378 product(uintx, TargetSurvivorRatio, 50, \ 3379 "Desired percentage of survivor space used after scavenge") \ 3380 range(0, 100) \ 3381 \ 3382 product(uintx, MarkSweepDeadRatio, 5, \ 3383 "Percentage (0-100) of the old gen allowed as dead wood. " \ 3384 "Serial mark sweep treats this as both the minimum and maximum " \ 3385 "value. " \ 3386 "CMS uses this value only if it falls back to mark sweep. " \ 3387 "Par compact uses a variable scale based on the density of the " \ 3388 "generation and treats this as the maximum value when the heap " \ 3389 "is either completely full or completely empty. 
Par compact " \ 3390 "also has a smaller default value; see arguments.cpp.") \ 3391 range(0, 100) \ 3392 \ 3393 product(uint, MarkSweepAlwaysCompactCount, 4, \ 3394 "How often should we fully compact the heap (ignoring the dead " \ 3395 "space parameters)") \ 3396 range(1, max_juint) \ 3397 \ 3398 develop(uintx, GCExpandToAllocateDelayMillis, 0, \ 3399 "Delay between expansion and allocation (in milliseconds)") \ 3400 \ 3401 develop(uintx, GCWorkerDelayMillis, 0, \ 3402 "Delay in scheduling GC workers (in milliseconds)") \ 3403 \ 3404 product(intx, DeferThrSuspendLoopCount, 4000, \ 3405 "(Unstable) Number of times to iterate in safepoint loop " \ 3406 "before blocking VM threads ") \ 3407 range(-1, max_jint-1) \ 3408 \ 3409 product(intx, DeferPollingPageLoopCount, -1, \ 3410 "(Unsafe,Unstable) Number of iterations in safepoint loop " \ 3411 "before changing safepoint polling page to RO ") \ 3412 range(-1, max_jint-1) \ 3413 \ 3414 product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ 3415 range(0, max_intx) \ 3416 \ 3417 product(bool, PSChunkLargeArrays, true, \ 3418 "Process large arrays in chunks") \ 3419 \ 3420 product(uintx, GCDrainStackTargetSize, 64, \ 3421 "Number of entries we will try to leave on the stack " \ 3422 "during parallel gc") \ 3423 range(0, max_juint) \ 3424 \ 3425 /* stack parameters */ \ 3426 product_pd(intx, StackYellowPages, \ 3427 "Number of yellow zone (recoverable overflows) pages of size " \ 3428 "4KB. If pages are bigger yellow zone is aligned up.") \ 3429 range(MIN_STACK_YELLOW_PAGES, (DEFAULT_STACK_YELLOW_PAGES+5)) \ 3430 \ 3431 product_pd(intx, StackRedPages, \ 3432 "Number of red zone (unrecoverable overflows) pages of size " \ 3433 "4KB. If pages are bigger red zone is aligned up.") \ 3434 range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2)) \ 3435 \ 3436 product_pd(intx, StackReservedPages, \ 3437 "Number of reserved zone (reserved to annotated methods) pages" \ 3438 " of size 4KB. If pages are bigger reserved zone is aligned up.") \ 3439 range(MIN_STACK_RESERVED_PAGES, (DEFAULT_STACK_RESERVED_PAGES+10))\ 3440 \ 3441 product(bool, RestrictReservedStack, true, \ 3442 "Restrict @ReservedStackAccess to trusted classes") \ 3443 \ 3444 /* greater stack shadow pages can't generate instruction to bang stack */ \ 3445 product_pd(intx, StackShadowPages, \ 3446 "Number of shadow zone (for overflow checking) pages of size " \ 3447 "4KB. If pages are bigger shadow zone is aligned up. " \ 3448 "This should exceed the depth of the VM and native call stack.") \ 3449 range(MIN_STACK_SHADOW_PAGES, (DEFAULT_STACK_SHADOW_PAGES+30)) \ 3450 \ 3451 product_pd(intx, ThreadStackSize, \ 3452 "Thread Stack Size (in Kbytes)") \ 3453 range(0, (max_intx-os::vm_page_size())/(1 * K)) \ 3454 \ 3455 product_pd(intx, VMThreadStackSize, \ 3456 "Non-Java Thread Stack Size (in Kbytes)") \ 3457 range(0, max_intx/(1 * K)) \ 3458 \ 3459 product_pd(intx, CompilerThreadStackSize, \ 3460 "Compiler Thread Stack Size (in Kbytes)") \ 3461 range(0, max_intx/(1 * K)) \ 3462 \ 3463 develop_pd(size_t, JVMInvokeMethodSlack, \ 3464 "Stack space (bytes) required for JVM_InvokeMethod to complete") \ 3465 \ 3466 /* code cache parameters */ \ 3467 /* ppc64/tiered compilation has large code-entry alignment. 
*/ \ 3468 develop(uintx, CodeCacheSegmentSize, \ 3469 64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64)), \ 3470 "Code cache segment size (in bytes) - smallest unit of " \ 3471 "allocation") \ 3472 range(1, 1024) \ 3473 constraint(CodeCacheSegmentSizeConstraintFunc, AfterErgo) \ 3474 \ 3475 develop_pd(intx, CodeEntryAlignment, \ 3476 "Code entry alignment for generated code (in bytes)") \ 3477 constraint(CodeEntryAlignmentConstraintFunc, AfterErgo) \ 3478 \ 3479 product_pd(intx, OptoLoopAlignment, \ 3480 "Align inner loops to zero relative to this modulus") \ 3481 range(1, 16) \ 3482 constraint(OptoLoopAlignmentConstraintFunc, AfterErgo) \ 3483 \ 3484 product_pd(uintx, InitialCodeCacheSize, \ 3485 "Initial code cache size (in bytes)") \ 3486 range(0, max_uintx) \ 3487 \ 3488 develop_pd(uintx, CodeCacheMinimumUseSpace, \ 3489 "Minimum code cache size (in bytes) required to start VM.") \ 3490 range(0, max_uintx) \ 3491 \ 3492 product(bool, SegmentedCodeCache, false, \ 3493 "Use a segmented code cache") \ 3494 \ 3495 product_pd(uintx, ReservedCodeCacheSize, \ 3496 "Reserved code cache size (in bytes) - maximum code cache size") \ 3497 range(0, max_uintx) \ 3498 \ 3499 product_pd(uintx, NonProfiledCodeHeapSize, \ 3500 "Size of code heap with non-profiled methods (in bytes)") \ 3501 range(0, max_uintx) \ 3502 \ 3503 product_pd(uintx, ProfiledCodeHeapSize, \ 3504 "Size of code heap with profiled methods (in bytes)") \ 3505 range(0, max_uintx) \ 3506 \ 3507 product_pd(uintx, NonNMethodCodeHeapSize, \ 3508 "Size of code heap with non-nmethods (in bytes)") \ 3509 range(0, max_uintx) \ 3510 \ 3511 product_pd(uintx, CodeCacheExpansionSize, \ 3512 "Code cache expansion size (in bytes)") \ 3513 range(0, max_uintx) \ 3514 \ 3515 develop_pd(uintx, CodeCacheMinBlockLength, \ 3516 "Minimum number of segments in a code cache block") \ 3517 range(1, 100) \ 3518 \ 3519 notproduct(bool, ExitOnFullCodeCache, false, \ 3520 "Exit the VM if we fill the code cache") \ 3521 \ 3522 product(bool, UseCodeCacheFlushing, true, \ 3523 "Remove cold/old nmethods from the code cache") \ 3524 \ 3525 product(uintx, StartAggressiveSweepingAt, 10, \ 3526 "Start aggressive sweeping if X[%] of the code cache is free." \ 3527 "Segmented code cache: X[%] of the non-profiled heap." 
\ 3528 "Non-segmented code cache: X[%] of the total code cache") \ 3529 range(0, 100) \ 3530 \ 3531 /* interpreter debugging */ \ 3532 develop(intx, BinarySwitchThreshold, 5, \ 3533 "Minimal number of lookupswitch entries for rewriting to binary " \ 3534 "switch") \ 3535 \ 3536 develop(intx, StopInterpreterAt, 0, \ 3537 "Stop interpreter execution at specified bytecode number") \ 3538 \ 3539 develop(intx, TraceBytecodesAt, 0, \ 3540 "Trace bytecodes starting with specified bytecode number") \ 3541 \ 3542 /* compiler interface */ \ 3543 develop(intx, CIStart, 0, \ 3544 "The id of the first compilation to permit") \ 3545 \ 3546 develop(intx, CIStop, max_jint, \ 3547 "The id of the last compilation to permit") \ 3548 \ 3549 develop(intx, CIStartOSR, 0, \ 3550 "The id of the first osr compilation to permit " \ 3551 "(CICountOSR must be on)") \ 3552 \ 3553 develop(intx, CIStopOSR, max_jint, \ 3554 "The id of the last osr compilation to permit " \ 3555 "(CICountOSR must be on)") \ 3556 \ 3557 develop(intx, CIBreakAtOSR, -1, \ 3558 "The id of osr compilation to break at") \ 3559 \ 3560 develop(intx, CIBreakAt, -1, \ 3561 "The id of compilation to break at") \ 3562 \ 3563 product(ccstrlist, CompileOnly, "", \ 3564 "List of methods (pkg/class.name) to restrict compilation to") \ 3565 \ 3566 product(ccstr, CompileCommandFile, NULL, \ 3567 "Read compiler commands from this file [.hotspot_compiler]") \ 3568 \ 3569 diagnostic(ccstr, CompilerDirectivesFile, NULL, \ 3570 "Read compiler directives from this file") \ 3571 \ 3572 product(ccstrlist, CompileCommand, "", \ 3573 "Prepend to .hotspot_compiler; e.g. log,java/lang/String.<init>") \ 3574 \ 3575 develop(bool, ReplayCompiles, false, \ 3576 "Enable replay of compilations from ReplayDataFile") \ 3577 \ 3578 product(ccstr, ReplayDataFile, NULL, \ 3579 "File containing compilation replay information" \ 3580 "[default: ./replay_pid%p.log] (%p replaced with pid)") \ 3581 \ 3582 product(ccstr, InlineDataFile, NULL, \ 3583 "File containing inlining replay information" \ 3584 "[default: ./inline_pid%p.log] (%p replaced with pid)") \ 3585 \ 3586 develop(intx, ReplaySuppressInitializers, 2, \ 3587 "Control handling of class initialization during replay: " \ 3588 "0 - don't do anything special; " \ 3589 "1 - treat all class initializers as empty; " \ 3590 "2 - treat class initializers for application classes as empty; " \ 3591 "3 - allow all class initializers to run during bootstrap but " \ 3592 " pretend they are empty after starting replay") \ 3593 range(0, 3) \ 3594 \ 3595 develop(bool, ReplayIgnoreInitErrors, false, \ 3596 "Ignore exceptions thrown during initialization for replay") \ 3597 \ 3598 product(bool, DumpReplayDataOnError, true, \ 3599 "Record replay data for crashing compiler threads") \ 3600 \ 3601 product(bool, CICompilerCountPerCPU, false, \ 3602 "1 compiler thread for log(N CPUs)") \ 3603 \ 3604 develop(intx, CIFireOOMAt, -1, \ 3605 "Fire OutOfMemoryErrors throughout CI for testing the compiler " \ 3606 "(non-negative value throws OOM after this many CI accesses " \ 3607 "in each compile)") \ 3608 notproduct(intx, CICrashAt, -1, \ 3609 "id of compilation to trigger assert in compiler thread for " \ 3610 "the purpose of testing, e.g. 
generation of replay data") \ 3611 notproduct(bool, CIObjectFactoryVerify, false, \ 3612 "enable potentially expensive verification in ciObjectFactory") \ 3613 \ 3614 /* Priorities */ \ 3615 product_pd(bool, UseThreadPriorities, "Use native thread priorities") \ 3616 \ 3617 product(intx, ThreadPriorityPolicy, 0, \ 3618 "0 : Normal. "\ 3619 " VM chooses priorities that are appropriate for normal "\ 3620 " applications. On Solaris NORM_PRIORITY and above are mapped "\ 3621 " to normal native priority. Java priorities below " \ 3622 " NORM_PRIORITY map to lower native priority values. On "\ 3623 " Windows applications are allowed to use higher native "\ 3624 " priorities. However, with ThreadPriorityPolicy=0, VM will "\ 3625 " not use the highest possible native priority, "\ 3626 " THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with "\ 3627 " system threads. On Linux thread priorities are ignored "\ 3628 " because the OS does not support static priority in "\ 3629 " SCHED_OTHER scheduling class which is the only choice for "\ 3630 " non-root, non-realtime applications. "\ 3631 "1 : Aggressive. "\ 3632 " Java thread priorities map over to the entire range of "\ 3633 " native thread priorities. Higher Java thread priorities map "\ 3634 " to higher native thread priorities. This policy should be "\ 3635 " used with care, as sometimes it can cause performance "\ 3636 " degradation in the application and/or the entire system. On "\ 3637 " Linux this policy requires root privilege.") \ 3638 range(0, 1) \ 3639 \ 3640 product(bool, ThreadPriorityVerbose, false, \ 3641 "Print priority changes") \ 3642 \ 3643 product(intx, CompilerThreadPriority, -1, \ 3644 "The native priority at which compiler threads should run " \ 3645 "(-1 means no change)") \ 3646 range(min_jint, max_jint) \ 3647 constraint(CompilerThreadPriorityConstraintFunc, AfterErgo) \ 3648 \ 3649 product(intx, VMThreadPriority, -1, \ 3650 "The native priority at which the VM thread should run " \ 3651 "(-1 means no change)") \ 3652 range(-1, 127) \ 3653 \ 3654 product(bool, CompilerThreadHintNoPreempt, true, \ 3655 "(Solaris only) Give compiler threads an extra quanta") \ 3656 \ 3657 product(bool, VMThreadHintNoPreempt, false, \ 3658 "(Solaris only) Give VM thread an extra quanta") \ 3659 \ 3660 product(intx, JavaPriority1_To_OSPriority, -1, \ 3661 "Map Java priorities to OS priorities") \ 3662 range(-1, 127) \ 3663 \ 3664 product(intx, JavaPriority2_To_OSPriority, -1, \ 3665 "Map Java priorities to OS priorities") \ 3666 range(-1, 127) \ 3667 \ 3668 product(intx, JavaPriority3_To_OSPriority, -1, \ 3669 "Map Java priorities to OS priorities") \ 3670 range(-1, 127) \ 3671 \ 3672 product(intx, JavaPriority4_To_OSPriority, -1, \ 3673 "Map Java priorities to OS priorities") \ 3674 range(-1, 127) \ 3675 \ 3676 product(intx, JavaPriority5_To_OSPriority, -1, \ 3677 "Map Java priorities to OS priorities") \ 3678 range(-1, 127) \ 3679 \ 3680 product(intx, JavaPriority6_To_OSPriority, -1, \ 3681 "Map Java priorities to OS priorities") \ 3682 range(-1, 127) \ 3683 \ 3684 product(intx, JavaPriority7_To_OSPriority, -1, \ 3685 "Map Java priorities to OS priorities") \ 3686 range(-1, 127) \ 3687 \ 3688 product(intx, JavaPriority8_To_OSPriority, -1, \ 3689 "Map Java priorities to OS priorities") \ 3690 range(-1, 127) \ 3691 \ 3692 product(intx, JavaPriority9_To_OSPriority, -1, \ 3693 "Map Java priorities to OS priorities") \ 3694 range(-1, 127) \ 3695 \ 3696 product(intx, JavaPriority10_To_OSPriority,-1, \ 3697 "Map Java priorities to OS priorities") \ 
3698 range(-1, 127) \ 3699 \ 3700 experimental(bool, UseCriticalJavaThreadPriority, false, \ 3701 "Java thread priority 10 maps to critical scheduling priority") \ 3702 \ 3703 experimental(bool, UseCriticalCompilerThreadPriority, false, \ 3704 "Compiler thread(s) run at critical scheduling priority") \ 3705 \ 3706 experimental(bool, UseCriticalCMSThreadPriority, false, \ 3707 "ConcurrentMarkSweep thread runs at critical scheduling priority")\ 3708 \ 3709 /* compiler debugging */ \ 3710 notproduct(intx, CompileTheWorldStartAt, 1, \ 3711 "First class to consider when using +CompileTheWorld") \ 3712 \ 3713 notproduct(intx, CompileTheWorldStopAt, max_jint, \ 3714 "Last class to consider when using +CompileTheWorld") \ 3715 \ 3716 develop(intx, NewCodeParameter, 0, \ 3717 "Testing Only: Create a dedicated integer parameter before " \ 3718 "putback") \ 3719 \ 3720 /* new oopmap storage allocation */ \ 3721 develop(intx, MinOopMapAllocation, 8, \ 3722 "Minimum number of OopMap entries in an OopMapSet") \ 3723 \ 3724 /* Background Compilation */ \ 3725 develop(intx, LongCompileThreshold, 50, \ 3726 "Used with +TraceLongCompiles") \ 3727 \ 3728 /* recompilation */ \ 3729 product_pd(intx, CompileThreshold, \ 3730 "number of interpreted method invocations before (re-)compiling") \ 3731 constraint(CompileThresholdConstraintFunc, AfterErgo) \ 3732 \ 3733 product(double, CompileThresholdScaling, 1.0, \ 3734 "Factor to control when first compilation happens " \ 3735 "(both with and without tiered compilation): " \ 3736 "values greater than 1.0 delay counter overflow, " \ 3737 "values between 0 and 1.0 rush counter overflow, " \ 3738 "value of 1.0 leaves compilation thresholds unchanged, " \ 3739 "value of 0.0 is equivalent to -Xint. " \ 3740 "" \ 3741 "Flag can be set as per-method option. 
" \ 3742 "If a value is specified for a method, compilation thresholds " \ 3743 "for that method are scaled by both the value of the global flag "\ 3744 "and the value of the per-method flag.") \ 3745 range(0.0, DBL_MAX) \ 3746 \ 3747 product(intx, Tier0InvokeNotifyFreqLog, 7, \ 3748 "Interpreter (tier 0) invocation notification frequency") \ 3749 range(0, 30) \ 3750 \ 3751 product(intx, Tier2InvokeNotifyFreqLog, 11, \ 3752 "C1 without MDO (tier 2) invocation notification frequency") \ 3753 range(0, 30) \ 3754 \ 3755 product(intx, Tier3InvokeNotifyFreqLog, 10, \ 3756 "C1 with MDO profiling (tier 3) invocation notification " \ 3757 "frequency") \ 3758 range(0, 30) \ 3759 \ 3760 product(intx, Tier23InlineeNotifyFreqLog, 20, \ 3761 "Inlinee invocation (tiers 2 and 3) notification frequency") \ 3762 range(0, 30) \ 3763 \ 3764 product(intx, Tier0BackedgeNotifyFreqLog, 10, \ 3765 "Interpreter (tier 0) invocation notification frequency") \ 3766 range(0, 30) \ 3767 \ 3768 product(intx, Tier2BackedgeNotifyFreqLog, 14, \ 3769 "C1 without MDO (tier 2) invocation notification frequency") \ 3770 range(0, 30) \ 3771 \ 3772 product(intx, Tier3BackedgeNotifyFreqLog, 13, \ 3773 "C1 with MDO profiling (tier 3) invocation notification " \ 3774 "frequency") \ 3775 range(0, 30) \ 3776 \ 3777 product(intx, Tier2CompileThreshold, 0, \ 3778 "threshold at which tier 2 compilation is invoked") \ 3779 range(0, max_jint) \ 3780 \ 3781 product(intx, Tier2BackEdgeThreshold, 0, \ 3782 "Back edge threshold at which tier 2 compilation is invoked") \ 3783 range(0, max_jint) \ 3784 \ 3785 product(intx, Tier3InvocationThreshold, 200, \ 3786 "Compile if number of method invocations crosses this " \ 3787 "threshold") \ 3788 range(0, max_jint) \ 3789 \ 3790 product(intx, Tier3MinInvocationThreshold, 100, \ 3791 "Minimum invocation to compile at tier 3") \ 3792 range(0, max_jint) \ 3793 \ 3794 product(intx, Tier3CompileThreshold, 2000, \ 3795 "Threshold at which tier 3 compilation is invoked (invocation " \ 3796 "minimum must be satisfied)") \ 3797 range(0, max_jint) \ 3798 \ 3799 product(intx, Tier3BackEdgeThreshold, 60000, \ 3800 "Back edge threshold at which tier 3 OSR compilation is invoked") \ 3801 range(0, max_jint) \ 3802 \ 3803 product(intx, Tier4InvocationThreshold, 5000, \ 3804 "Compile if number of method invocations crosses this " \ 3805 "threshold") \ 3806 range(0, max_jint) \ 3807 \ 3808 product(intx, Tier4MinInvocationThreshold, 600, \ 3809 "Minimum invocation to compile at tier 4") \ 3810 range(0, max_jint) \ 3811 \ 3812 product(intx, Tier4CompileThreshold, 15000, \ 3813 "Threshold at which tier 4 compilation is invoked (invocation " \ 3814 "minimum must be satisfied") \ 3815 range(0, max_jint) \ 3816 \ 3817 product(intx, Tier4BackEdgeThreshold, 40000, \ 3818 "Back edge threshold at which tier 4 OSR compilation is invoked") \ 3819 range(0, max_jint) \ 3820 \ 3821 product(intx, Tier3DelayOn, 5, \ 3822 "If C2 queue size grows over this amount per compiler thread " \ 3823 "stop compiling at tier 3 and start compiling at tier 2") \ 3824 range(0, max_jint) \ 3825 \ 3826 product(intx, Tier3DelayOff, 2, \ 3827 "If C2 queue size is less than this amount per compiler thread " \ 3828 "allow methods compiled at tier 2 transition to tier 3") \ 3829 range(0, max_jint) \ 3830 \ 3831 product(intx, Tier3LoadFeedback, 5, \ 3832 "Tier 3 thresholds will increase twofold when C1 queue size " \ 3833 "reaches this amount per compiler thread") \ 3834 range(0, max_jint) \ 3835 \ 3836 product(intx, Tier4LoadFeedback, 3, \ 3837 "Tier 4 
thresholds will increase twofold when C2 queue size " \ 3838 "reaches this amount per compiler thread") \ 3839 range(0, max_jint) \ 3840 \ 3841 product(intx, TieredCompileTaskTimeout, 50, \ 3842 "Kill compile task if method was not used within " \ 3843 "given timeout in milliseconds") \ 3844 range(0, max_intx) \ 3845 \ 3846 product(intx, TieredStopAtLevel, 4, \ 3847 "Stop at given compilation level") \ 3848 range(0, 4) \ 3849 \ 3850 product(intx, Tier0ProfilingStartPercentage, 200, \ 3851 "Start profiling in interpreter if the counters exceed tier 3 " \ 3852 "thresholds by the specified percentage") \ 3853 range(0, max_jint) \ 3854 \ 3855 product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \ 3856 "Increase the compile threshold for C1 compilation if the code " \ 3857 "cache is filled by the specified percentage") \ 3858 range(0, 99) \ 3859 \ 3860 product(intx, TieredRateUpdateMinTime, 1, \ 3861 "Minimum rate sampling interval (in milliseconds)") \ 3862 range(0, max_intx) \ 3863 \ 3864 product(intx, TieredRateUpdateMaxTime, 25, \ 3865 "Maximum rate sampling interval (in milliseconds)") \ 3866 range(0, max_intx) \ 3867 \ 3868 product_pd(bool, TieredCompilation, \ 3869 "Enable tiered compilation") \ 3870 \ 3871 product(bool, PrintTieredEvents, false, \ 3872 "Print tiered events notifications") \ 3873 \ 3874 product_pd(intx, OnStackReplacePercentage, \ 3875 "NON_TIERED number of method invocations/branches (expressed as " \ 3876 "% of CompileThreshold) before (re-)compiling OSR code") \ 3877 constraint(OnStackReplacePercentageConstraintFunc, AfterErgo) \ 3878 \ 3879 product(intx, InterpreterProfilePercentage, 33, \ 3880 "NON_TIERED number of method invocations/branches (expressed as " \ 3881 "% of CompileThreshold) before profiling in the interpreter") \ 3882 range(0, 100) \ 3883 \ 3884 develop(intx, MaxRecompilationSearchLength, 10, \ 3885 "The maximum number of frames to inspect when searching for " \ 3886 "recompilee") \ 3887 \ 3888 develop(intx, MaxInterpretedSearchLength, 3, \ 3889 "The maximum number of interpreted frames to skip when searching "\ 3890 "for recompilee") \ 3891 \ 3892 develop(intx, DesiredMethodLimit, 8000, \ 3893 "The desired maximum method size (in bytecodes) after inlining") \ 3894 \ 3895 develop(intx, HugeMethodLimit, 8000, \ 3896 "Don't compile methods larger than this if " \ 3897 "+DontCompileHugeMethods") \ 3898 \ 3899 /* New JDK 1.4 reflection implementation */ \ 3900 \ 3901 develop(intx, FastSuperclassLimit, 8, \ 3902 "Depth of hardwired instanceof accelerator array") \ 3903 \ 3904 /* Properties for Java libraries */ \ 3905 \ 3906 product(size_t, MaxDirectMemorySize, 0, \ 3907 "Maximum total size of NIO direct-buffer allocations") \ 3908 range(0, (size_t)SIZE_MAX) \ 3909 \ 3910 /* Flags used for temporary code during development */ \ 3911 \ 3912 diagnostic(bool, UseNewCode, false, \ 3913 "Testing Only: Use the new version while testing") \ 3914 \ 3915 diagnostic(bool, UseNewCode2, false, \ 3916 "Testing Only: Use the new version while testing") \ 3917 \ 3918 diagnostic(bool, UseNewCode3, false, \ 3919 "Testing Only: Use the new version while testing") \ 3920 \ 3921 /* flags for performance data collection */ \ 3922 \ 3923 product(bool, UsePerfData, falseInEmbedded, \ 3924 "Flag to disable jvmstat instrumentation for performance testing "\ 3925 "and problem isolation purposes") \ 3926 \ 3927 product(bool, PerfDataSaveToFile, false, \ 3928 "Save PerfData memory to hsperfdata_<pid> file on exit") \ 3929 \ 3930 product(ccstr, PerfDataSaveFile, NULL, \ 3931 "Save 
PerfData memory to the specified absolute pathname. " \ 3932 "The string %p in the file name (if present) " \ 3933 "will be replaced by pid") \ 3934 \ 3935 product(intx, PerfDataSamplingInterval, 50, \ 3936 "Data sampling interval (in milliseconds)") \ 3937 range(PeriodicTask::min_interval, max_jint) \ 3938 constraint(PerfDataSamplingIntervalFunc, AfterErgo) \ 3939 \ 3940 develop(bool, PerfTraceDataCreation, false, \ 3941 "Trace creation of Performance Data Entries") \ 3942 \ 3943 develop(bool, PerfTraceMemOps, false, \ 3944 "Trace PerfMemory create/attach/detach calls") \ 3945 \ 3946 product(bool, PerfDisableSharedMem, false, \ 3947 "Store performance data in standard memory") \ 3948 \ 3949 product(intx, PerfDataMemorySize, 32*K, \ 3950 "Size of performance data memory region. Will be rounded " \ 3951 "up to a multiple of the native os page size.") \ 3952 range(128, 32*64*K) \ 3953 \ 3954 product(intx, PerfMaxStringConstLength, 1024, \ 3955 "Maximum PerfStringConstant string length before truncation") \ 3956 range(32, 32*K) \ 3957 \ 3958 product(bool, PerfAllowAtExitRegistration, false, \ 3959 "Allow registration of atexit() methods") \ 3960 \ 3961 product(bool, PerfBypassFileSystemCheck, false, \ 3962 "Bypass Win32 file system criteria checks (Windows Only)") \ 3963 \ 3964 product(intx, UnguardOnExecutionViolation, 0, \ 3965 "Unguard page and retry on no-execute fault (Win32 only) " \ 3966 "0=off, 1=conservative, 2=aggressive") \ 3967 range(0, 2) \ 3968 \ 3969 /* Serviceability Support */ \ 3970 \ 3971 product(bool, ManagementServer, false, \ 3972 "Create JMX Management Server") \ 3973 \ 3974 product(bool, DisableAttachMechanism, false, \ 3975 "Disable mechanism that allows tools to attach to this VM") \ 3976 \ 3977 product(bool, StartAttachListener, false, \ 3978 "Always start Attach Listener at VM startup") \ 3979 \ 3980 manageable(bool, PrintConcurrentLocks, false, \ 3981 "Print java.util.concurrent locks in thread dump") \ 3982 \ 3983 product(bool, TransmitErrorReport, false, \ 3984 "Enable error report transmission on erroneous termination") \ 3985 \ 3986 product(ccstr, ErrorReportServer, NULL, \ 3987 "Override built-in error report server address") \ 3988 \ 3989 /* Shared spaces */ \ 3990 \ 3991 product(bool, UseSharedSpaces, true, \ 3992 "Use shared spaces for metadata") \ 3993 \ 3994 product(bool, VerifySharedSpaces, false, \ 3995 "Verify shared spaces (false for default archive, true for " \ 3996 "archive specified by -XX:SharedArchiveFile)") \ 3997 \ 3998 product(bool, RequireSharedSpaces, false, \ 3999 "Require shared spaces for metadata") \ 4000 \ 4001 product(bool, DumpSharedSpaces, false, \ 4002 "Special mode: JVM reads a class list, loads classes, builds " \ 4003 "shared spaces, and dumps the shared spaces to a file to be " \ 4004 "used in future JVM runs") \ 4005 \ 4006 product(bool, PrintSharedSpaces, false, \ 4007 "Print usage of shared spaces") \ 4008 \ 4009 product(bool, PrintSharedArchiveAndExit, false, \ 4010 "Print shared archive file contents") \ 4011 \ 4012 product(bool, PrintSharedDictionary, false, \ 4013 "If PrintSharedArchiveAndExit is true, also print the shared " \ 4014 "dictionary") \ 4015 \ 4016 product(size_t, SharedReadWriteSize, DEFAULT_SHARED_READ_WRITE_SIZE, \ 4017 "Size of read-write space for metadata (in bytes)") \ 4018 range(MIN_SHARED_READ_WRITE_SIZE, MAX_SHARED_READ_WRITE_SIZE) \ 4019 \ 4020 product(size_t, SharedReadOnlySize, DEFAULT_SHARED_READ_ONLY_SIZE, \ 4021 "Size of read-only space for metadata (in bytes)") \ 4022 
range(MIN_SHARED_READ_ONLY_SIZE, MAX_SHARED_READ_ONLY_SIZE) \ 4023 \ 4024 product(size_t, SharedMiscDataSize, DEFAULT_SHARED_MISC_DATA_SIZE, \ 4025 "Size of the shared miscellaneous data area (in bytes)") \ 4026 range(MIN_SHARED_MISC_DATA_SIZE, MAX_SHARED_MISC_DATA_SIZE) \ 4027 \ 4028 product(size_t, SharedMiscCodeSize, DEFAULT_SHARED_MISC_CODE_SIZE, \ 4029 "Size of the shared miscellaneous code area (in bytes)") \ 4030 range(MIN_SHARED_MISC_CODE_SIZE, MAX_SHARED_MISC_CODE_SIZE) \ 4031 \ 4032 product(size_t, SharedBaseAddress, LP64_ONLY(32*G) \ 4033 NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)), \ 4034 "Address to allocate shared memory region for class data") \ 4035 range(0, SIZE_MAX) \ 4036 \ 4037 product(uintx, SharedSymbolTableBucketSize, 4, \ 4038 "Average number of symbols per bucket in shared table") \ 4039 range(2, 246) \ 4040 \ 4041 diagnostic(bool, IgnoreUnverifiableClassesDuringDump, false, \ 4042 "Do not quit -Xshare:dump even if we encounter unverifiable " \ 4043 "classes. Just exclude them from the shared dictionary.") \ 4044 \ 4045 diagnostic(bool, PrintMethodHandleStubs, false, \ 4046 "Print generated stub code for method handles") \ 4047 \ 4048 develop(bool, TraceMethodHandles, false, \ 4049 "trace internal method handle operations") \ 4050 \ 4051 diagnostic(bool, VerifyMethodHandles, trueInDebug, \ 4052 "perform extra checks when constructing method handles") \ 4053 \ 4054 diagnostic(bool, ShowHiddenFrames, false, \ 4055 "show method handle implementation frames (usually hidden)") \ 4056 \ 4057 experimental(bool, TrustFinalNonStaticFields, false, \ 4058 "trust final non-static declarations for constant folding") \ 4059 \ 4060 diagnostic(bool, FoldStableValues, true, \ 4061 "Optimize loads from stable fields (marked w/ @Stable)") \ 4062 \ 4063 develop(bool, TraceInvokeDynamic, false, \ 4064 "trace internal invoke dynamic operations") \ 4065 \ 4066 diagnostic(bool, PauseAtStartup, false, \ 4067 "Causes the VM to pause at startup time and wait for the pause " \ 4068 "file to be removed (default: ./vm.paused.<pid>)") \ 4069 \ 4070 diagnostic(ccstr, PauseAtStartupFile, NULL, \ 4071 "The file to create and for whose removal to await when pausing " \ 4072 "at startup. 
(default: ./vm.paused.<pid>)") \ 4073 \ 4074 diagnostic(bool, PauseAtExit, false, \ 4075 "Pause and wait for keypress on exit if a debugger is attached") \ 4076 \ 4077 product(bool, ExtendedDTraceProbes, false, \ 4078 "Enable performance-impacting dtrace probes") \ 4079 \ 4080 product(bool, DTraceMethodProbes, false, \ 4081 "Enable dtrace probes for method-entry and method-exit") \ 4082 \ 4083 product(bool, DTraceAllocProbes, false, \ 4084 "Enable dtrace probes for object allocation") \ 4085 \ 4086 product(bool, DTraceMonitorProbes, false, \ 4087 "Enable dtrace probes for monitor events") \ 4088 \ 4089 product(bool, RelaxAccessControlCheck, false, \ 4090 "Relax the access control checks in the verifier") \ 4091 \ 4092 product(uintx, StringTableSize, defaultStringTableSize, \ 4093 "Number of buckets in the interned String table") \ 4094 range(minimumStringTableSize, 111*defaultStringTableSize) \ 4095 \ 4096 experimental(uintx, SymbolTableSize, defaultSymbolTableSize, \ 4097 "Number of buckets in the JVM internal Symbol table") \ 4098 range(minimumSymbolTableSize, 111*defaultSymbolTableSize) \ 4099 \ 4100 product(bool, UseStringDeduplication, false, \ 4101 "Use string deduplication") \ 4102 \ 4103 product(uintx, StringDeduplicationAgeThreshold, 3, \ 4104 "A string must reach this age (or be promoted to an old region) " \ 4105 "to be considered for deduplication") \ 4106 range(1, markOopDesc::max_age) \ 4107 \ 4108 diagnostic(bool, StringDeduplicationResizeALot, false, \ 4109 "Force table resize every time the table is scanned") \ 4110 \ 4111 diagnostic(bool, StringDeduplicationRehashALot, false, \ 4112 "Force table rehash every time the table is scanned") \ 4113 \ 4114 diagnostic(bool, WhiteBoxAPI, false, \ 4115 "Enable internal testing APIs") \ 4116 \ 4117 experimental(intx, SurvivorAlignmentInBytes, 0, \ 4118 "Default survivor space alignment in bytes") \ 4119 constraint(SurvivorAlignmentInBytesConstraintFunc,AfterErgo) \ 4120 \ 4121 product(bool, AllowNonVirtualCalls, false, \ 4122 "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \ 4123 \ 4124 product(ccstr, DumpLoadedClassList, NULL, \ 4125 "Dump the names of all loaded classes that could be stored into " \ 4126 "the CDS archive, in the specified file") \ 4127 \ 4128 product(ccstr, SharedClassListFile, NULL, \ 4129 "Override the default CDS class list") \ 4130 \ 4131 diagnostic(ccstr, SharedArchiveFile, NULL, \ 4132 "Override the default location of the CDS archive file") \ 4133 \ 4134 product(ccstr, ExtraSharedClassListFile, NULL, \ 4135 "Extra classlist for building the CDS archive file") \ 4136 \ 4137 experimental(size_t, ArrayAllocatorMallocLimit, \ 4138 SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1), \ 4139 "Allocation less than this value will be allocated " \ 4140 "using malloc. 
Larger allocations will use mmap.") \ 4141 \ 4142 experimental(bool, AlwaysAtomicAccesses, false, \ 4143 "Accesses to all variables should always be atomic") \ 4144 \ 4145 product(bool, EnableTracing, false, \ 4146 "Enable event-based tracing") \ 4147 \ 4148 product(bool, UseLockedTracing, false, \ 4149 "Use locked-tracing when doing event-based tracing") \ 4150 \ 4151 diagnostic(bool, UseUnalignedAccesses, false, \ 4152 "Use unaligned memory accesses in Unsafe") \ 4153 \ 4154 product_pd(bool, PreserveFramePointer, \ 4155 "Use the FP register for holding the frame pointer " \ 4156 "and not as a general purpose register.") \ 4157 \ 4158 diagnostic(bool, CheckIntrinsics, true, \ 4159 "When a class C is loaded, check that " \ 4160 "(1) all intrinsics defined by the VM for class C are present "\ 4161 "in the loaded class file and are marked with the " \ 4162 "@HotSpotIntrinsicCandidate annotation, that " \ 4163 "(2) there is an intrinsic registered for all loaded methods " \ 4164 "that are annotated with the @HotSpotIntrinsicCandidate " \ 4165 "annotation, and that " \ 4166 "(3) no orphan methods exist for class C (i.e., methods for " \ 4167 "which the VM declares an intrinsic but that are not declared "\ 4168 "in the loaded class C). " \ 4169 "Check (3) is available only in debug builds.") \ 4170 \ 4171 diagnostic(bool, CompilerDirectivesIgnoreCompileCommands, false, \ 4172 "Disable backwards compatibility for compile commands.") \ 4173 \ 4174 diagnostic(bool, CompilerDirectivesPrint, false, \ 4175 "Print compiler directives on installation.") \ 4176 diagnostic(int, CompilerDirectivesLimit, 50, \ 4177 "Limit on number of compiler directives.") 4178 4179 4180 /* 4181 * Macros for factoring of globals 4182 */ 4183 4184 // Interface macros 4185 #define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name; 4186 #define DECLARE_PD_PRODUCT_FLAG(type, name, doc) extern "C" type name; 4187 #define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name; 4188 #define DECLARE_EXPERIMENTAL_FLAG(type, name, value, doc) extern "C" type name; 4189 #define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name; 4190 #define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name; 4191 #ifdef PRODUCT 4192 #define DECLARE_DEVELOPER_FLAG(type, name, value, doc) const type name = value; 4193 #define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) const type name = pd_##name; 4194 #define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) const type name = value; 4195 #else 4196 #define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type name; 4197 #define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name; 4198 #define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name; 4199 #endif // PRODUCT 4200 // Special LP64 flags, product only needed for now. 
4201 #ifdef _LP64 4202 #define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) extern "C" type name; 4203 #else 4204 #define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) const type name = value; 4205 #endif // _LP64 4206 4207 // Implementation macros 4208 #define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value; 4209 #define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc) type name = pd_##name; 4210 #define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value; 4211 #define MATERIALIZE_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value; 4212 #define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value; 4213 #define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value; 4214 #ifdef PRODUCT 4215 #define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) 4216 #define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) 4217 #define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) 4218 #else 4219 #define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value; 4220 #define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name; 4221 #define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value; 4222 #endif // PRODUCT 4223 #ifdef _LP64 4224 #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value; 4225 #else 4226 #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */ 4227 #endif // _LP64 4228 4229 // Only materialize src code for range checking when required, ignore otherwise 4230 #define IGNORE_RANGE(a, b) 4231 // Only materialize src code for constraint checking when required, ignore otherwise 4232 #define IGNORE_CONSTRAINT(func,type) 4233 4234 RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, \ 4235 DECLARE_PD_DEVELOPER_FLAG, \ 4236 DECLARE_PRODUCT_FLAG, \ 4237 DECLARE_PD_PRODUCT_FLAG, \ 4238 DECLARE_DIAGNOSTIC_FLAG, \ 4239 DECLARE_EXPERIMENTAL_FLAG, \ 4240 DECLARE_NOTPRODUCT_FLAG, \ 4241 DECLARE_MANAGEABLE_FLAG, \ 4242 DECLARE_PRODUCT_RW_FLAG, \ 4243 DECLARE_LP64_PRODUCT_FLAG, \ 4244 IGNORE_RANGE, \ 4245 IGNORE_CONSTRAINT) 4246 4247 RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, \ 4248 DECLARE_PD_DEVELOPER_FLAG, \ 4249 DECLARE_PRODUCT_FLAG, \ 4250 DECLARE_PD_PRODUCT_FLAG, \ 4251 DECLARE_DIAGNOSTIC_FLAG, \ 4252 DECLARE_NOTPRODUCT_FLAG, \ 4253 IGNORE_RANGE, \ 4254 IGNORE_CONSTRAINT) 4255 4256 ARCH_FLAGS(DECLARE_DEVELOPER_FLAG, \ 4257 DECLARE_PRODUCT_FLAG, \ 4258 DECLARE_DIAGNOSTIC_FLAG, \ 4259 DECLARE_EXPERIMENTAL_FLAG, \ 4260 DECLARE_NOTPRODUCT_FLAG, \ 4261 IGNORE_RANGE, \ 4262 IGNORE_CONSTRAINT) 4263 4264 // Extensions 4265 4266 #include "runtime/globals_ext.hpp" 4267 4268 #endif // SHARE_VM_RUNTIME_GLOBALS_HPP
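// The flag table above is consumed through the X-macro pattern: each entry is
// a macro invocation that the DECLARE_* and MATERIALIZE_* definitions earlier
// in this file rewrite into a declaration or a definition. A minimal sketch,
// using the UseCompiler and MaxInlineSize entries from the table (the
// command-line values shown are arbitrary examples):
//
//   Table entry:
//     product(bool, UseCompiler, true, "Use Just-In-Time compilation")
//
//   Expanded via DECLARE_PRODUCT_FLAG(type, name, value, doc):
//     extern "C" bool UseCompiler;
//
//   Expanded via MATERIALIZE_PRODUCT_FLAG(type, name, value, doc):
//     bool UseCompiler = true;
//
// Product flags are typically set on the command line with -XX:+<Name> /
// -XX:-<Name> for booleans (e.g. -XX:-UseCompiler) and -XX:<Name>=<value>
// otherwise (e.g. -XX:MaxInlineSize=70). The range()/constraint() clauses
// attached to entries describe the accepted values; they are expanded into
// checking code only where required (see IGNORE_RANGE / IGNORE_CONSTRAINT
// above).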