241
242 // flag kind
243 KIND_PRODUCT = 1 << 4,
244 KIND_MANAGEABLE = 1 << 5,
245 KIND_DIAGNOSTIC = 1 << 6,
246 KIND_EXPERIMENTAL = 1 << 7,
247 KIND_NOT_PRODUCT = 1 << 8,
248 KIND_DEVELOP = 1 << 9,
249 KIND_PLATFORM_DEPENDENT = 1 << 10,
250 KIND_READ_WRITE = 1 << 11,
251 KIND_C1 = 1 << 12,
252 KIND_C2 = 1 << 13,
253 KIND_ARCH = 1 << 14,
254 KIND_SHARK = 1 << 15,
255 KIND_LP64_PRODUCT = 1 << 16,
256 KIND_COMMERCIAL = 1 << 17,
257
258 KIND_MASK = ~VALUE_ORIGIN_MASK
259 };
260
261 const char* _type;
262 const char* _name;
263 void* _addr;
264 NOT_PRODUCT(const char* _doc;)
265 Flags _flags;
266
267 // points to the static array of all Flags
268 static Flag* flags;
269
270 // number of flags
271 static size_t numFlags;
272
273 static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false);
274 static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
275
276 void check_writable();
277
278 bool is_bool() const;
279 bool get_bool() const;
280 void set_bool(bool value);
281
282 bool is_int() const;
283 int get_int() const;
284 void set_int(int value);
285
286 bool is_uint() const;
287 uint get_uint() const;
288 void set_uint(uint value);
289
290 bool is_intx() const;
291 intx get_intx() const;
292 void set_intx(intx value);
328 bool is_read_write() const;
329 bool is_commercial() const;
330
331 bool is_constant_in_binary() const;
332
333 bool is_unlocker() const;
334 bool is_unlocked() const;
335 bool is_writeable() const;
336 bool is_external() const;
337
338 bool is_unlocker_ext() const;
339 bool is_unlocked_ext() const;
340 bool is_writeable_ext() const;
341 bool is_external_ext() const;
342
343 void unlock_diagnostic();
344
345 void get_locked_message(char*, int) const;
346 void get_locked_message_ext(char*, int) const;
347
348 void print_on(outputStream* st, bool withComments = false);
349 void print_kind(outputStream* st);
350 void print_as_flag(outputStream* st);
351 };
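
// A minimal usage sketch of the registry declared above (assumptions: the
// enclosing struct is Flag, as its self-referential declarations suggest;
// tty is HotSpot's global outputStream; PrintCompilation is one of the
// product flags declared in the table further below):
//
//   Flag* f = Flag::find_flag("PrintCompilation", strlen("PrintCompilation"));
//   if (f != NULL && f->is_bool() && f->is_unlocked()) {
//     f->print_on(tty);   // formats the flag using the fields declared above
//   }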
352
353 // debug flags control various aspects of the VM and are globally accessible
354
355 // use FlagSetting to temporarily change the value of some debug flag,
356 // e.g. FlagSetting fs(DebugThisAndThat, true);
357 // the flag is restored to its previous value upon leaving the scope
358 class FlagSetting {
359 bool val;
360 bool* flag;
361 public:
362 FlagSetting(bool& fl, bool newValue) { flag = &fl; val = fl; fl = newValue; }
363 ~FlagSetting() { *flag = val; }
364 };
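
// A minimal usage sketch (DebugThisAndThat is the hypothetical develop flag
// from the comment above; do_risky_work() is a placeholder for the code
// being debugged):
//
//   {
//     FlagSetting fs(DebugThisAndThat, true);  // flag forced to true here
//     do_risky_work();                         // runs with the flag enabled
//   }                                          // destructor restores the old value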
365
366
367 class CounterSetting {
368 intx* counter;
369 public:
370 CounterSetting(intx* cnt) { counter = cnt; (*counter)++; }
396 };
397
398 class DoubleFlagSetting {
399 double val;
400 double* flag;
401 public:
402 DoubleFlagSetting(double& fl, double newValue) { flag = &fl; val = fl; fl = newValue; }
403 ~DoubleFlagSetting() { *flag = val; }
404 };
405
406 class SizeTFlagSetting {
407 size_t val;
408 size_t* flag;
409 public:
410 SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; }
411 ~SizeTFlagSetting() { *flag = val; }
412 };
413
414
415 class CommandLineFlags {
416 public:
417 static bool boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
418 static bool boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
419 static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
420 static bool boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
421
422 static bool intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false);
423 static bool intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); }
424 static bool intAtPut(const char* name, size_t len, int* value, Flag::Flags origin);
425 static bool intAtPut(const char* name, int* value, Flag::Flags origin) { return intAtPut(name, strlen(name), value, origin); }
426
427 static bool uintAt(const char* name, size_t len, uint* value, bool allow_locked = false, bool return_flag = false);
428 static bool uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); }
429 static bool uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin);
430 static bool uintAtPut(const char* name, uint* value, Flag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); }
431
432 static bool intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
433 static bool intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
434 static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
435 static bool intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }
436
437 static bool uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false);
438 static bool uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); }
439 static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
440 static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
441
442 static bool size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false);
443 static bool size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); }
444 static bool size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin);
445 static bool size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); }
446
447 static bool uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false);
448 static bool uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); }
449 static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
450 static bool uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
451
452 static bool doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
453 static bool doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
454 static bool doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
455 static bool doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
456
457 static bool ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
458 static bool ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
459 // Contract: Flag will make a private copy of the incoming value.
460 // The outgoing value is always malloc-ed, and the caller MUST call free() on it.
461 static bool ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
462 static bool ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }
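
// A minimal sketch of the contract above (NativeMemoryTracking is a ccstr
// flag declared in the table below; Flag::COMMAND_LINE is assumed to be one
// of the value-origin constants elided from this excerpt, and plain free()
// is shown as an assumption for releasing the malloc-ed outgoing string):
//
//   ccstr value = "summary";                       // incoming value is copied
//   if (CommandLineFlags::ccstrAtPut("NativeMemoryTracking", &value,
//                                    Flag::COMMAND_LINE)) {
//     free((void*) value);                         // outgoing value is malloc-ed
//   }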
463
464 // Returns false if name is not a command line flag.
465 static bool wasSetOnCmdline(const char* name, bool* value);
466 static void printSetFlags(outputStream* out);
467
468 static void printFlags(outputStream* out, bool withComments);
469
470 static void verify() PRODUCT_RETURN;
471 };
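
// A minimal sketch of programmatic access through CommandLineFlags (the flag
// name is one declared in the table below; Flag::COMMAND_LINE is assumed to
// be one of the value-origin constants elided from this excerpt):
//
//   bool value;
//   if (CommandLineFlags::boolAt("PrintCompilation", &value)) {
//     // value now holds the current setting of -XX:+/-PrintCompilation
//   }
//   bool enable = true;
//   CommandLineFlags::boolAtPut("PrintCompilation", &enable, Flag::COMMAND_LINE);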
472
473 // use this for flags that are true by default in the debug version but
474 // false in the optimized version, and vice versa
475 #ifdef ASSERT
476 #define trueInDebug true
477 #define falseInDebug false
478 #else
479 #define trueInDebug false
480 #define falseInDebug true
481 #endif
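
// For example, a flag declared with trueInDebug as its default (shown with a
// hypothetical flag name; the same default is used by UnlockDiagnosticVMOptions
// and ForceFloatExceptions in the table below) is on in ASSERT builds and off
// in optimized builds without changing the declaration itself:
//
//   develop(bool, VerifyFrobnication, trueInDebug,
//           "Verify frobnication data structures")
//
// (inside RUNTIME_FLAGS each such line also carries a trailing backslash)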
482
483 // use this for flags that are true by default in the product build
484 // but false in development builds, and vice versa
485 #ifdef PRODUCT
486 #define trueInProduct true
487 #define falseInProduct false
488 #else
542 // - the flag is defined in a CCC as an external exported interface.
543 // - the VM implementation supports dynamic setting of the flag.
544 //   This implies that the VM must *always* query the flag variable
545 //   and not reuse state related to the flag at any given time.
546 // - you want the flag to be queried programmatically by the customers.
547 //
548 // product_rw flags are writeable internal product flags.
549 // They are like "manageable" flags but for internal/private use;
550 // the set of product_rw flags is internal/private and may be
551 // changed or removed in a future release. A product_rw flag can be
552 // read and set through the management interface when the name of
553 // the flag is supplied.
554 //
555 // A flag can be made "product_rw" only if
556 // - the VM implementation supports dynamic setting of the flag.
557 //   This implies that the VM must *always* query the flag variable
558 //   and not reuse state related to the flag at any given time.
559 //
560 // Note that if develop flags ever need to be made writeable,
561 // it can be done in the same way as product_rw.
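//
// For example, CMSWaitDuration below is declared "manageable", so it can be
// adjusted in a running VM through the management interface, e.g. (a typical
// invocation, with <pid> standing for the target VM's process id):
//
//   jinfo -flag CMSWaitDuration=3000 <pid>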
562
563 #define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \
564 \
565 lp64_product(bool, UseCompressedOops, false, \
566 "Use 32-bit object references in 64-bit VM. " \
567 "lp64_product means flag is always constant in 32 bit VM") \
568 \
569 lp64_product(bool, UseCompressedClassPointers, false, \
570 "Use 32-bit class pointers in 64-bit VM. " \
571 "lp64_product means flag is always constant in 32 bit VM") \
572 \
573 notproduct(bool, CheckCompressedOops, true, \
574 "Generate checks in encoding/decoding code in debug VM") \
575 \
576 product_pd(size_t, HeapBaseMinAddress, \
577 "OS specific low limit for heap base address") \
578 \
579 product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17), \
580 "Heap allocation steps through preferred address regions to find" \
581 " where it can allocate the heap. Number of steps to take per " \
582 "region.") \
583 \
584 diagnostic(bool, PrintCompressedOopsMode, false, \
585 "Print compressed oops base address and encoding mode") \
586 \
587 lp64_product(intx, ObjectAlignmentInBytes, 8, \
588 "Default object alignment in bytes, 8 is minimum") \
589 \
590 product(bool, AssumeMP, false, \
591 "Instruct the VM to assume multiple processors are available") \
592 \
593 /* UseMembar is theoretically a temp flag used for memory barrier \
594 * removal testing. It was supposed to be removed before FCS but has \
595 * been re-added (see 6401008) */ \
596 product_pd(bool, UseMembar, \
597 "(Unstable) Issues membars on thread state transitions") \
598 \
599 develop(bool, CleanChunkPoolAsync, falseInEmbedded, \
600 "Clean the chunk pool asynchronously") \
601 \
602 experimental(bool, AlwaysSafeConstructors, false, \
603 "Force safe construction, as if all fields are final.") \
604 \
605 /* Temporary: See 6948537 */ \
606 experimental(bool, UseMemSetInBOT, true, \
607 "(Unstable) uses memset in BOT updates in GC code") \
608 \
609 diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \
610 "Enable normal processing of flags relating to field diagnostics")\
611 \
612 experimental(bool, UnlockExperimentalVMOptions, false, \
613 "Enable normal processing of flags relating to experimental " \
614 "features") \
615 \
632 \
633 develop(bool, TracePageSizes, false, \
634 "Trace page size selection and usage") \
635 \
636 product(bool, UseNUMA, false, \
637 "Use NUMA if available") \
638 \
639 product(bool, UseNUMAInterleaving, false, \
640 "Interleave memory across NUMA nodes if available") \
641 \
642 product(size_t, NUMAInterleaveGranularity, 2*M, \
643 "Granularity to use for NUMA interleaving on Windows OS") \
644 \
645 product(bool, ForceNUMA, false, \
646 "Force NUMA optimizations on single-node/UMA systems") \
647 \
648 product(uintx, NUMAChunkResizeWeight, 20, \
649 "Percentage (0-100) used to weight the current sample when " \
650 "computing exponentially decaying average for " \
651 "AdaptiveNUMAChunkSizing") \
652 \
653 product(size_t, NUMASpaceResizeRate, 1*G, \
654 "Do not reallocate more than this amount per collection") \
655 \
656 product(bool, UseAdaptiveNUMAChunkSizing, true, \
657 "Enable adaptive chunk sizing for NUMA") \
658 \
659 product(bool, NUMAStats, false, \
660 "Print NUMA stats in detailed heap information") \
661 \
662 product(uintx, NUMAPageScanRate, 256, \
663 "Maximum number of pages to include in the page scan procedure") \
664 \
665 product_pd(bool, NeedsDeoptSuspend, \
666 "True for register window machines (sparc/ia64)") \
667 \
668 product(intx, UseSSE, 99, \
669 "Highest supported SSE instructions set on x86/x64") \
670 \
671 product(bool, UseAES, false, \
842 product(intx, SuspendRetryCount, 50, \
843 "Maximum retry count for an external suspend request") \
844 \
845 product(intx, SuspendRetryDelay, 5, \
846 "Milliseconds to delay per retry (* current_retry_count)") \
847 \
848 product(bool, AssertOnSuspendWaitFailure, false, \
849 "Assert/Guarantee on external suspend wait failure") \
850 \
851 product(bool, TraceSuspendWaitFailures, false, \
852 "Trace external suspend wait failures") \
853 \
854 product(bool, MaxFDLimit, true, \
855 "Bump the number of file descriptors to maximum in Solaris") \
856 \
857 diagnostic(bool, LogEvents, true, \
858 "Enable the various ring buffer event logs") \
859 \
860 diagnostic(uintx, LogEventsBufferEntries, 10, \
861 "Number of ring buffer event logs") \
862 \
863 product(bool, BytecodeVerificationRemote, true, \
864 "Enable the Java bytecode verifier for remote classes") \
865 \
866 product(bool, BytecodeVerificationLocal, false, \
867 "Enable the Java bytecode verifier for local classes") \
868 \
869 develop(bool, ForceFloatExceptions, trueInDebug, \
870 "Force exceptions on FP stack under/overflow") \
871 \
872 develop(bool, VerifyStackAtCalls, false, \
873 "Verify that the stack pointer is unchanged after calls") \
874 \
875 develop(bool, TraceJavaAssertions, false, \
876 "Trace java language assertions") \
877 \
878 notproduct(bool, CheckAssertionStatusDirectives, false, \
879 "Temporary - see javaClasses.cpp") \
880 \
881 notproduct(bool, PrintMallocFree, false, \
1014 \
1015 product(ccstr, NativeMemoryTracking, "off", \
1016 "Native memory tracking options") \
1017 \
1018 diagnostic(bool, PrintNMTStatistics, false, \
1019 "Print native memory tracking summary data if it is on") \
1020 \
1021 diagnostic(bool, LogCompilation, false, \
1022 "Log compilation activity in detail to LogFile") \
1023 \
1024 product(bool, PrintCompilation, false, \
1025 "Print compilations") \
1026 \
1027 diagnostic(bool, TraceNMethodInstalls, false, \
1028 "Trace nmethod installation") \
1029 \
1030 diagnostic(intx, ScavengeRootsInCode, 2, \
1031 "0: do not allow scavengable oops in the code cache; " \
1032 "1: allow scavenging from the code cache; " \
1033 "2: emit as many constants as the compiler can see") \
1034 \
1035 product(bool, AlwaysRestoreFPU, false, \
1036 "Restore the FPU control word after every JNI call (expensive)") \
1037 \
1038 diagnostic(bool, PrintCompilation2, false, \
1039 "Print additional statistics per compilation") \
1040 \
1041 diagnostic(bool, PrintAdapterHandlers, false, \
1042 "Print code generated for i2c/c2i adapters") \
1043 \
1044 diagnostic(bool, VerifyAdapterCalls, trueInDebug, \
1045 "Verify that i2c/c2i adapters are called properly") \
1046 \
1047 develop(bool, VerifyAdapterSharing, false, \
1048 "Verify that the code for shared adapters is the equivalent") \
1049 \
1050 diagnostic(bool, PrintAssembly, false, \
1051 "Print assembly code (using external disassembler.so)") \
1052 \
1053 diagnostic(ccstr, PrintAssemblyOptions, NULL, \
1287 \
1288 product(bool, EagerXrunInit, false, \
1289 "Eagerly initialize -Xrun libraries; allows startup profiling, " \
1290 "but not all -Xrun libraries may support the state of the VM " \
1291 "at this time") \
1292 \
1293 product(bool, PreserveAllAnnotations, false, \
1294 "Preserve RuntimeInvisibleAnnotations as well " \
1295 "as RuntimeVisibleAnnotations") \
1296 \
1297 develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \
1298 "Number of OutOfMemoryErrors preallocated with backtrace") \
1299 \
1300 product(bool, LazyBootClassLoader, true, \
1301 "Enable/disable lazy opening of boot class path entries") \
1302 \
1303 product(bool, UseXMMForArrayCopy, false, \
1304 "Use SSE2 MOVQ instruction for Arraycopy") \
1305 \
1306 product(intx, FieldsAllocationStyle, 1, \
1307 "0 - type based with oops first, 1 - with oops last, " \
1308 "2 - oops in super and sub classes are together") \
1309 \
1310 product(bool, CompactFields, true, \
1311 "Allocate nonstatic fields in gaps between previous fields") \
1312 \
1313 notproduct(bool, PrintFieldLayout, false, \
1314 "Print field layout for each class") \
1315 \
1316 product(intx, ContendedPaddingWidth, 128, \
1317 "How many bytes to pad the fields/classes marked @Contended with")\
1318 \
1319 product(bool, EnableContended, true, \
1320 "Enable @Contended annotation support") \
1321 \
1322 product(bool, RestrictContended, true, \
1323 "Restrict @Contended to trusted classes") \
1324 \
1325 product(bool, UseBiasedLocking, true, \
1326 "Enable biased locking in JVM") \
1327 \
1328 product(intx, BiasedLockingStartupDelay, 4000, \
1329 "Number of milliseconds to wait before enabling biased locking") \
1330 \
1331 diagnostic(bool, PrintBiasedLockingStatistics, false, \
1332 "Print statistics of biased locking in JVM") \
1333 \
1334 product(intx, BiasedLockingBulkRebiasThreshold, 20, \
1335 "Threshold of number of revocations per type to try to " \
1336 "rebias all objects in the heap of that type") \
1337 \
1459 product(bool, UseParallelGC, false, \
1460 "Use the Parallel Scavenge garbage collector") \
1461 \
1462 product(bool, UseParallelOldGC, false, \
1463 "Use the Parallel Old garbage collector") \
1464 \
1465 product(uintx, HeapMaximumCompactionInterval, 20, \
1466 "How often should we maximally compact the heap (not allowing " \
1467 "any dead space)") \
1468 \
1469 product(uintx, HeapFirstMaximumCompactionCount, 3, \
1470 "The collection count for the first maximum compaction") \
1471 \
1472 product(bool, UseMaximumCompactionOnSystemGC, true, \
1473 "Use maximum compaction in the Parallel Old garbage collector " \
1474 "for a system GC") \
1475 \
1476 product(uintx, ParallelOldDeadWoodLimiterMean, 50, \
1477 "The mean used by the parallel compact dead wood " \
1478 "limiter (a number between 0-100)") \
1479 \
1480 product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \
1481 "The standard deviation used by the parallel compact dead wood " \
1482 "limiter (a number between 0-100)") \
1483 \
1484 product(uint, ParallelGCThreads, 0, \
1485 "Number of parallel threads parallel gc will use") \
1486 \
1487 product(bool, UseDynamicNumberOfGCThreads, false, \
1488 "Dynamically choose the number of parallel threads " \
1489 "parallel gc will use") \
1490 \
1491 diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \
1492 "Force dynamic selection of the number of " \
1493 "parallel threads parallel gc will use to aid debugging") \
1494 \
1495 product(size_t, HeapSizePerGCThread, ScaleForWordSize(64*M), \
1496 "Size of heap (bytes) per GC thread used in calculating the " \
1497 "number of GC threads") \
1498 \
1499 product(bool, TraceDynamicGCThreads, false, \
1500 "Trace the dynamic GC thread usage") \
1501 \
1502 develop(bool, ParallelOldGCSplitALot, false, \
1503 "Provoke splitting (copying data from a young gen space to " \
1504 "multiple destination spaces)") \
1505 \
1506 develop(uintx, ParallelOldGCSplitInterval, 3, \
1507 "How often to provoke splitting a young gen space") \
1508 \
1509 product(uint, ConcGCThreads, 0, \
1510 "Number of threads concurrent gc will use") \
1511 \
1512 product(size_t, YoungPLABSize, 4096, \
1513 "Size of young gen promotion LAB's (in HeapWords)") \
1514 \
1515 product(size_t, OldPLABSize, 1024, \
1516 "Size of old gen promotion LAB's (in HeapWords), or Number \
1517 of blocks to attempt to claim when refilling CMS LAB's") \
1518 \
1519 product(uintx, GCTaskTimeStampEntries, 200, \
1520 "Number of time stamp entries per gc worker thread") \
1521 \
1522 product(bool, AlwaysTenure, false, \
1523 "Always tenure objects in eden (ParallelGC only)") \
1524 \
1525 product(bool, NeverTenure, false, \
1526 "Never tenure objects in eden, may tenure on overflow " \
1527 "(ParallelGC only)") \
1528 \
1529 product(bool, ScavengeBeforeFullGC, true, \
1530 "Scavenge youngest generation before each full GC.") \
1531 \
1532 develop(bool, ScavengeWithObjectsInToSpace, false, \
1533 "Allow scavenges to occur when to-space contains objects") \
1534 \
1535 product(bool, UseConcMarkSweepGC, false, \
1536 "Use Concurrent Mark-Sweep GC in the old generation") \
1537 \
1538 product(bool, ExplicitGCInvokesConcurrent, false, \
1539 "A System.gc() request invokes a concurrent collection; " \
1540 "(effective only when using concurrent collectors)") \
1541 \
1542 product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \
1543 "A System.gc() request invokes a concurrent collection and " \
1544 "also unloads classes during such a concurrent gc cycle " \
1545 "(effective only when UseConcMarkSweepGC)") \
1546 \
1547 product(bool, GCLockerInvokesConcurrent, false, \
1548 "The exit of a JNI critical section necessitating a scavenge, " \
1549 "also kicks off a background concurrent collection") \
1550 \
1551 product(uintx, GCLockerEdenExpansionPercent, 5, \
1552 "How much the GC can expand the eden by while the GC locker " \
1553 "is active (as a percentage)") \
1554 \
1555 diagnostic(uintx, GCLockerRetryAllocationCount, 2, \
1556 "Number of times to retry allocations when " \
1557 "blocked by the GC locker") \
1558 \
1559 develop(bool, UseCMSAdaptiveFreeLists, true, \
1560 "Use adaptive free lists in the CMS generation") \
1561 \
1562 develop(bool, UseAsyncConcMarkSweepGC, true, \
1563 "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
1564 \
1565 product(bool, UseCMSBestFit, true, \
1566 "Use CMS best fit allocation strategy") \
1567 \
1568 product(bool, UseParNewGC, false, \
1569 "Use parallel threads in the new generation") \
1570 \
1571 product(bool, PrintTaskqueue, false, \
1572 "Print taskqueue statistics for parallel collectors") \
1573 \
1574 product(bool, PrintTerminationStats, false, \
1575 "Print termination statistics for parallel collectors") \
1576 \
1577 product(uintx, ParallelGCBufferWastePct, 10, \
1578 "Wasted fraction of parallel allocation buffer") \
1579 \
1580 product(uintx, TargetPLABWastePct, 10, \
1581 "Target wasted space in last buffer as percent of overall " \
1582 "allocation") \
1583 \
1584 product(uintx, PLABWeight, 75, \
1585 "Percentage (0-100) used to weight the current sample when " \
1586 "computing exponentially decaying average for ResizePLAB") \
1587 \
1588 product(bool, ResizePLAB, true, \
1589 "Dynamically resize (survivor space) promotion LAB's") \
1590 \
1591 product(bool, PrintPLAB, false, \
1592 "Print (survivor space) promotion LAB's sizing decisions") \
1593 \
1594 product(intx, ParGCArrayScanChunk, 50, \
1595 "Scan a subset of object array and push remainder, if array is " \
1596 "bigger than this") \
1597 \
1598 product(bool, ParGCUseLocalOverflow, false, \
1599 "Instead of a global overflow list, use local overflow stacks") \
1600 \
1601 product(bool, ParGCTrimOverflow, true, \
1602 "Eagerly trim the local overflow lists " \
1603 "(when ParGCUseLocalOverflow)") \
1604 \
1605 notproduct(bool, ParGCWorkQueueOverflowALot, false, \
1606 "Simulate work queue overflow in ParNew") \
1607 \
1608 notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \
1609 "An `interval' counter that determines how frequently " \
1610 "we simulate overflow; a smaller number increases frequency") \
1611 \
1612 product(uintx, ParGCDesiredObjsFromOverflowList, 20, \
1613 "The desired number of objects to claim from the overflow list") \
1614 \
1615 diagnostic(uintx, ParGCStridesPerThread, 2, \
1616 "The number of strides per worker thread that we divide up the " \
1617 "card table scanning work into") \
1618 \
1619 diagnostic(intx, ParGCCardsPerStrideChunk, 256, \
1620 "The number of cards in each chunk of the parallel chunks used " \
1621 "during card table scanning") \
1622 \
1623 product(uintx, OldPLABWeight, 50, \
1624 "Percentage (0-100) used to weight the current sample when " \
1625 "computing exponentially decaying average for resizing " \
1626 "OldPLABSize") \
1627 \
1628 product(bool, ResizeOldPLAB, true, \
1629 "Dynamically resize (old gen) promotion LAB's") \
1630 \
1631 product(bool, PrintOldPLAB, false, \
1632 "Print (old gen) promotion LAB's sizing decisions") \
1633 \
1634 product(size_t, CMSOldPLABMin, 16, \
1635 "Minimum size of CMS gen promotion LAB caches per worker " \
1636 "per block size") \
1637 \
1638 product(size_t, CMSOldPLABMax, 1024, \
1639 "Maximum size of CMS gen promotion LAB caches per worker " \
1640 "per block size") \
1641 \
1642 product(uintx, CMSOldPLABNumRefills, 4, \
1643 "Nominal number of refills of CMS gen promotion LAB cache " \
1644 "per worker per block size") \
1645 \
1646 product(bool, CMSOldPLABResizeQuicker, false, \
1647 "React on-the-fly during a scavenge to a sudden " \
1648 "change in block demand rate") \
1649 \
1650 product(uintx, CMSOldPLABToleranceFactor, 4, \
1651 "The tolerance of the phase-change detector for on-the-fly " \
1652 "PLAB resizing during a scavenge") \
1653 \
1654 product(uintx, CMSOldPLABReactivityFactor, 2, \
1655 "The gain in the feedback loop for on-the-fly PLAB resizing " \
1656 "during a scavenge") \
1657 \
1658 product(bool, AlwaysPreTouch, false, \
1659 "Force all freshly committed pages to be pre-touched") \
1660 \
1661 product_pd(size_t, CMSYoungGenPerWorker, \
1662 "The maximum size of young gen chosen by default per GC worker " \
1663 "thread available") \
1664 \
1665 product(uintx, CMSIncrementalSafetyFactor, 10, \
1666 "Percentage (0-100) used to add conservatism when computing the " \
1667 "duty cycle") \
1668 \
1669 product(uintx, CMSExpAvgFactor, 50, \
1670 "Percentage (0-100) used to weight the current sample when " \
1671 "computing exponential averages for CMS statistics") \
1672 \
1673 product(uintx, CMS_FLSWeight, 75, \
1674 "Percentage (0-100) used to weight the current sample when " \
1675 "computing exponentially decaying averages for CMS FLS " \
1676 "statistics") \
1677 \
1678 product(uintx, CMS_FLSPadding, 1, \
1679 "The multiple of deviation from mean to use for buffering " \
1680 "against volatility in free list demand") \
1681 \
1682 product(uintx, FLSCoalescePolicy, 2, \
1683 "CMS: aggressiveness level for coalescing, increasing " \
1684 "from 0 to 4") \
1685 \
1686 product(bool, FLSAlwaysCoalesceLarge, false, \
1687 "CMS: larger free blocks are always available for coalescing") \
1688 \
1689 product(double, FLSLargestBlockCoalesceProximity, 0.99, \
1690 "CMS: the smaller the percentage the greater the coalescing " \
1691 "force") \
1692 \
1693 product(double, CMSSmallCoalSurplusPercent, 1.05, \
1694 "CMS: the factor by which to inflate estimated demand of small " \
1695 "block sizes to prevent coalescing with an adjoining block") \
1696 \
1697 product(double, CMSLargeCoalSurplusPercent, 0.95, \
1698 "CMS: the factor by which to inflate estimated demand of large " \
1699 "block sizes to prevent coalescing with an adjoining block") \
1700 \
1701 product(double, CMSSmallSplitSurplusPercent, 1.10, \
1702 "CMS: the factor by which to inflate estimated demand of small " \
1703 "block sizes to prevent splitting to supply demand for smaller " \
1704 "blocks") \
1705 \
1706 product(double, CMSLargeSplitSurplusPercent, 1.00, \
1707 "CMS: the factor by which to inflate estimated demand of large " \
1708 "block sizes to prevent splitting to supply demand for smaller " \
1709 "blocks") \
1710 \
1711 product(bool, CMSExtrapolateSweep, false, \
1712 "CMS: cushion for block demand during sweep") \
1713 \
1714 product(uintx, CMS_SweepWeight, 75, \
1715 "Percentage (0-100) used to weight the current sample when " \
1716 "computing exponentially decaying average for inter-sweep " \
1717 "duration") \
1718 \
1719 product(uintx, CMS_SweepPadding, 1, \
1720 "The multiple of deviation from mean to use for buffering " \
1721 "against volatility in inter-sweep duration") \
1722 \
1723 product(uintx, CMS_SweepTimerThresholdMillis, 10, \
1724 "Skip block flux-rate sampling for an epoch unless inter-sweep " \
1725 "duration exceeds this threshold in milliseconds") \
1726 \
1727 product(bool, CMSClassUnloadingEnabled, true, \
1728 "Whether class unloading enabled when using CMS GC") \
1729 \
1730 product(uintx, CMSClassUnloadingMaxInterval, 0, \
1731 "When CMS class unloading is enabled, the maximum CMS cycle " \
1732 "count for which classes may not be unloaded") \
1733 \
1734 develop(intx, CMSDictionaryChoice, 0, \
1735 "Use BinaryTreeDictionary as default in the CMS generation") \
1736 \
1737 product(uintx, CMSIndexedFreeListReplenish, 4, \
1738 "Replenish an indexed free list with this number of chunks") \
1739 \
1740 product(bool, CMSReplenishIntermediate, true, \
1741 "Replenish all intermediate free-list caches") \
1742 \
1743 product(bool, CMSSplitIndexedFreeListBlocks, true, \
1744 "When satisfying batched demand, split blocks from the " \
1745 "IndexedFreeList whose size is a multiple of requested size") \
1746 \
1747 product(bool, CMSLoopWarn, false, \
1748 "Warn in case of excessive CMS looping") \
1749 \
1750 develop(bool, CMSOverflowEarlyRestoration, false, \
1751 "Restore preserved marks early") \
1752 \
1753 product(size_t, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \
1754 "Size of marking stack") \
1755 \
1756 product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \
1757 "Maximum size of marking stack") \
1758 \
1759 notproduct(bool, CMSMarkStackOverflowALot, false, \
1760 "Simulate frequent marking stack / work queue overflow") \
1761 \
1762 notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \
1763 "An \"interval\" counter that determines how frequently " \
1764 "to simulate overflow; a smaller number increases frequency") \
1765 \
1766 product(uintx, CMSMaxAbortablePrecleanLoops, 0, \
1767 "Maximum number of abortable preclean iterations, if > 0") \
1768 \
1769 product(intx, CMSMaxAbortablePrecleanTime, 5000, \
1770 "Maximum time in abortable preclean (in milliseconds)") \
1771 \
1772 product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \
1773 "Nominal minimum work per abortable preclean iteration") \
1774 \
1775 manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \
1776 "Time that we sleep between iterations when not given " \
1777 "enough work per iteration") \
1778 \
1779 product(size_t, CMSRescanMultiple, 32, \
1780 "Size (in cards) of CMS parallel rescan task") \
1781 \
1782 product(size_t, CMSConcMarkMultiple, 32, \
1783 "Size (in cards) of CMS concurrent MT marking task") \
1784 \
1785 product(bool, CMSAbortSemantics, false, \
1786 "Whether abort-on-overflow semantics is implemented") \
1787 \
1788 product(bool, CMSParallelInitialMarkEnabled, true, \
1789 "Use the parallel initial mark.") \
1790 \
1791 product(bool, CMSParallelRemarkEnabled, true, \
1792 "Whether parallel remark enabled (only if ParNewGC)") \
1793 \
1794 product(bool, CMSParallelSurvivorRemarkEnabled, true, \
1795 "Whether parallel remark of survivor space " \
1796 "enabled (effective only if CMSParallelRemarkEnabled)") \
1797 \
1798 product(bool, CMSPLABRecordAlways, true, \
1799 "Always record survivor space PLAB boundaries (effective only " \
1800 "if CMSParallelSurvivorRemarkEnabled)") \
1801 \
1802 product(bool, CMSEdenChunksRecordAlways, true, \
1803 "Always record eden chunks used for the parallel initial mark " \
1804 "or remark of eden") \
1805 \
1806 product(bool, CMSPrintEdenSurvivorChunks, false, \
1807 "Print the eden and the survivor chunks used for the parallel " \
1808 "initial mark or remark of the eden/survivor spaces") \
1809 \
1810 product(bool, CMSConcurrentMTEnabled, true, \
1811 "Whether multi-threaded concurrent work enabled " \
1812 "(effective only if ParNewGC)") \
1813 \
1814 product(bool, CMSPrecleaningEnabled, true, \
1815 "Whether concurrent precleaning enabled") \
1816 \
1817 product(uintx, CMSPrecleanIter, 3, \
1818 "Maximum number of precleaning iteration passes") \
1819 \
1820 product(uintx, CMSPrecleanNumerator, 2, \
1821 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
1822 "ratio") \
1823 \
1824 product(uintx, CMSPrecleanDenominator, 3, \
1825 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
1826 "ratio") \
1827 \
1828 product(bool, CMSPrecleanRefLists1, true, \
1829 "Preclean ref lists during (initial) preclean phase") \
1830 \
1831 product(bool, CMSPrecleanRefLists2, false, \
1832 "Preclean ref lists during abortable preclean phase") \
1833 \
1834 product(bool, CMSPrecleanSurvivors1, false, \
1835 "Preclean survivors during (initial) preclean phase") \
1836 \
1837 product(bool, CMSPrecleanSurvivors2, true, \
1838 "Preclean survivors during abortable preclean phase") \
1839 \
1840 product(uintx, CMSPrecleanThreshold, 1000, \
1841 "Do not iterate again if number of dirty cards is less than this")\
1842 \
1843 product(bool, CMSCleanOnEnter, true, \
1844 "Clean-on-enter optimization for reducing number of dirty cards") \
1845 \
1846 product(uintx, CMSRemarkVerifyVariant, 1, \
1847 "Choose variant (1,2) of verification following remark") \
1848 \
1849 product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M, \
1850 "If Eden size is below this, do not try to schedule remark") \
1851 \
1852 product(uintx, CMSScheduleRemarkEdenPenetration, 50, \
1853 "The Eden occupancy percentage (0-100) at which " \
1854 "to try and schedule remark pause") \
1855 \
1856 product(uintx, CMSScheduleRemarkSamplingRatio, 5, \
1857 "Start sampling eden top at least before young gen " \
1858 "occupancy reaches 1/<ratio> of the size at which " \
1859 "we plan to schedule remark") \
1860 \
1861 product(uintx, CMSSamplingGrain, 16*K, \
1862 "The minimum distance between eden samples for CMS (see above)") \
1863 \
1864 product(bool, CMSScavengeBeforeRemark, false, \
1865 "Attempt scavenge before the CMS remark step") \
1866 \
1867 develop(bool, CMSTraceSweeper, false, \
1868 "Trace some actions of the CMS sweeper") \
1869 \
1870 product(uintx, CMSWorkQueueDrainThreshold, 10, \
1871 "Don't drain below this size per parallel worker/thief") \
1872 \
1873 manageable(intx, CMSWaitDuration, 2000, \
1874 "Time in milliseconds that CMS thread waits for young GC") \
1875 \
1876 develop(uintx, CMSCheckInterval, 1000, \
1877 "Interval in milliseconds that CMS thread checks if it " \
1878 "should start a collection cycle") \
1879 \
1880 product(bool, CMSYield, true, \
1881 "Yield between steps of CMS") \
1882 \
1883 product(size_t, CMSBitMapYieldQuantum, 10*M, \
1884 "Bitmap operations should process at most this many bits " \
1885 "between yields") \
1886 \
1887 product(bool, CMSDumpAtPromotionFailure, false, \
1888 "Dump useful information about the state of the CMS old " \
1889 "generation upon a promotion failure") \
1890 \
1891 product(bool, CMSPrintChunksInDump, false, \
1892 "In a dump enabled by CMSDumpAtPromotionFailure, include " \
1893 "more detailed information about the free chunks") \
1894 \
1895 product(bool, CMSPrintObjectsInDump, false, \
1896 "In a dump enabled by CMSDumpAtPromotionFailure, include " \
1897 "more detailed information about the allocated objects") \
1898 \
1899 diagnostic(bool, FLSVerifyAllHeapReferences, false, \
1900 "Verify that all references across the FLS boundary " \
1901 "are to valid objects") \
1902 \
1903 diagnostic(bool, FLSVerifyLists, false, \
1904 "Do lots of (expensive) FreeListSpace verification") \
1905 \
1906 diagnostic(bool, FLSVerifyIndexTable, false, \
1907 "Do lots of (expensive) FLS index table verification") \
1908 \
1909 develop(bool, FLSVerifyDictionary, false, \
1910 "Do lots of (expensive) FLS dictionary verification") \
1911 \
1912 develop(bool, VerifyBlockOffsetArray, false, \
1913 "Do (expensive) block offset array verification") \
1914 \
1915 diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \
1916 "Maintain _unallocated_block in BlockOffsetArray " \
1917 "(currently applicable only to CMS collector)") \
1918 \
1919 develop(bool, TraceCMSState, false, \
1920 "Trace the state of the CMS collection") \
1921 \
1922 product(intx, RefDiscoveryPolicy, 0, \
1923 "Select type of reference discovery policy: " \
1924 "reference-based(0) or referent-based(1)") \
1925 \
1926 product(bool, ParallelRefProcEnabled, false, \
1927 "Enable parallel reference processing whenever possible") \
1928 \
1929 product(bool, ParallelRefProcBalancingEnabled, true, \
1930 "Enable balancing of reference processing queues") \
1931 \
1932 product(uintx, CMSTriggerRatio, 80, \
1933 "Percentage of MinHeapFreeRatio in CMS generation that is " \
1934 "allocated before a CMS collection cycle commences") \
1935 \
1936 product(uintx, CMSBootstrapOccupancy, 50, \
1937 "Percentage CMS generation occupancy at which to " \
1938 "initiate CMS collection for bootstrapping collection stats") \
1939 \
1940 product(intx, CMSInitiatingOccupancyFraction, -1, \
1941 "Percentage CMS generation occupancy to start a CMS collection " \
1942 "cycle. A negative value means that CMSTriggerRatio is used") \
1943 \
1944 product(uintx, InitiatingHeapOccupancyPercent, 45, \
1945 "Percentage of the (entire) heap occupancy to start a " \
1946 "concurrent GC cycle. It is used by GCs that trigger a " \
1947 "concurrent GC cycle based on the occupancy of the entire heap, " \
1948 "not just one of the generations (e.g., G1). A value of 0 " \
1949 "denotes 'do constant GC cycles'.") \
1950 \
1951 manageable(intx, CMSTriggerInterval, -1, \
1952 "Commence a CMS collection cycle (at least) every so many " \
1953 "milliseconds (0 permanently, -1 disabled)") \
1954 \
1955 product(bool, UseCMSInitiatingOccupancyOnly, false, \
1956 "Only use occupancy as a criterion for starting a CMS collection")\
1957 \
1958 product(uintx, CMSIsTooFullPercentage, 98, \
1959 "An absolute ceiling above which CMS will always consider the " \
1960 "unloading of classes when class unloading is enabled") \
1961 \
1962 develop(bool, CMSTestInFreeList, false, \
1963 "Check if the coalesced range is already in the " \
1964 "free lists as claimed") \
1965 \
1966 notproduct(bool, CMSVerifyReturnedBytes, false, \
1967 "Check that all the garbage collected was returned to the " \
1968 "free lists") \
1969 \
1970 notproduct(bool, ScavengeALot, false, \
1971 "Force scavenge at every Nth exit from the runtime system " \
1972 "(N=ScavengeALotInterval)") \
1973 \
1974 develop(bool, FullGCALot, false, \
1975 "Force full gc at every Nth exit from the runtime system " \
1976 "(N=FullGCALotInterval)") \
1977 \
1978 notproduct(bool, GCALotAtAllSafepoints, false, \
1979 "Enforce ScavengeALot/GCALot at all potential safepoints") \
1980 \
2050 product(bool, TLABStats, true, \
2051 "Provide more detailed and expensive TLAB statistics " \
2052 "(with PrintTLAB)") \
2053 \
2054 product_pd(bool, NeverActAsServerClassMachine, \
2055 "Never act like a server-class machine") \
2056 \
2057 product(bool, AlwaysActAsServerClassMachine, false, \
2058 "Always act like a server-class machine") \
2059 \
2060 product_pd(uint64_t, MaxRAM, \
2061 "Real memory size (in bytes) used to set maximum heap size") \
2062 \
2063 product(size_t, ErgoHeapSizeLimit, 0, \
2064 "Maximum ergonomically set heap size (in bytes); zero means use " \
2065 "MaxRAM / MaxRAMFraction") \
2066 \
2067 product(uintx, MaxRAMFraction, 4, \
2068 "Maximum fraction (1/n) of real memory used for maximum heap " \
2069 "size") \
2070 \
2071 product(uintx, DefaultMaxRAMFraction, 4, \
2072 "Maximum fraction (1/n) of real memory used for maximum heap " \
2073 "size; deprecated: to be renamed to MaxRAMFraction") \
2074 \
2075 product(uintx, MinRAMFraction, 2, \
2076 "Minimum fraction (1/n) of real memory used for maximum heap " \
2077 "size on systems with small physical memory size") \
2078 \
2079 product(uintx, InitialRAMFraction, 64, \
2080 "Fraction (1/n) of real memory used for initial heap size") \
2081 \
2082 develop(uintx, MaxVirtMemFraction, 2, \
2083 "Maximum fraction (1/n) of virtual memory used for ergonomically "\
2084 "determining maximum heap size") \
2085 \
2086 product(bool, UseAutoGCSelectPolicy, false, \
2087 "Use automatic collection selection policy") \
2088 \
2089 product(uintx, AutoGCSelectPauseMillis, 5000, \
2090 "Automatic GC selection pause threshold in milliseconds") \
2091 \
2092 product(bool, UseAdaptiveSizePolicy, true, \
2093 "Use adaptive generation sizing policies") \
2094 \
2095 product(bool, UsePSAdaptiveSurvivorSizePolicy, true, \
2096 "Use adaptive survivor sizing policies") \
2097 \
2098 product(bool, UseAdaptiveGenerationSizePolicyAtMinorCollection, true, \
2099 "Use adaptive young-old sizing policies at minor collections") \
2100 \
2119 develop(bool, PSAdjustTenuredGenForMinorPause, false, \
2120 "Adjust tenured generation to achieve a minor pause goal") \
2121 \
2122 develop(bool, PSAdjustYoungGenForMajorPause, false, \
2123 "Adjust young generation to achieve a major pause goal") \
2124 \
2125 product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \
2126 "Number of steps where heuristics is used before data is used") \
2127 \
2128 develop(uintx, AdaptiveSizePolicyReadyThreshold, 5, \
2129 "Number of collections before the adaptive sizing is started") \
2130 \
2131 product(uintx, AdaptiveSizePolicyOutputInterval, 0, \
2132 "Collection interval for printing information; zero means never") \
2133 \
2134 product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \
2135 "Use adaptive minimum footprint as a goal") \
2136 \
2137 product(uintx, AdaptiveSizePolicyWeight, 10, \
2138 "Weight given to exponential resizing, between 0 and 100") \
2139 \
2140 product(uintx, AdaptiveTimeWeight, 25, \
2141 "Weight given to time in adaptive policy, between 0 and 100") \
2142 \
2143 product(uintx, PausePadding, 1, \
2144 "How much buffer to keep for pause time") \
2145 \
2146 product(uintx, PromotedPadding, 3, \
2147 "How much buffer to keep for promotion failure") \
2148 \
2149 product(uintx, SurvivorPadding, 3, \
2150 "How much buffer to keep for survivor overflow") \
2151 \
2152 product(uintx, ThresholdTolerance, 10, \
2153 "Allowed collection cost difference between generations") \
2154 \
2155 product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50, \
2156 "If collection costs are within margin, reduce both by full " \
2157 "delta") \
2158 \
2159 product(uintx, YoungGenerationSizeIncrement, 20, \
2160 "Adaptive size percentage change in young generation") \
2161 \
2162 product(uintx, YoungGenerationSizeSupplement, 80, \
2163 "Supplement to YoungedGenerationSizeIncrement used at startup") \
2164 \
2165 product(uintx, YoungGenerationSizeSupplementDecay, 8, \
2166 "Decay factor to YoungedGenerationSizeSupplement") \
2167 \
2168 product(uintx, TenuredGenerationSizeIncrement, 20, \
2169 "Adaptive size percentage change in tenured generation") \
2170 \
2171 product(uintx, TenuredGenerationSizeSupplement, 80, \
2172 "Supplement to TenuredGenerationSizeIncrement used at startup") \
2173 \
2174 product(uintx, TenuredGenerationSizeSupplementDecay, 2, \
2175 "Decay factor to TenuredGenerationSizeIncrement") \
2176 \
2177 product(uintx, MaxGCPauseMillis, max_uintx, \
2178 "Adaptive size policy maximum GC pause time goal in millisecond, "\
2179 "or (G1 Only) the maximum GC time per MMU time slice") \
2180 \
2181 product(uintx, GCPauseIntervalMillis, 0, \
2182 "Time slice for MMU specification") \
2183 \
2184 product(uintx, MaxGCMinorPauseMillis, max_uintx, \
2185 "Adaptive size policy maximum GC minor pause time goal " \
2186 "in millisecond") \
2187 \
2188 product(uintx, GCTimeRatio, 99, \
2189 "Adaptive size policy application time to GC time ratio") \
2190 \
2191 product(uintx, AdaptiveSizeDecrementScaleFactor, 4, \
2192 "Adaptive size scale down factor for shrinking") \
2193 \
2194 product(bool, UseAdaptiveSizeDecayMajorGCCost, true, \
2195 "Adaptive size decays the major cost for long major intervals") \
2196 \
2197 product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10, \
2198 "Time scale over which major costs decay") \
2199 \
2200 product(uintx, MinSurvivorRatio, 3, \
2201 "Minimum ratio of young generation/survivor space size") \
2202 \
2203 product(uintx, InitialSurvivorRatio, 8, \
2204 "Initial ratio of young generation/survivor space size") \
2205 \
2206 product(size_t, BaseFootPrintEstimate, 256*M, \
2207 "Estimate of footprint other than Java Heap") \
2208 \
2209 product(bool, UseGCOverheadLimit, true, \
2210 "Use policy to limit of proportion of time spent in GC " \
2211 "before an OutOfMemory error is thrown") \
2212 \
2213 product(uintx, GCTimeLimit, 98, \
2214 "Limit of the proportion of time spent in GC before " \
2215 "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \
2216 \
2217 product(uintx, GCHeapFreeLimit, 2, \
2218 "Minimum percentage of free space after a full GC before an " \
2219 "OutOfMemoryError is thrown (used with GCTimeLimit)") \
2220 \
2221 develop(uintx, AdaptiveSizePolicyGCTimeLimitThreshold, 5, \
2222 "Number of consecutive collections before gc time limit fires") \
2223 \
2224 product(bool, PrintAdaptiveSizePolicy, false, \
2225 "Print information about AdaptiveSizePolicy") \
2226 \
2227 product(intx, PrefetchCopyIntervalInBytes, -1, \
2228 "How far ahead to prefetch destination area (<= 0 means off)") \
2229 \
2230 product(intx, PrefetchScanIntervalInBytes, -1, \
2231 "How far ahead to prefetch scan area (<= 0 means off)") \
2232 \
2233 product(intx, PrefetchFieldsAhead, -1, \
2234 "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
2235 \
2236 diagnostic(bool, VerifySilently, false, \
2237 "Do not print the verification progress") \
2238 \
2239 diagnostic(bool, VerifyDuringStartup, false, \
2484 "compile native methods if supported by the compiler") \
2485 \
2486 develop_pd(bool, CICompileOSR, \
2487 "compile on stack replacement methods if supported by the " \
2488 "compiler") \
2489 \
2490 develop(bool, CIPrintMethodCodes, false, \
2491 "print method bytecodes of the compiled code") \
2492 \
2493 develop(bool, CIPrintTypeFlow, false, \
2494 "print the results of ciTypeFlow analysis") \
2495 \
2496 develop(bool, CITraceTypeFlow, false, \
2497 "detailed per-bytecode tracing of ciTypeFlow analysis") \
2498 \
2499 develop(intx, OSROnlyBCI, -1, \
2500 "OSR only at this bci. Negative values mean exclude that bci") \
2501 \
2502 /* compiler */ \
2503 \
2504 product(intx, CICompilerCount, CI_COMPILER_COUNT, \
2505 "Number of compiler threads to run") \
2506 \
2507 product(intx, CompilationPolicyChoice, 0, \
2508 "which compilation policy (0-3)") \
2509 \
2510 develop(bool, UseStackBanging, true, \
2511 "use stack banging for stack overflow checks (required for " \
2512 "proper StackOverflow handling; disable only to measure cost " \
2513 "of stackbanging)") \
2514 \
2515 develop(bool, UseStrictFP, true, \
2516 "use strict fp if modifier strictfp is set") \
2517 \
2518 develop(bool, GenerateSynchronizationCode, true, \
2519 "generate locking/unlocking code for synchronized methods and " \
2520 "monitors") \
2521 \
2522 develop(bool, GenerateCompilerNullChecks, true, \
2523 "Generate explicit null checks for loads/stores/calls") \
2524 \
2525 develop(bool, GenerateRangeChecks, true, \
2526 "Generate range checks for array accesses") \
2527 \
2528 develop_pd(bool, ImplicitNullChecks, \
2605 \
2606 product(bool, PrintVMOptions, false, \
2607 "Print flags that appeared on the command line") \
2608 \
2609 product(bool, IgnoreUnrecognizedVMOptions, false, \
2610 "Ignore unrecognized VM options") \
2611 \
2612 product(bool, PrintCommandLineFlags, false, \
2613 "Print flags specified on command line or set by ergonomics") \
2614 \
2615 product(bool, PrintFlagsInitial, false, \
2616 "Print all VM flags before argument processing and exit VM") \
2617 \
2618 product(bool, PrintFlagsFinal, false, \
2619 "Print all VM flags after argument and ergonomic processing") \
2620 \
2621 notproduct(bool, PrintFlagsWithComments, false, \
2622 "Print all VM flags with default values and descriptions and " \
2623 "exit") \
2624 \
2625 diagnostic(bool, SerializeVMOutput, true, \
2626 "Use a mutex to serialize output to tty and LogFile") \
2627 \
2628 diagnostic(bool, DisplayVMOutput, true, \
2629 "Display all VM output on the tty, independently of LogVMOutput") \
2630 \
2631 diagnostic(bool, LogVMOutput, false, \
2632 "Save VM output to LogFile") \
2633 \
2634 diagnostic(ccstr, LogFile, NULL, \
2635 "If LogVMOutput or LogCompilation is on, save VM output to " \
2636 "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\
2637 \
2638 product(ccstr, ErrorFile, NULL, \
2639 "If an error occurs, save the error data to this file " \
2640 "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \
2641 \
2642 product(bool, DisplayVMOutputToStderr, false, \
2643 "If DisplayVMOutput is true, display all VM output to stderr") \
2644 \
2843 develop(bool, TraceFrequencyInlining, false, \
2844 "Trace frequency based inlining") \
2845 \
2846 develop_pd(bool, InlineIntrinsics, \
2847 "Inline intrinsics that can be statically resolved") \
2848 \
2849 product_pd(bool, ProfileInterpreter, \
2850 "Profile at the bytecode level during interpretation") \
2851 \
2852 develop(bool, TraceProfileInterpreter, false, \
2853 "Trace profiling at the bytecode level during interpretation. " \
2854 "This outputs the profiling information collected to improve " \
2855 "jit compilation.") \
2856 \
2857 develop_pd(bool, ProfileTraps, \
2858 "Profile deoptimization traps at the bytecode level") \
2859 \
2860 product(intx, ProfileMaturityPercentage, 20, \
2861 "number of method invocations/branches (expressed as % of " \
2862 "CompileThreshold) before using the method's profile") \
2863 \
2864 diagnostic(bool, PrintMethodData, false, \
2865 "Print the results of +ProfileInterpreter at end of run") \
2866 \
2867 develop(bool, VerifyDataPointer, trueInDebug, \
2868 "Verify the method data pointer during interpreter profiling") \
2869 \
2870 develop(bool, VerifyCompiledCode, false, \
2871 "Include miscellaneous runtime verifications in nmethod code; " \
2872 "default off because it disturbs nmethod size heuristics") \
2873 \
2874 notproduct(bool, CrashGCForDumpingJavaThread, false, \
2875 "Manually make GC thread crash then dump java stack trace; " \
2876 "Test only") \
2877 \
2878 /* compilation */ \
2879 product(bool, UseCompiler, true, \
2880 "Use Just-In-Time compilation") \
2881 \
2882 develop(bool, TraceCompilationPolicy, false, \
2903 "Do not compile methods > HugeMethodLimit") \
2904 \
2905 /* Bytecode escape analysis estimation. */ \
2906 product(bool, EstimateArgEscape, true, \
2907 "Analyze bytecodes to estimate escape state of arguments") \
2908 \
2909 product(intx, BCEATraceLevel, 0, \
2910 "How much tracing to do of bytecode escape analysis estimates") \
2911 \
2912 product(intx, MaxBCEAEstimateLevel, 5, \
2913 "Maximum number of nested calls that are analyzed by BC EA") \
2914 \
2915 product(intx, MaxBCEAEstimateSize, 150, \
2916 "Maximum bytecode size of a method to be analyzed by BC EA") \
2917 \
2918 product(intx, AllocatePrefetchStyle, 1, \
2919 "0 = no prefetch, " \
2920 "1 = prefetch instructions for each allocation, " \
2921 "2 = use TLAB watermark to gate allocation prefetch, " \
2922 "3 = use BIS instruction on Sparc for allocation prefetch") \
2923 \
2924 product(intx, AllocatePrefetchDistance, -1, \
2925 "Distance to prefetch ahead of allocation pointer") \
2926 \
2927 product(intx, AllocatePrefetchLines, 3, \
2928 "Number of lines to prefetch ahead of array allocation pointer") \
2929 \
2930 product(intx, AllocateInstancePrefetchLines, 1, \
2931 "Number of lines to prefetch ahead of instance allocation " \
2932 "pointer") \
2933 \
2934 product(intx, AllocatePrefetchStepSize, 16, \
2935 "Step size in bytes of sequential prefetch instructions") \
2936 \
2937 product(intx, AllocatePrefetchInstr, 0, \
2938 "Prefetch instruction to prefetch ahead of allocation pointer") \
2939 \
2940 /* deoptimization */ \
2941 develop(bool, TraceDeoptimization, false, \
2942 "Trace deoptimization") \
2949 "(0 means off)") \
2950 \
2951 product(intx, MaxJavaStackTraceDepth, 1024, \
2952 "The maximum number of lines in the stack trace for Java " \
2953 "exceptions (0 means all)") \
2954 \
2955 NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \
2956 "Guarantee a safepoint (at least) every so many milliseconds " \
2957 "(0 means none)")) \
2958 \
2959 EMBEDDED_ONLY(product(intx, GuaranteedSafepointInterval, 0, \
2960 "Guarantee a safepoint (at least) every so many milliseconds " \
2961 "(0 means none)")) \
2962 \
2963 product(intx, SafepointTimeoutDelay, 10000, \
2964 "Delay in milliseconds for option SafepointTimeout") \
2965 \
2966 product(intx, NmethodSweepActivity, 10, \
2967 "Removes cold nmethods from code cache if > 0. Higher values " \
2968 "result in more aggressive sweeping") \
2969 \
2970 notproduct(bool, LogSweeper, false, \
2971 "Keep a ring buffer of sweeper activity") \
2972 \
2973 notproduct(intx, SweeperLogEntries, 1024, \
2974 "Number of records in the ring buffer of sweeper activity") \
2975 \
2976 notproduct(intx, MemProfilingInterval, 500, \
2977 "Time between each invocation of the MemProfiler") \
2978 \
2979 develop(intx, MallocCatchPtr, -1, \
2980 "Hit breakpoint when mallocing/freeing this pointer") \
2981 \
2982 notproduct(ccstrlist, SuppressErrorAt, "", \
2983 "List of assertions (file:line) to muzzle") \
2984 \
2985 notproduct(size_t, HandleAllocationLimit, 1024, \
2986 "Threshold for HandleMark allocation when +TraceHandleAllocation "\
2987 "is used") \
2988 \
3077 diagnostic(intx, MallocVerifyInterval, 0, \
3078 "If non-zero, verify C heap after every N calls to " \
3079 "malloc/realloc/free") \
3080 \
3081 diagnostic(intx, MallocVerifyStart, 0, \
3082 "If non-zero, start verifying C heap after Nth call to " \
3083 "malloc/realloc/free") \
3084 \
3085 diagnostic(uintx, MallocMaxTestWords, 0, \
3086 "If non-zero, maximum number of words that malloc/realloc can " \
3087 "allocate (for testing only)") \
3088 \
3089 product(intx, TypeProfileWidth, 2, \
3090 "Number of receiver types to record in call/cast profile") \
3091 \
3092 develop(intx, BciProfileWidth, 2, \
3093 "Number of return bci's to record in ret profile") \
3094 \
3095 product(intx, PerMethodRecompilationCutoff, 400, \
3096 "After recompiling N times, stay in the interpreter (-1=>'Inf')") \
3097 \
3098 product(intx, PerBytecodeRecompilationCutoff, 200, \
3099 "Per-BCI limit on repeated recompilation (-1=>'Inf')") \
3100 \
3101 product(intx, PerMethodTrapLimit, 100, \
3102 "Limit on traps (of one kind) in a method (includes inlines)") \
3103 \
3104 experimental(intx, PerMethodSpecTrapLimit, 5000, \
3105 "Limit on speculative traps (of one kind) in a method (includes inlines)") \
3106 \
3107 product(intx, PerBytecodeTrapLimit, 4, \
3108 "Limit on traps (of one kind) at a particular BCI") \
3109 \
3110 experimental(intx, SpecTrapLimitExtraEntries, 3, \
3111 "Extra method data trap entries for speculation") \
3112 \
3113 develop(intx, InlineFrequencyRatio, 20, \
3114 "Ratio of call site execution to caller method invocation") \
3115 \
3116 develop_pd(intx, InlineFrequencyCount, \
3117 "Count of call site execution necessary to trigger frequent " \
3118 "inlining") \
3119 \
3120 develop(intx, InlineThrowCount, 50, \
3121 "Force inlining of interpreted methods that throw this often") \
3122 \
3123 develop(intx, InlineThrowMaxSize, 200, \
3124 "Force inlining of throwing methods smaller than this") \
3125 \
3138 \
3139 product(size_t, OldSize, ScaleForWordSize(4*M), \
3140 "Initial tenured generation size (in bytes)") \
3141 \
3142 product(size_t, NewSize, ScaleForWordSize(1*M), \
3143 "Initial new generation size (in bytes)") \
3144 \
3145 product(size_t, MaxNewSize, max_uintx, \
3146 "Maximum new generation size (in bytes), max_uintx means set " \
3147 "ergonomically") \
3148 \
3149 product(size_t, PretenureSizeThreshold, 0, \
3150 "Maximum size in bytes of objects allocated in DefNew " \
3151 "generation; zero means no maximum") \
3152 \
3153 product(size_t, TLABSize, 0, \
3154 "Starting TLAB size (in bytes); zero means set ergonomically") \
3155 \
3156 product(size_t, MinTLABSize, 2*K, \
3157 "Minimum allowed TLAB size (in bytes)") \
3158 \
3159 product(uintx, TLABAllocationWeight, 35, \
3160 "Allocation averaging weight") \
3161 \
3162 product(uintx, TLABWasteTargetPercent, 1, \
3163 "Percentage of Eden that can be wasted") \
3164 \
3165 product(uintx, TLABRefillWasteFraction, 64, \
3166 "Maximum TLAB waste at a refill (internal fragmentation)") \
3167 \
3168 product(uintx, TLABWasteIncrement, 4, \
3169 "Increment allowed waste at slow allocation") \
3170 \
3171 product(uintx, SurvivorRatio, 8, \
3172 "Ratio of eden/survivor space size") \
3173 \
3174 product(uintx, NewRatio, 2, \
3175 "Ratio of old/new generation sizes") \
3176 \
3177 product_pd(size_t, NewSizeThreadIncrease, \
3178 "Additional size added to desired new generation size per " \
3179 "non-daemon thread (in bytes)") \
3180 \
3181 product_pd(size_t, MetaspaceSize, \
3182 "Initial size of Metaspaces (in bytes)") \
3183 \
3184 product(size_t, MaxMetaspaceSize, max_uintx, \
3185 "Maximum size of Metaspaces (in bytes)") \
3186 \
3187 product(size_t, CompressedClassSpaceSize, 1*G, \
3188 "Maximum size of class area in Metaspace when compressed " \
3189 "class pointers are used") \
3190 \
3191 manageable(uintx, MinHeapFreeRatio, 40, \
3192 "The minimum percentage of heap free after GC to avoid expansion."\
3193 " For most GCs this applies to the old generation. In G1 and" \
3194 " ParallelGC it applies to the whole heap.") \
3195 \
3196 manageable(uintx, MaxHeapFreeRatio, 70, \
3197 "The maximum percentage of heap free after GC to avoid shrinking."\
3198 " For most GCs this applies to the old generation. In G1 and" \
3199 " ParallelGC it applies to the whole heap.") \
3200 \
3201 product(intx, SoftRefLRUPolicyMSPerMB, 1000, \
3202 "Number of milliseconds per MB of free space in the heap") \
3203 \
3204 product(size_t, MinHeapDeltaBytes, ScaleForWordSize(128*K), \
3205 "The minimum change in heap space due to GC (in bytes)") \
3206 \
3207 product(size_t, MinMetaspaceExpansion, ScaleForWordSize(256*K), \
3208 "The minimum expansion of Metaspace (in bytes)") \
3209 \
3210 product(uintx, MinMetaspaceFreeRatio, 40, \
3211 "The minimum percentage of Metaspace free after GC to avoid " \
3212 "expansion") \
3213 \
3214 product(uintx, MaxMetaspaceFreeRatio, 70, \
3215 "The maximum percentage of Metaspace free after GC to avoid " \
3216 "shrinking") \
3217 \
3218 product(size_t, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
3219 "The maximum expansion of Metaspace without full GC (in bytes)") \
3220 \
3221 product(uintx, QueuedAllocationWarningCount, 0, \
3222 "Number of times an allocation that queues behind a GC " \
3223 "will retry before printing a warning") \
3224 \
3225 diagnostic(uintx, VerifyGCStartAt, 0, \
3226 "GC invoke count where +VerifyBefore/AfterGC kicks in") \
3227 \
3228 diagnostic(intx, VerifyGCLevel, 0, \
3229 "Generation level at which to start +VerifyBefore/AfterGC") \
3230 \
3231 product(uintx, MaxTenuringThreshold, 15, \
3232 "Maximum value for tenuring threshold") \
3233 \
3234 product(uintx, InitialTenuringThreshold, 7, \
3235 "Initial value for tenuring threshold") \
3236 \
3237 product(uintx, TargetSurvivorRatio, 50, \
3238 "Desired percentage of survivor space used after scavenge") \
3239 \
3240 product(uintx, MarkSweepDeadRatio, 5, \
3241 "Percentage (0-100) of the old gen allowed as dead wood. " \
3242 "Serial mark sweep treats this as both the minimum and maximum " \
3243 "value. " \
3244 "CMS uses this value only if it falls back to mark sweep. " \
3245 "Par compact uses a variable scale based on the density of the " \
3246 "generation and treats this as the maximum value when the heap " \
3247 "is either completely full or completely empty. Par compact " \
3248 "also has a smaller default value; see arguments.cpp.") \
3249 \
3250 product(uintx, MarkSweepAlwaysCompactCount, 4, \
3251 "How often should we fully compact the heap (ignoring the dead " \
3252 "space parameters)") \
3253 \
3254 product(intx, PrintCMSStatistics, 0, \
3255 "Statistics for CMS") \
3256 \
3257 product(bool, PrintCMSInitiationStatistics, false, \
3258 "Statistics for initiating a CMS collection") \
3259 \
3260 product(intx, PrintFLSStatistics, 0, \
3261 "Statistics for CMS' FreeListSpace") \
3262 \
3263 product(intx, PrintFLSCensus, 0, \
3264 "Census for CMS' FreeListSpace") \
3265 \
3266 develop(uintx, GCExpandToAllocateDelayMillis, 0, \
3267 "Delay between expansion and allocation (in milliseconds)") \
3268 \
3269 develop(uintx, GCWorkerDelayMillis, 0, \
3270 "Delay in scheduling GC workers (in milliseconds)") \
3271 \
3272 product(intx, DeferThrSuspendLoopCount, 4000, \
3273 "(Unstable) Number of times to iterate in safepoint loop " \
3274 "before blocking VM threads ") \
3275 \
3276 product(intx, DeferPollingPageLoopCount, -1, \
3277 "(Unsafe,Unstable) Number of iterations in safepoint loop " \
3278 "before changing safepoint polling page to RO ") \
3279 \
3280 product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \
3281 \
3282 product(bool, PSChunkLargeArrays, true, \
3283 "Process large arrays in chunks") \
3284 \
3285 product(uintx, GCDrainStackTargetSize, 64, \
3286 "Number of entries we will try to leave on the stack " \
3287 "during parallel gc") \
3288 \
3289 /* stack parameters */ \
3290 product_pd(intx, StackYellowPages, \
3291 "Number of yellow zone (recoverable overflows) pages") \
3292 \
3293 product_pd(intx, StackRedPages, \
3294 "Number of red zone (unrecoverable overflows) pages") \
3295 \
3296 product_pd(intx, StackShadowPages, \
3297           "Number of shadow zone (for overflow checking) pages; " \
3298 "this should exceed the depth of the VM and native call stack") \
3299 \
3300 product_pd(intx, ThreadStackSize, \
3301 "Thread Stack Size (in Kbytes)") \
3302 \
3303 product_pd(intx, VMThreadStackSize, \
3304 "Non-Java Thread Stack Size (in Kbytes)") \
3305 \
3306 product_pd(intx, CompilerThreadStackSize, \
3307 "Compiler Thread Stack Size (in Kbytes)") \
3308 \
3309 develop_pd(size_t, JVMInvokeMethodSlack, \
3310 "Stack space (bytes) required for JVM_InvokeMethod to complete") \
3311 \
3312 product(size_t, ThreadSafetyMargin, 50*M, \
3313 "Thread safety margin is used on fixed-stack LinuxThreads (on " \
3314 "Linux/x86 only) to prevent heap-stack collision. Set to 0 to " \
3315 "disable this feature") \
3316 \
3317 /* code cache parameters */ \
3318 /* ppc64/tiered compilation has large code-entry alignment. */ \
3319 develop(uintx, CodeCacheSegmentSize, 64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64)),\
3320 "Code cache segment size (in bytes) - smallest unit of " \
3321 "allocation") \
3322 \
3323 develop_pd(intx, CodeEntryAlignment, \
3324 "Code entry alignment for generated code (in bytes)") \
3325 \
3326 product_pd(intx, OptoLoopAlignment, \
3327 "Align inner loops to zero relative to this modulus") \
3328 \
3329 product_pd(uintx, InitialCodeCacheSize, \
3330 "Initial code cache size (in bytes)") \
3331 \
3332 develop_pd(uintx, CodeCacheMinimumUseSpace, \
3333 "Minimum code cache size (in bytes) required to start VM.") \
3334 \
3335 product(bool, SegmentedCodeCache, false, \
3336 "Use a segmented code cache") \
3337 \
3338 product_pd(uintx, ReservedCodeCacheSize, \
3339 "Reserved code cache size (in bytes) - maximum code cache size") \
3340 \
3341 product_pd(uintx, NonProfiledCodeHeapSize, \
3342 "Size of code heap with non-profiled methods (in bytes)") \
3343 \
3344 product_pd(uintx, ProfiledCodeHeapSize, \
3345 "Size of code heap with profiled methods (in bytes)") \
3346 \
3347 product_pd(uintx, NonNMethodCodeHeapSize, \
3348 "Size of code heap with non-nmethods (in bytes)") \
3349 \
3350 product_pd(uintx, CodeCacheExpansionSize, \
3351 "Code cache expansion size (in bytes)") \
3352 \
3353 develop_pd(uintx, CodeCacheMinBlockLength, \
3354 "Minimum number of segments in a code cache block") \
3355 \
3356 notproduct(bool, ExitOnFullCodeCache, false, \
3357 "Exit the VM if we fill the code cache") \
3358 \
3359 product(bool, UseCodeCacheFlushing, true, \
3360 "Remove cold/old nmethods from the code cache") \
3361 \
3362 product(uintx, StartAggressiveSweepingAt, 10, \
3363           "Start aggressive sweeping if X[%] of the code cache is free. " \
3364           "Segmented code cache: X[%] of the non-profiled heap. " \
3365           "Non-segmented code cache: X[%] of the total code cache") \
3366 \
3367 /* interpreter debugging */ \
3368 develop(intx, BinarySwitchThreshold, 5, \
3369 "Minimal number of lookupswitch entries for rewriting to binary " \
3370 "switch") \
3371 \
3372 develop(intx, StopInterpreterAt, 0, \
3373 "Stop interpreter execution at specified bytecode number") \
3374 \
3375 develop(intx, TraceBytecodesAt, 0, \
3376 "Trace bytecodes starting with specified bytecode number") \
3377 \
3378 /* compiler interface */ \
3379 develop(intx, CIStart, 0, \
3380 "The id of the first compilation to permit") \
3381 \
3382 develop(intx, CIStop, max_jint, \
3383 "The id of the last compilation to permit") \
3384 \
3385 develop(intx, CIStartOSR, 0, \
3406 "Prepend to .hotspot_compiler; e.g. log,java/lang/String.<init>") \
3407 \
3408 develop(bool, ReplayCompiles, false, \
3409 "Enable replay of compilations from ReplayDataFile") \
3410 \
3411 product(ccstr, ReplayDataFile, NULL, \
3412           "File containing compilation replay information " \
3413 "[default: ./replay_pid%p.log] (%p replaced with pid)") \
3414 \
3415 product(ccstr, InlineDataFile, NULL, \
3416           "File containing inlining replay information " \
3417 "[default: ./inline_pid%p.log] (%p replaced with pid)") \
3418 \
3419 develop(intx, ReplaySuppressInitializers, 2, \
3420 "Control handling of class initialization during replay: " \
3421 "0 - don't do anything special; " \
3422 "1 - treat all class initializers as empty; " \
3423 "2 - treat class initializers for application classes as empty; " \
3424 "3 - allow all class initializers to run during bootstrap but " \
3425 " pretend they are empty after starting replay") \
3426 \
3427 develop(bool, ReplayIgnoreInitErrors, false, \
3428 "Ignore exceptions thrown during initialization for replay") \
3429 \
3430 product(bool, DumpReplayDataOnError, true, \
3431 "Record replay data for crashing compiler threads") \
3432 \
3433 product(bool, CICompilerCountPerCPU, false, \
3434 "1 compiler thread for log(N CPUs)") \
3435 \
3436 develop(intx, CIFireOOMAt, -1, \
3437 "Fire OutOfMemoryErrors throughout CI for testing the compiler " \
3438 "(non-negative value throws OOM after this many CI accesses " \
3439 "in each compile)") \
3440 notproduct(intx, CICrashAt, -1, \
3441 "id of compilation to trigger assert in compiler thread for " \
3442 "the purpose of testing, e.g. generation of replay data") \
3443 notproduct(bool, CIObjectFactoryVerify, false, \
3444 "enable potentially expensive verification in ciObjectFactory") \
3445 \
3450 "0 : Normal. "\
3451 " VM chooses priorities that are appropriate for normal "\
3452 " applications. On Solaris NORM_PRIORITY and above are mapped "\
3453 " to normal native priority. Java priorities below " \
3454 " NORM_PRIORITY map to lower native priority values. On "\
3455 " Windows applications are allowed to use higher native "\
3456 " priorities. However, with ThreadPriorityPolicy=0, VM will "\
3457 " not use the highest possible native priority, "\
3458 " THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with "\
3459 " system threads. On Linux thread priorities are ignored "\
3460 " because the OS does not support static priority in "\
3461 " SCHED_OTHER scheduling class which is the only choice for "\
3462 " non-root, non-realtime applications. "\
3463 "1 : Aggressive. "\
3464 " Java thread priorities map over to the entire range of "\
3465 " native thread priorities. Higher Java thread priorities map "\
3466 " to higher native thread priorities. This policy should be "\
3467 " used with care, as sometimes it can cause performance "\
3468 " degradation in the application and/or the entire system. On "\
3469 " Linux this policy requires root privilege.") \
3470 \
3471 product(bool, ThreadPriorityVerbose, false, \
3472 "Print priority changes") \
3473 \
3474 product(intx, CompilerThreadPriority, -1, \
3475 "The native priority at which compiler threads should run " \
3476 "(-1 means no change)") \
3477 \
3478 product(intx, VMThreadPriority, -1, \
3479 "The native priority at which the VM thread should run " \
3480 "(-1 means no change)") \
3481 \
3482 product(bool, CompilerThreadHintNoPreempt, true, \
3483           "(Solaris only) Give compiler threads an extra quantum") \
3484 \
3485 product(bool, VMThreadHintNoPreempt, false, \
3486           "(Solaris only) Give the VM thread an extra quantum") \
3487 \
3488 product(intx, JavaPriority1_To_OSPriority, -1, \
3489 "Map Java priorities to OS priorities") \
3633 "reaches this amount per compiler thread") \
3634 \
3635 product(intx, Tier4LoadFeedback, 3, \
3636 "Tier 4 thresholds will increase twofold when C2 queue size " \
3637 "reaches this amount per compiler thread") \
3638 \
3639 product(intx, TieredCompileTaskTimeout, 50, \
3640 "Kill compile task if method was not used within " \
3641 "given timeout in milliseconds") \
3642 \
3643 product(intx, TieredStopAtLevel, 4, \
3644 "Stop at given compilation level") \
3645 \
3646 product(intx, Tier0ProfilingStartPercentage, 200, \
3647 "Start profiling in interpreter if the counters exceed tier 3 " \
3648 "thresholds by the specified percentage") \
3649 \
3650 product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \
3651 "Increase the compile threshold for C1 compilation if the code " \
3652 "cache is filled by the specified percentage") \
3653 \
3654 product(intx, TieredRateUpdateMinTime, 1, \
3655 "Minimum rate sampling interval (in milliseconds)") \
3656 \
3657 product(intx, TieredRateUpdateMaxTime, 25, \
3658 "Maximum rate sampling interval (in milliseconds)") \
3659 \
3660 product_pd(bool, TieredCompilation, \
3661 "Enable tiered compilation") \
3662 \
3663 product(bool, PrintTieredEvents, false, \
3664 "Print tiered events notifications") \
3665 \
3666 product_pd(intx, OnStackReplacePercentage, \
3667 "NON_TIERED number of method invocations/branches (expressed as " \
3668 "% of CompileThreshold) before (re-)compiling OSR code") \
3669 \
3670 product(intx, InterpreterProfilePercentage, 33, \
3671 "NON_TIERED number of method invocations/branches (expressed as " \
3672 "% of CompileThreshold) before profiling in the interpreter") \
3673 \
3674 develop(intx, MaxRecompilationSearchLength, 10, \
3675 "The maximum number of frames to inspect when searching for " \
3676 "recompilee") \
3677 \
3678 develop(intx, MaxInterpretedSearchLength, 3, \
3679 "The maximum number of interpreted frames to skip when searching "\
3680 "for recompilee") \
3681 \
3682 develop(intx, DesiredMethodLimit, 8000, \
3683 "The desired maximum method size (in bytecodes) after inlining") \
3684 \
3685 develop(intx, HugeMethodLimit, 8000, \
3686 "Don't compile methods larger than this if " \
3687 "+DontCompileHugeMethods") \
3688 \
3689 /* New JDK 1.4 reflection implementation */ \
3690 \
3691 develop(intx, FastSuperclassLimit, 8, \
3692 "Depth of hardwired instanceof accelerator array") \
3732 \
3733 product(bool, PerfDisableSharedMem, false, \
3734 "Store performance data in standard memory") \
3735 \
3736 product(intx, PerfDataMemorySize, 32*K, \
3737 "Size of performance data memory region. Will be rounded " \
3738 "up to a multiple of the native os page size.") \
3739 \
3740 product(intx, PerfMaxStringConstLength, 1024, \
3741 "Maximum PerfStringConstant string length before truncation") \
3742 \
3743 product(bool, PerfAllowAtExitRegistration, false, \
3744 "Allow registration of atexit() methods") \
3745 \
3746 product(bool, PerfBypassFileSystemCheck, false, \
3747 "Bypass Win32 file system criteria checks (Windows Only)") \
3748 \
3749 product(intx, UnguardOnExecutionViolation, 0, \
3750 "Unguard page and retry on no-execute fault (Win32 only) " \
3751 "0=off, 1=conservative, 2=aggressive") \
3752 \
3753 /* Serviceability Support */ \
3754 \
3755 product(bool, ManagementServer, false, \
3756 "Create JMX Management Server") \
3757 \
3758 product(bool, DisableAttachMechanism, false, \
3759 "Disable mechanism that allows tools to attach to this VM") \
3760 \
3761 product(bool, StartAttachListener, false, \
3762 "Always start Attach Listener at VM startup") \
3763 \
3764 manageable(bool, PrintConcurrentLocks, false, \
3765 "Print java.util.concurrent locks in thread dump") \
3766 \
3767 product(bool, TransmitErrorReport, false, \
3768 "Enable error report transmission on erroneous termination") \
3769 \
3770 product(ccstr, ErrorReportServer, NULL, \
3771 "Override built-in error report server address") \
3852 diagnostic(bool, PauseAtExit, false, \
3853 "Pause and wait for keypress on exit if a debugger is attached") \
3854 \
3855 product(bool, ExtendedDTraceProbes, false, \
3856 "Enable performance-impacting dtrace probes") \
3857 \
3858 product(bool, DTraceMethodProbes, false, \
3859 "Enable dtrace probes for method-entry and method-exit") \
3860 \
3861 product(bool, DTraceAllocProbes, false, \
3862 "Enable dtrace probes for object allocation") \
3863 \
3864 product(bool, DTraceMonitorProbes, false, \
3865 "Enable dtrace probes for monitor events") \
3866 \
3867 product(bool, RelaxAccessControlCheck, false, \
3868 "Relax the access control checks in the verifier") \
3869 \
3870 product(uintx, StringTableSize, defaultStringTableSize, \
3871 "Number of buckets in the interned String table") \
3872 \
3873 experimental(uintx, SymbolTableSize, defaultSymbolTableSize, \
3874 "Number of buckets in the JVM internal Symbol table") \
3875 \
3876 product(bool, UseStringDeduplication, false, \
3877 "Use string deduplication") \
3878 \
3879 product(bool, PrintStringDeduplicationStatistics, false, \
3880 "Print string deduplication statistics") \
3881 \
3882 product(uintx, StringDeduplicationAgeThreshold, 3, \
3883 "A string must reach this age (or be promoted to an old region) " \
3884 "to be considered for deduplication") \
3885 \
3886 diagnostic(bool, StringDeduplicationResizeALot, false, \
3887 "Force table resize every time the table is scanned") \
3888 \
3889 diagnostic(bool, StringDeduplicationRehashALot, false, \
3890 "Force table rehash every time the table is scanned") \
3891 \
3892 develop(bool, TraceDefaultMethods, false, \
3893 "Trace the default method processing steps") \
3894 \
3895 develop(bool, VerifyGenericSignatures, false, \
3896 "Abort VM on erroneous or inconsistent generic signatures") \
3897 \
3898 diagnostic(bool, WhiteBoxAPI, false, \
3899 "Enable internal testing APIs") \
3900 \
3901 product(bool, PrintGCCause, true, \
3902 "Include GC cause in GC logging") \
3903 \
3904 experimental(intx, SurvivorAlignmentInBytes, 0, \
3905 "Default survivor space alignment in bytes") \
3906 \
3907 product(bool , AllowNonVirtualCalls, false, \
3908 "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \
3909 \
3910 product(ccstr, DumpLoadedClassList, NULL, \
3911           "Dump the names of all loaded classes that could be stored " \
3912           "into the CDS archive, in the specified file") \
3913 \
3914 product(ccstr, SharedClassListFile, NULL, \
3915 "Override the default CDS class list") \
3916 \
3917 diagnostic(ccstr, SharedArchiveFile, NULL, \
3918 "Override the default location of the CDS archive file") \
3919 \
3920 product(ccstr, ExtraSharedClassListFile, NULL, \
3921 "Extra classlist for building the CDS archive file") \
3922 \
3923 experimental(size_t, ArrayAllocatorMallocLimit, \
3924 SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1), \
3925 "Allocation less than this value will be allocated " \
3943
3944 /*
3945 * Macros for factoring of globals
3946 */
3947
3948 // Interface macros
3949 #define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
3950 #define DECLARE_PD_PRODUCT_FLAG(type, name, doc) extern "C" type name;
3951 #define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name;
3952 #define DECLARE_EXPERIMENTAL_FLAG(type, name, value, doc) extern "C" type name;
3953 #define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name;
3954 #define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name;
3955 #ifdef PRODUCT
3956 #define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type CONST_##name; const type name = value;
3957 #define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type CONST_##name; const type name = pd_##name;
3958 #define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type CONST_##name;
3959 #else
3960 #define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type name;
3961 #define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name;
3962 #define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name;
3963 #endif
3964 // Special LP64 flags, product only needed for now.
3965 #ifdef _LP64
3966 #define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
3967 #else
3968 #define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) const type name = value;
3969 #endif // _LP64
3970
3971 // Implementation macros
3972 #define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value;
3973 #define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc) type name = pd_##name;
3974 #define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value;
3975 #define MATERIALIZE_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value;
3976 #define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value;
3977 #define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value;
3978 #ifdef PRODUCT
3979 #define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type CONST_##name = value;
3980 #define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type CONST_##name = pd_##name;
3981 #define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type CONST_##name = value;
3982 #else
3983 #define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value;
3984 #define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name;
3985 #define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
3986 #endif
3987 #ifdef _LP64
3988 #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
3989 #else
3990 #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
3991 #endif // _LP64
3992
3993 RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG, DECLARE_LP64_PRODUCT_FLAG)
3994
3995 RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
3996
3997 ARCH_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
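// Illustrative expansion (a sketch, not generated code): a table entry such as
//
//   product(uintx, TLABAllocationWeight, 35, "Allocation averaging weight")
//
// expands through DECLARE_PRODUCT_FLAG in this header to
//
//   extern "C" uintx TLABAllocationWeight;
//
// and through MATERIALIZE_PRODUCT_FLAG in the implementation file to
//
//   uintx TLABAllocationWeight = 35;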
3998
3999 // Extensions
4000
4001 #include "runtime/globals_ext.hpp"
4002
4003 #endif // SHARE_VM_RUNTIME_GLOBALS_HPP
|
241
242 // flag kind
243 KIND_PRODUCT = 1 << 4,
244 KIND_MANAGEABLE = 1 << 5,
245 KIND_DIAGNOSTIC = 1 << 6,
246 KIND_EXPERIMENTAL = 1 << 7,
247 KIND_NOT_PRODUCT = 1 << 8,
248 KIND_DEVELOP = 1 << 9,
249 KIND_PLATFORM_DEPENDENT = 1 << 10,
250 KIND_READ_WRITE = 1 << 11,
251 KIND_C1 = 1 << 12,
252 KIND_C2 = 1 << 13,
253 KIND_ARCH = 1 << 14,
254 KIND_SHARK = 1 << 15,
255 KIND_LP64_PRODUCT = 1 << 16,
256 KIND_COMMERCIAL = 1 << 17,
257
258 KIND_MASK = ~VALUE_ORIGIN_MASK
259 };
260
261 enum Error {
262 // no error
263 SUCCESS = 0,
264 // flag name is missing
265 MISSING_NAME,
266 // flag value is missing
267 MISSING_VALUE,
268 // error parsing the textual form of the value
269 WRONG_FORMAT,
270 // flag is not writeable
271 NON_WRITABLE,
272 // flag value is outside of its bounds
273 OUT_OF_BOUNDS,
274 // flag value violates its constraint
275 VIOLATES_CONSTRAINT,
276 // there is no flag with the given name
277 INVALID_FLAG,
278 // other, unspecified error related to setting the flag
279 ERR_OTHER
280 };
281
282 const char* _type;
283 const char* _name;
284 void* _addr;
285 NOT_PRODUCT(const char* _doc;)
286 Flags _flags;
287
288 // points to all Flags static array
289 static Flag* flags;
290
291 // number of flags
292 static size_t numFlags;
293
294 static Flag* find_flag(const char* name) { return find_flag(name, strlen(name), true, true); };
295 static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false);
296 static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
297
298 void check_writable();
299
300 bool is_bool() const;
301 bool get_bool() const;
302 void set_bool(bool value);
303
304 bool is_int() const;
305 int get_int() const;
306 void set_int(int value);
307
308 bool is_uint() const;
309 uint get_uint() const;
310 void set_uint(uint value);
311
312 bool is_intx() const;
313 intx get_intx() const;
314 void set_intx(intx value);
350 bool is_read_write() const;
351 bool is_commercial() const;
352
353 bool is_constant_in_binary() const;
354
355 bool is_unlocker() const;
356 bool is_unlocked() const;
357 bool is_writeable() const;
358 bool is_external() const;
359
360 bool is_unlocker_ext() const;
361 bool is_unlocked_ext() const;
362 bool is_writeable_ext() const;
363 bool is_external_ext() const;
364
365 void unlock_diagnostic();
366
367 void get_locked_message(char*, int) const;
368 void get_locked_message_ext(char*, int) const;
369
370   // printRanges will print out the flag's type, name, and range values as expected by -XX:+PrintFlagsRanges
371 void print_on(outputStream* st, bool withComments = false, bool printRanges = false);
372 void print_kind(outputStream* st);
373 void print_as_flag(outputStream* st);
374
375 static const char* flag_error_str(Flag::Error error) {
376 switch (error) {
377 case Flag::MISSING_NAME: return "MISSING_NAME";
378 case Flag::MISSING_VALUE: return "MISSING_VALUE";
379 case Flag::NON_WRITABLE: return "NON_WRITABLE";
380 case Flag::OUT_OF_BOUNDS: return "OUT_OF_BOUNDS";
381 case Flag::VIOLATES_CONSTRAINT: return "VIOLATES_CONSTRAINT";
382 case Flag::INVALID_FLAG: return "INVALID_FLAG";
383 case Flag::ERR_OTHER: return "ERR_OTHER";
384 case Flag::SUCCESS: return "SUCCESS";
385 default: return "NULL";
386 }
387 }
388 };
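// Illustrative lookup (a sketch): a flag can be found by name and read through
// the typed accessors declared above.
//
//   Flag* f = Flag::find_flag("UseNUMA");
//   if (f != NULL && f->is_bool()) {
//     bool value = f->get_bool();
//   }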
389
390 // debug flags control various aspects of the VM and are globally accessible
391
392 // use FlagSetting to temporarily change some debug flag
393 // e.g. FlagSetting fs(DebugThisAndThat, true);
394 // restored to previous value upon leaving scope
395 class FlagSetting {
396 bool val;
397 bool* flag;
398 public:
399 FlagSetting(bool& fl, bool newValue) { flag = &fl; val = fl; fl = newValue; }
400 ~FlagSetting() { *flag = val; }
401 };
402
403
404 class CounterSetting {
405 intx* counter;
406 public:
407 CounterSetting(intx* cnt) { counter = cnt; (*counter)++; }
433 };
434
435 class DoubleFlagSetting {
436 double val;
437 double* flag;
438 public:
439 DoubleFlagSetting(double& fl, double newValue) { flag = &fl; val = fl; fl = newValue; }
440 ~DoubleFlagSetting() { *flag = val; }
441 };
442
443 class SizeTFlagSetting {
444 size_t val;
445 size_t* flag;
446 public:
447 SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; }
448 ~SizeTFlagSetting() { *flag = val; }
449 };
450
451
452 class CommandLineFlags {
453 static bool _finished_initializing;
454 public:
455 static Flag::Error boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
456 static Flag::Error boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
457 static Flag::Error boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
458 static Flag::Error boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
459
460 static Flag::Error intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false);
461 static Flag::Error intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); }
462 static Flag::Error intAtPut(const char* name, size_t len, int* value, Flag::Flags origin);
463 static Flag::Error intAtPut(const char* name, int* value, Flag::Flags origin) { return intAtPut(name, strlen(name), value, origin); }
464
465 static Flag::Error uintAt(const char* name, size_t len, uint* value, bool allow_locked = false, bool return_flag = false);
466 static Flag::Error uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); }
467 static Flag::Error uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin);
468 static Flag::Error uintAtPut(const char* name, uint* value, Flag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); }
469
470 static Flag::Error intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
471 static Flag::Error intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
472 static Flag::Error intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
473 static Flag::Error intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }
474
475 static Flag::Error uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false);
476 static Flag::Error uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); }
477 static Flag::Error uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
478 static Flag::Error uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
479
480 static Flag::Error size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false);
481 static Flag::Error size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); }
482 static Flag::Error size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin);
483 static Flag::Error size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); }
484
485 static Flag::Error uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false);
486 static Flag::Error uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); }
487 static Flag::Error uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
488 static Flag::Error uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
489
490 static Flag::Error doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
491 static Flag::Error doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
492 static Flag::Error doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
493 static Flag::Error doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
494
495 static Flag::Error ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
496 static Flag::Error ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
497   // Contract:  Flag will make a private copy of the incoming value.
498   //            The outgoing value is malloc-ed; the caller MUST free it (see the sketch after this class).
499 static Flag::Error ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
500 static Flag::Error ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }
501
502 // Returns false if name is not a command line flag.
503 static bool wasSetOnCmdline(const char* name, bool* value);
504 static void printSetFlags(outputStream* out);
505
506   // printRanges will print out the flag's type, name, and range values as expected by -XX:+PrintFlagsRanges
507 static void printFlags(outputStream* out, bool withComments, bool printRanges = false);
508
509   // Returns true if all flags have their final values set (ready for range and constraint checks)
510 static bool finishedInitializing() { return _finished_initializing; }
511
512 // Check the final values of all flags for ranges and constraints
513 static bool check_all_ranges_and_constraints();
514
515 static void verify() PRODUCT_RETURN;
516 };
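// Illustrative use of the CommandLineFlags accessors above (a sketch; the
// origin constant Flag::COMMAND_LINE is assumed to be one of the value-origin
// values defined earlier in this file):
//
//   bool value = true;
//   Flag::Error rc = CommandLineFlags::boolAtPut("UseNUMA", &value, Flag::COMMAND_LINE);
//   if (rc != Flag::SUCCESS) {
//     tty->print_cr("UseNUMA not updated: %s", Flag::flag_error_str(rc));
//   }
//
//   // Per the ccstrAtPut contract: the incoming string is copied, and on return
//   // the pointer holds the previous value, which is malloc-ed and must be freed.
//   ccstr server = "myhost:1234";   // hypothetical value
//   if (CommandLineFlags::ccstrAtPut("ErrorReportServer", &server, Flag::COMMAND_LINE) == Flag::SUCCESS) {
//     free((void*) server);        // exact deallocation routine is an assumption
//   }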
517
518 // use this for flags that are true by default in the debug version but
519 // false in the optimized version, and vice versa
520 #ifdef ASSERT
521 #define trueInDebug true
522 #define falseInDebug false
523 #else
524 #define trueInDebug false
525 #define falseInDebug true
526 #endif
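// For example, develop(bool, ForceFloatExceptions, trueInDebug, ...) later in
// this file defaults to true in debug builds and to false in optimized builds.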
527
528 // use this for flags that are true by default in the product build
529 // but false in development builds, and vice versa
530 #ifdef PRODUCT
531 #define trueInProduct true
532 #define falseInProduct false
533 #else
587 // - the flag is defined in a CCC as an external exported interface.
588 // - the VM implementation supports dynamic setting of the flag.
589 // This implies that the VM must *always* query the flag variable
590 // and not reuse state related to the flag state at any given time.
591 // - you want the flag to be queried programmatically by the customers.
592 //
593 // product_rw flags are writeable internal product flags.
594 // They are like "manageable" flags but for internal/private use.
595 // The list of product_rw flags consists of internal/private flags that
596 // may be changed or removed in a future release. Their values can be
597 // read and set through the management interface when the name of the
598 // flag is supplied.
599 //
600 // A flag can be made as "product_rw" only if
601 // - the VM implementation supports dynamic setting of the flag.
602 // This implies that the VM must *always* query the flag variable
603 // and not reuse state related to the flag state at any given time.
604 //
605 // Note that if develop flags ever need to be made writeable, it can be
606 // done in the same way as product_rw.
607 //
608 // range is a macro that will expand to min and max arguments for range
609 // checking code if provided - see commandLineFlagRangeList.hpp
610 //
611 // constraint is a macro that will expand to custom function call
612 // for constraint checking if provided - see commandLineFlagConstraintList.hpp
613 //
614
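// Illustrative shape of a constraint function (hypothetical signature; the real
// declarations live in commandLineFlagConstraintList.hpp). It checks a proposed
// value and reports a Flag::Error:
//
//   Flag::Error ObjectAlignmentInBytesConstraintFunc(intx value, bool verbose) {
//     if (!is_power_of_2(value)) {           // power-of-two requirement is an assumption
//       return Flag::VIOLATES_CONSTRAINT;
//     }
//     return Flag::SUCCESS;
//   }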
615 #define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product, range, constraint) \
616 \
617 lp64_product(bool, UseCompressedOops, false, \
618 "Use 32-bit object references in 64-bit VM. " \
619 "lp64_product means flag is always constant in 32 bit VM") \
620 \
621 lp64_product(bool, UseCompressedClassPointers, false, \
622 "Use 32-bit class pointers in 64-bit VM. " \
623 "lp64_product means flag is always constant in 32 bit VM") \
624 \
625 notproduct(bool, CheckCompressedOops, true, \
626 "Generate checks in encoding/decoding code in debug VM") \
627 \
628 product_pd(size_t, HeapBaseMinAddress, \
629 "OS specific low limit for heap base address") \
630 \
631 product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17), \
632 "Heap allocation steps through preferred address regions to find" \
633 " where it can allocate the heap. Number of steps to take per " \
634 "region.") \
635 range(1, max_uintx) \
636 \
637 diagnostic(bool, PrintCompressedOopsMode, false, \
638 "Print compressed oops base address and encoding mode") \
639 \
640 lp64_product(intx, ObjectAlignmentInBytes, 8, \
641 "Default object alignment in bytes, 8 is minimum") \
642 range(8, 256) \
643 constraint(ObjectAlignmentInBytesConstraintFunc) \
644 \
645 product(bool, AssumeMP, false, \
646 "Instruct the VM to assume multiple processors are available") \
647 \
648 /* UseMembar is theoretically a temp flag used for memory barrier */ \
649 /* removal testing. It was supposed to be removed before FCS but has */ \
650 /* been re-added (see 6401008) */ \
651 product_pd(bool, UseMembar, \
652 "(Unstable) Issues membars on thread state transitions") \
653 \
654 develop(bool, CleanChunkPoolAsync, falseInEmbedded, \
655 "Clean the chunk pool asynchronously") \
656 \
657 experimental(bool, AlwaysSafeConstructors, false, \
658 "Force safe construction, as if all fields are final.") \
659 \
660 /* Temporary: See 6948537 */ \
661 experimental(bool, UseMemSetInBOT, true, \
662 "(Unstable) uses memset in BOT updates in GC code") \
663 \
664 diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \
665 "Enable normal processing of flags relating to field diagnostics")\
666 \
667 experimental(bool, UnlockExperimentalVMOptions, false, \
668 "Enable normal processing of flags relating to experimental " \
669 "features") \
670 \
687 \
688 develop(bool, TracePageSizes, false, \
689 "Trace page size selection and usage") \
690 \
691 product(bool, UseNUMA, false, \
692 "Use NUMA if available") \
693 \
694 product(bool, UseNUMAInterleaving, false, \
695 "Interleave memory across NUMA nodes if available") \
696 \
697 product(size_t, NUMAInterleaveGranularity, 2*M, \
698 "Granularity to use for NUMA interleaving on Windows OS") \
699 \
700 product(bool, ForceNUMA, false, \
701 "Force NUMA optimizations on single-node/UMA systems") \
702 \
703 product(uintx, NUMAChunkResizeWeight, 20, \
704 "Percentage (0-100) used to weight the current sample when " \
705 "computing exponentially decaying average for " \
706 "AdaptiveNUMAChunkSizing") \
707 range(0, 100) \
708 \
709 product(size_t, NUMASpaceResizeRate, 1*G, \
710 "Do not reallocate more than this amount per collection") \
711 \
712 product(bool, UseAdaptiveNUMAChunkSizing, true, \
713 "Enable adaptive chunk sizing for NUMA") \
714 \
715 product(bool, NUMAStats, false, \
716 "Print NUMA stats in detailed heap information") \
717 \
718 product(uintx, NUMAPageScanRate, 256, \
719 "Maximum number of pages to include in the page scan procedure") \
720 \
721 product_pd(bool, NeedsDeoptSuspend, \
722 "True for register window machines (sparc/ia64)") \
723 \
724 product(intx, UseSSE, 99, \
725           "Highest supported SSE instruction set on x86/x64") \
726 \
727 product(bool, UseAES, false, \
898 product(intx, SuspendRetryCount, 50, \
899 "Maximum retry count for an external suspend request") \
900 \
901 product(intx, SuspendRetryDelay, 5, \
902 "Milliseconds to delay per retry (* current_retry_count)") \
903 \
904 product(bool, AssertOnSuspendWaitFailure, false, \
905 "Assert/Guarantee on external suspend wait failure") \
906 \
907 product(bool, TraceSuspendWaitFailures, false, \
908 "Trace external suspend wait failures") \
909 \
910 product(bool, MaxFDLimit, true, \
911 "Bump the number of file descriptors to maximum in Solaris") \
912 \
913 diagnostic(bool, LogEvents, true, \
914 "Enable the various ring buffer event logs") \
915 \
916 diagnostic(uintx, LogEventsBufferEntries, 10, \
917 "Number of ring buffer event logs") \
918 range(1, NOT_LP64(1*K) LP64_ONLY(1*M)) \
919 \
920 product(bool, BytecodeVerificationRemote, true, \
921 "Enable the Java bytecode verifier for remote classes") \
922 \
923 product(bool, BytecodeVerificationLocal, false, \
924 "Enable the Java bytecode verifier for local classes") \
925 \
926 develop(bool, ForceFloatExceptions, trueInDebug, \
927 "Force exceptions on FP stack under/overflow") \
928 \
929 develop(bool, VerifyStackAtCalls, false, \
930 "Verify that the stack pointer is unchanged after calls") \
931 \
932 develop(bool, TraceJavaAssertions, false, \
933 "Trace java language assertions") \
934 \
935 notproduct(bool, CheckAssertionStatusDirectives, false, \
936 "Temporary - see javaClasses.cpp") \
937 \
938 notproduct(bool, PrintMallocFree, false, \
1071 \
1072 product(ccstr, NativeMemoryTracking, "off", \
1073 "Native memory tracking options") \
1074 \
1075 diagnostic(bool, PrintNMTStatistics, false, \
1076 "Print native memory tracking summary data if it is on") \
1077 \
1078 diagnostic(bool, LogCompilation, false, \
1079 "Log compilation activity in detail to LogFile") \
1080 \
1081 product(bool, PrintCompilation, false, \
1082 "Print compilations") \
1083 \
1084 diagnostic(bool, TraceNMethodInstalls, false, \
1085 "Trace nmethod installation") \
1086 \
1087 diagnostic(intx, ScavengeRootsInCode, 2, \
1088 "0: do not allow scavengable oops in the code cache; " \
1089 "1: allow scavenging from the code cache; " \
1090 "2: emit as many constants as the compiler can see") \
1091 range(0, 2) \
1092 \
1093 product(bool, AlwaysRestoreFPU, false, \
1094 "Restore the FPU control word after every JNI call (expensive)") \
1095 \
1096 diagnostic(bool, PrintCompilation2, false, \
1097 "Print additional statistics per compilation") \
1098 \
1099 diagnostic(bool, PrintAdapterHandlers, false, \
1100 "Print code generated for i2c/c2i adapters") \
1101 \
1102 diagnostic(bool, VerifyAdapterCalls, trueInDebug, \
1103 "Verify that i2c/c2i adapters are called properly") \
1104 \
1105 develop(bool, VerifyAdapterSharing, false, \
1106 "Verify that the code for shared adapters is the equivalent") \
1107 \
1108 diagnostic(bool, PrintAssembly, false, \
1109 "Print assembly code (using external disassembler.so)") \
1110 \
1111 diagnostic(ccstr, PrintAssemblyOptions, NULL, \
1345 \
1346 product(bool, EagerXrunInit, false, \
1347 "Eagerly initialize -Xrun libraries; allows startup profiling, " \
1348 "but not all -Xrun libraries may support the state of the VM " \
1349 "at this time") \
1350 \
1351 product(bool, PreserveAllAnnotations, false, \
1352 "Preserve RuntimeInvisibleAnnotations as well " \
1353 "as RuntimeVisibleAnnotations") \
1354 \
1355 develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \
1356 "Number of OutOfMemoryErrors preallocated with backtrace") \
1357 \
1358 product(bool, LazyBootClassLoader, true, \
1359 "Enable/disable lazy opening of boot class path entries") \
1360 \
1361 product(bool, UseXMMForArrayCopy, false, \
1362 "Use SSE2 MOVQ instruction for Arraycopy") \
1363 \
1364 product(intx, FieldsAllocationStyle, 1, \
1365 "0 - type based with oops first, " \
1366 "1 - with oops last, " \
1367 "2 - oops in super and sub classes are together") \
1368 range(0, 2) \
1369 \
1370 product(bool, CompactFields, true, \
1371 "Allocate nonstatic fields in gaps between previous fields") \
1372 \
1373 notproduct(bool, PrintFieldLayout, false, \
1374 "Print field layout for each class") \
1375 \
1376 /* Need to limit the extent of the padding to reasonable size. */\
1377 /* 8K is well beyond the reasonable HW cache line size, even with */\
1378 /* aggressive prefetching, while still leaving the room for segregating */\
1379 /* among the distinct pages. */\
1380 product(intx, ContendedPaddingWidth, 128, \
1381 "How many bytes to pad the fields/classes marked @Contended with")\
1382 range(0, 8192) \
1383 constraint(ContendedPaddingWidthConstraintFunc) \
1384 \
1385 product(bool, EnableContended, true, \
1386 "Enable @Contended annotation support") \
1387 \
1388 product(bool, RestrictContended, true, \
1389 "Restrict @Contended to trusted classes") \
1390 \
1391 product(bool, UseBiasedLocking, true, \
1392 "Enable biased locking in JVM") \
1393 \
1394 product(intx, BiasedLockingStartupDelay, 4000, \
1395 "Number of milliseconds to wait before enabling biased locking") \
1396 \
1397 diagnostic(bool, PrintBiasedLockingStatistics, false, \
1398 "Print statistics of biased locking in JVM") \
1399 \
1400 product(intx, BiasedLockingBulkRebiasThreshold, 20, \
1401 "Threshold of number of revocations per type to try to " \
1402 "rebias all objects in the heap of that type") \
1403 \
1525 product(bool, UseParallelGC, false, \
1526 "Use the Parallel Scavenge garbage collector") \
1527 \
1528 product(bool, UseParallelOldGC, false, \
1529 "Use the Parallel Old garbage collector") \
1530 \
1531 product(uintx, HeapMaximumCompactionInterval, 20, \
1532 "How often should we maximally compact the heap (not allowing " \
1533 "any dead space)") \
1534 \
1535 product(uintx, HeapFirstMaximumCompactionCount, 3, \
1536 "The collection count for the first maximum compaction") \
1537 \
1538 product(bool, UseMaximumCompactionOnSystemGC, true, \
1539 "Use maximum compaction in the Parallel Old garbage collector " \
1540 "for a system GC") \
1541 \
1542 product(uintx, ParallelOldDeadWoodLimiterMean, 50, \
1543 "The mean used by the parallel compact dead wood " \
1544 "limiter (a number between 0-100)") \
1545 range(0, 100) \
1546 \
1547 product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \
1548 "The standard deviation used by the parallel compact dead wood " \
1549 "limiter (a number between 0-100)") \
1550 range(0, 100) \
1551 \
1552 product(uint, ParallelGCThreads, 0, \
1553 "Number of parallel threads parallel gc will use") \
1554 \
1555 product(bool, UseDynamicNumberOfGCThreads, false, \
1556 "Dynamically choose the number of parallel threads " \
1557 "parallel gc will use") \
1558 \
1559 diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \
1560 "Force dynamic selection of the number of " \
1561 "parallel threads parallel gc will use to aid debugging") \
1562 \
1563 product(size_t, HeapSizePerGCThread, ScaleForWordSize(64*M), \
1564 "Size of heap (bytes) per GC thread used in calculating the " \
1565 "number of GC threads") \
1566 range((uintx)os::vm_page_size(), max_uintx) \
1567 \
1568 product(bool, TraceDynamicGCThreads, false, \
1569 "Trace the dynamic GC thread usage") \
1570 \
1571 develop(bool, ParallelOldGCSplitALot, false, \
1572 "Provoke splitting (copying data from a young gen space to " \
1573 "multiple destination spaces)") \
1574 \
1575 develop(uintx, ParallelOldGCSplitInterval, 3, \
1576 "How often to provoke splitting a young gen space") \
1577 range(0, max_uintx) \
1578 \
1579 product(uint, ConcGCThreads, 0, \
1580 "Number of threads concurrent gc will use") \
1581 \
1582 product(size_t, YoungPLABSize, 4096, \
1583 "Size of young gen promotion LAB's (in HeapWords)") \
1584 \
1585 product(size_t, OldPLABSize, 1024, \
1586           "Size of old gen promotion LAB's (in HeapWords), or number " \
1587           "of blocks to attempt to claim when refilling CMS LAB's") \
1588 \
1589 product(uintx, GCTaskTimeStampEntries, 200, \
1590 "Number of time stamp entries per gc worker thread") \
1591 range(1, max_uintx) \
1592 \
1593 product(bool, AlwaysTenure, false, \
1594 "Always tenure objects in eden (ParallelGC only)") \
1595 \
1596 product(bool, NeverTenure, false, \
1597 "Never tenure objects in eden, may tenure on overflow " \
1598 "(ParallelGC only)") \
1599 \
1600 product(bool, ScavengeBeforeFullGC, true, \
1601 "Scavenge youngest generation before each full GC.") \
1602 \
1603 develop(bool, ScavengeWithObjectsInToSpace, false, \
1604 "Allow scavenges to occur when to-space contains objects") \
1605 \
1606 product(bool, UseConcMarkSweepGC, false, \
1607 "Use Concurrent Mark-Sweep GC in the old generation") \
1608 \
1609 product(bool, ExplicitGCInvokesConcurrent, false, \
1610 "A System.gc() request invokes a concurrent collection; " \
1611 "(effective only when using concurrent collectors)") \
1612 \
1613 product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \
1614 "A System.gc() request invokes a concurrent collection and " \
1615 "also unloads classes during such a concurrent gc cycle " \
1616 "(effective only when UseConcMarkSweepGC)") \
1617 \
1618 product(bool, GCLockerInvokesConcurrent, false, \
1619           "The exit of a JNI critical section necessitating a scavenge " \
1620 "also kicks off a background concurrent collection") \
1621 \
1622 product(uintx, GCLockerEdenExpansionPercent, 5, \
1623 "How much the GC can expand the eden by while the GC locker " \
1624 "is active (as a percentage)") \
1625 range(0, 100) \
1626 \
1627 diagnostic(uintx, GCLockerRetryAllocationCount, 2, \
1628 "Number of times to retry allocations when " \
1629 "blocked by the GC locker") \
1630 \
1631 develop(bool, UseCMSAdaptiveFreeLists, true, \
1632 "Use adaptive free lists in the CMS generation") \
1633 \
1634 develop(bool, UseAsyncConcMarkSweepGC, true, \
1635 "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
1636 \
1637 product(bool, UseCMSBestFit, true, \
1638 "Use CMS best fit allocation strategy") \
1639 \
1640 product(bool, UseParNewGC, false, \
1641 "Use parallel threads in the new generation") \
1642 \
1643 product(bool, PrintTaskqueue, false, \
1644 "Print taskqueue statistics for parallel collectors") \
1645 \
1646 product(bool, PrintTerminationStats, false, \
1647 "Print termination statistics for parallel collectors") \
1648 \
1649 product(uintx, ParallelGCBufferWastePct, 10, \
1650 "Wasted fraction of parallel allocation buffer") \
1651 range(0, 100) \
1652 \
1653 product(uintx, TargetPLABWastePct, 10, \
1654 "Target wasted space in last buffer as percent of overall " \
1655 "allocation") \
1656 range(1, 100) \
1657 \
1658 product(uintx, PLABWeight, 75, \
1659 "Percentage (0-100) used to weight the current sample when " \
1660 "computing exponentially decaying average for ResizePLAB") \
1661 range(0, 100) \
1662 \
1663 product(bool, ResizePLAB, true, \
1664 "Dynamically resize (survivor space) promotion LAB's") \
1665 \
1666 product(bool, PrintPLAB, false, \
1667 "Print (survivor space) promotion LAB's sizing decisions") \
1668 \
1669 product(intx, ParGCArrayScanChunk, 50, \
1670           "Scan a subset of an object array and push the remainder if " \
1671           "the array is bigger than this") \
1672 range(1, max_intx) \
1673 \
1674 product(bool, ParGCUseLocalOverflow, false, \
1675 "Instead of a global overflow list, use local overflow stacks") \
1676 \
1677 product(bool, ParGCTrimOverflow, true, \
1678 "Eagerly trim the local overflow lists " \
1679 "(when ParGCUseLocalOverflow)") \
1680 \
1681 notproduct(bool, ParGCWorkQueueOverflowALot, false, \
1682 "Simulate work queue overflow in ParNew") \
1683 \
1684 notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \
1685           "An \"interval\" counter that determines how frequently " \
1686 "we simulate overflow; a smaller number increases frequency") \
1687 \
1688 product(uintx, ParGCDesiredObjsFromOverflowList, 20, \
1689 "The desired number of objects to claim from the overflow list") \
1690 \
1691 diagnostic(uintx, ParGCStridesPerThread, 2, \
1692 "The number of strides per worker thread that we divide up the " \
1693 "card table scanning work into") \
1694 range(1, max_uintx) \
1695 \
1696 diagnostic(intx, ParGCCardsPerStrideChunk, 256, \
1697 "The number of cards in each chunk of the parallel chunks used " \
1698 "during card table scanning") \
1699 range(1, max_intx) \
1700 \
1701 product(uintx, OldPLABWeight, 50, \
1702 "Percentage (0-100) used to weight the current sample when " \
1703 "computing exponentially decaying average for resizing " \
1704 "OldPLABSize") \
1705 range(0, 100) \
1706 \
1707 product(bool, ResizeOldPLAB, true, \
1708 "Dynamically resize (old gen) promotion LAB's") \
1709 \
1710 product(bool, PrintOldPLAB, false, \
1711 "Print (old gen) promotion LAB's sizing decisions") \
1712 \
1713 product(size_t, CMSOldPLABMax, 1024, \
1714 "Maximum size of CMS gen promotion LAB caches per worker " \
1715 "per block size") \
1716 range(1, max_uintx) \
1717 \
1718 product(size_t, CMSOldPLABMin, 16, \
1719 "Minimum size of CMS gen promotion LAB caches per worker " \
1720 "per block size") \
1721 range(1, max_uintx) \
1722 constraint(CMSOldPLABMinConstraintFunc) \
1723 \
1724 product(uintx, CMSOldPLABNumRefills, 4, \
1725 "Nominal number of refills of CMS gen promotion LAB cache " \
1726 "per worker per block size") \
1727 range(1, max_uintx) \
1728 \
1729 product(bool, CMSOldPLABResizeQuicker, false, \
1730 "React on-the-fly during a scavenge to a sudden " \
1731 "change in block demand rate") \
1732 \
1733 product(uintx, CMSOldPLABToleranceFactor, 4, \
1734 "The tolerance of the phase-change detector for on-the-fly " \
1735 "PLAB resizing during a scavenge") \
1736 range(1, max_uintx) \
1737 \
1738 product(uintx, CMSOldPLABReactivityFactor, 2, \
1739 "The gain in the feedback loop for on-the-fly PLAB resizing " \
1740 "during a scavenge") \
1741 \
1742 product(bool, AlwaysPreTouch, false, \
1743 "Force all freshly committed pages to be pre-touched") \
1744 \
1745 product_pd(size_t, CMSYoungGenPerWorker, \
1746 "The maximum size of young gen chosen by default per GC worker " \
1747 "thread available") \
1748 range(1, max_uintx) \
1749 \
1750 product(uintx, CMSIncrementalSafetyFactor, 10, \
1751 "Percentage (0-100) used to add conservatism when computing the " \
1752 "duty cycle") \
1753 range(0, 100) \
1754 \
1755 product(uintx, CMSExpAvgFactor, 50, \
1756 "Percentage (0-100) used to weight the current sample when " \
1757 "computing exponential averages for CMS statistics") \
1758 range(0, 100) \
1759 \
1760 product(uintx, CMS_FLSWeight, 75, \
1761 "Percentage (0-100) used to weight the current sample when " \
1762 "computing exponentially decaying averages for CMS FLS " \
1763 "statistics") \
1764 range(0, 100) \
1765 \
1766 product(uintx, CMS_FLSPadding, 1, \
1767 "The multiple of deviation from mean to use for buffering " \
1768 "against volatility in free list demand") \
1769 \
1770 product(uintx, FLSCoalescePolicy, 2, \
1771 "CMS: aggressiveness level for coalescing, increasing " \
1772 "from 0 to 4") \
1773 range(0, 4) \
1774 \
1775 product(bool, FLSAlwaysCoalesceLarge, false, \
1776 "CMS: larger free blocks are always available for coalescing") \
1777 \
1778 product(double, FLSLargestBlockCoalesceProximity, 0.99, \
1779 "CMS: the smaller the percentage the greater the coalescing " \
1780 "force") \
1781 \
1782 product(double, CMSSmallCoalSurplusPercent, 1.05, \
1783 "CMS: the factor by which to inflate estimated demand of small " \
1784 "block sizes to prevent coalescing with an adjoining block") \
1785 \
1786 product(double, CMSLargeCoalSurplusPercent, 0.95, \
1787 "CMS: the factor by which to inflate estimated demand of large " \
1788 "block sizes to prevent coalescing with an adjoining block") \
1789 \
1790 product(double, CMSSmallSplitSurplusPercent, 1.10, \
1791 "CMS: the factor by which to inflate estimated demand of small " \
1792 "block sizes to prevent splitting to supply demand for smaller " \
1793 "blocks") \
1794 \
1795 product(double, CMSLargeSplitSurplusPercent, 1.00, \
1796 "CMS: the factor by which to inflate estimated demand of large " \
1797 "block sizes to prevent splitting to supply demand for smaller " \
1798 "blocks") \
1799 \
1800 product(bool, CMSExtrapolateSweep, false, \
1801 "CMS: cushion for block demand during sweep") \
1802 \
1803 product(uintx, CMS_SweepWeight, 75, \
1804 "Percentage (0-100) used to weight the current sample when " \
1805 "computing exponentially decaying average for inter-sweep " \
1806 "duration") \
1807 range(0, 100) \
1808 \
1809 product(uintx, CMS_SweepPadding, 1, \
1810 "The multiple of deviation from mean to use for buffering " \
1811 "against volatility in inter-sweep duration") \
1812 \
1813 product(uintx, CMS_SweepTimerThresholdMillis, 10, \
1814 "Skip block flux-rate sampling for an epoch unless inter-sweep " \
1815 "duration exceeds this threshold in milliseconds") \
1816 \
1817 product(bool, CMSClassUnloadingEnabled, true, \
1818           "Whether class unloading is enabled when using CMS GC") \
1819 \
1820 product(uintx, CMSClassUnloadingMaxInterval, 0, \
1821 "When CMS class unloading is enabled, the maximum CMS cycle " \
1822 "count for which classes may not be unloaded") \
1823 \
1824 develop(intx, CMSDictionaryChoice, 0, \
1825 "Use BinaryTreeDictionary as default in the CMS generation") \
1826 \
1827 product(uintx, CMSIndexedFreeListReplenish, 4, \
1828 "Replenish an indexed free list with this number of chunks") \
1829 \
1830 product(bool, CMSReplenishIntermediate, true, \
1831 "Replenish all intermediate free-list caches") \
1832 \
1833 product(bool, CMSSplitIndexedFreeListBlocks, true, \
1834 "When satisfying batched demand, split blocks from the " \
1835 "IndexedFreeList whose size is a multiple of requested size") \
1836 \
1837 product(bool, CMSLoopWarn, false, \
1838 "Warn in case of excessive CMS looping") \
1839 \
1840 develop(bool, CMSOverflowEarlyRestoration, false, \
1841 "Restore preserved marks early") \
1842 \
1843 product(size_t, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \
1844 "Size of marking stack") \
1845 \
1846 product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \
1847 "Maximum size of marking stack") \
1848 range(1, (max_jint - 1)) \
1849 \
1850 notproduct(bool, CMSMarkStackOverflowALot, false, \
1851 "Simulate frequent marking stack / work queue overflow") \
1852 \
1853 notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \
1854 "An \"interval\" counter that determines how frequently " \
1855 "to simulate overflow; a smaller number increases frequency") \
1856 \
1857 product(uintx, CMSMaxAbortablePrecleanLoops, 0, \
1858 "Maximum number of abortable preclean iterations, if > 0") \
1859 \
1860 product(intx, CMSMaxAbortablePrecleanTime, 5000, \
1861 "Maximum time in abortable preclean (in milliseconds)") \
1862 \
1863 product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \
1864 "Nominal minimum work per abortable preclean iteration") \
1865 \
1866 manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \
1867 "Time that we sleep between iterations when not given " \
1868 "enough work per iteration") \
1869 \
1870 product(size_t, CMSRescanMultiple, 32, \
1871 "Size (in cards) of CMS parallel rescan task") \
1872 range(1, max_uintx) \
1873 \
1874 product(size_t, CMSConcMarkMultiple, 32, \
1875 "Size (in cards) of CMS concurrent MT marking task") \
1876 range(1, max_uintx) \
1877 \
1878 product(bool, CMSAbortSemantics, false, \
1879 "Whether abort-on-overflow semantics is implemented") \
1880 \
1881 product(bool, CMSParallelInitialMarkEnabled, true, \
1882 "Use the parallel initial mark.") \
1883 \
1884 product(bool, CMSParallelRemarkEnabled, true, \
1885           "Whether parallel remark is enabled (only if ParNewGC)")          \
1886 \
1887 product(bool, CMSParallelSurvivorRemarkEnabled, true, \
1888           "Whether parallel remark of survivor space is "                   \
1889 "enabled (effective only if CMSParallelRemarkEnabled)") \
1890 \
1891 product(bool, CMSPLABRecordAlways, true, \
1892 "Always record survivor space PLAB boundaries (effective only " \
1893 "if CMSParallelSurvivorRemarkEnabled)") \
1894 \
1895 product(bool, CMSEdenChunksRecordAlways, true, \
1896 "Always record eden chunks used for the parallel initial mark " \
1897 "or remark of eden") \
1898 \
1899 product(bool, CMSPrintEdenSurvivorChunks, false, \
1900 "Print the eden and the survivor chunks used for the parallel " \
1901 "initial mark or remark of the eden/survivor spaces") \
1902 \
1903 product(bool, CMSConcurrentMTEnabled, true, \
1904           "Whether multi-threaded concurrent work is enabled "              \
1905 "(effective only if ParNewGC)") \
1906 \
1907 product(bool, CMSPrecleaningEnabled, true, \
1908           "Whether concurrent precleaning is enabled")                      \
1909 \
1910 product(uintx, CMSPrecleanIter, 3, \
1911 "Maximum number of precleaning iteration passes") \
1912 range(0, 9) \
1913 \
1914 product(uintx, CMSPrecleanDenominator, 3, \
1915 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
1916 "ratio") \
1917 range(1, max_uintx) \
1918 constraint(CMSPrecleanDenominatorConstraintFunc) \
1919 \
1920 product(uintx, CMSPrecleanNumerator, 2, \
1921 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
1922 "ratio") \
1923 range(0, max_uintx-1) \
1924 constraint(CMSPrecleanNumeratorConstraintFunc) \
1925 \
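  /* A hedged reading of the ratio above: with the defaults (2:3),            */ \
  /* precleaning repeats, up to CMSPrecleanIter passes, only while a pass     */ \
  /* shrinks the dirty card count below roughly Numerator/Denominator of the  */ \
  /* previous pass's count; the exact test is in the CMS preclean loop.       */ \
                                                                                  \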
1926 product(bool, CMSPrecleanRefLists1, true, \
1927 "Preclean ref lists during (initial) preclean phase") \
1928 \
1929 product(bool, CMSPrecleanRefLists2, false, \
1930 "Preclean ref lists during abortable preclean phase") \
1931 \
1932 product(bool, CMSPrecleanSurvivors1, false, \
1933 "Preclean survivors during (initial) preclean phase") \
1934 \
1935 product(bool, CMSPrecleanSurvivors2, true, \
1936 "Preclean survivors during abortable preclean phase") \
1937 \
1938 product(uintx, CMSPrecleanThreshold, 1000, \
1939 "Do not iterate again if number of dirty cards is less than this")\
1940 range(100, max_uintx) \
1941 \
1942 product(bool, CMSCleanOnEnter, true, \
1943 "Clean-on-enter optimization for reducing number of dirty cards") \
1944 \
1945 product(uintx, CMSRemarkVerifyVariant, 1, \
1946 "Choose variant (1,2) of verification following remark") \
1947 range(1, 2) \
1948 \
1949 product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M, \
1950 "If Eden size is below this, do not try to schedule remark") \
1951 \
1952 product(uintx, CMSScheduleRemarkEdenPenetration, 50, \
1953 "The Eden occupancy percentage (0-100) at which " \
1954 "to try and schedule remark pause") \
1955 range(0, 100) \
1956 \
1957 product(uintx, CMSScheduleRemarkSamplingRatio, 5, \
1958 "Start sampling eden top at least before young gen " \
1959 "occupancy reaches 1/<ratio> of the size at which " \
1960 "we plan to schedule remark") \
1961 range(1, max_uintx) \
1962 \
1963 product(uintx, CMSSamplingGrain, 16*K, \
1964 "The minimum distance between eden samples for CMS (see above)") \
1965 range(1, max_uintx) \
1966 \
1967 product(bool, CMSScavengeBeforeRemark, false, \
1968 "Attempt scavenge before the CMS remark step") \
1969 \
1970 develop(bool, CMSTraceSweeper, false, \
1971 "Trace some actions of the CMS sweeper") \
1972 \
1973 product(uintx, CMSWorkQueueDrainThreshold, 10, \
1974 "Don't drain below this size per parallel worker/thief") \
1975 \
1976 manageable(intx, CMSWaitDuration, 2000, \
1977 "Time in milliseconds that CMS thread waits for young GC") \
1978 \
1979 develop(uintx, CMSCheckInterval, 1000, \
1980           "Interval in milliseconds at which the CMS thread checks if it "  \
1981 "should start a collection cycle") \
1982 \
1983 product(bool, CMSYield, true, \
1984 "Yield between steps of CMS") \
1985 \
1986 product(size_t, CMSBitMapYieldQuantum, 10*M, \
1987 "Bitmap operations should process at most this many bits " \
1988 "between yields") \
1989 range(1, max_uintx) \
1990 \
1991 product(bool, CMSDumpAtPromotionFailure, false, \
1992 "Dump useful information about the state of the CMS old " \
1993 "generation upon a promotion failure") \
1994 \
1995 product(bool, CMSPrintChunksInDump, false, \
1996 "In a dump enabled by CMSDumpAtPromotionFailure, include " \
1997 "more detailed information about the free chunks") \
1998 \
1999 product(bool, CMSPrintObjectsInDump, false, \
2000 "In a dump enabled by CMSDumpAtPromotionFailure, include " \
2001 "more detailed information about the allocated objects") \
2002 \
2003 diagnostic(bool, FLSVerifyAllHeapReferences, false, \
2004 "Verify that all references across the FLS boundary " \
2005 "are to valid objects") \
2006 \
2007 diagnostic(bool, FLSVerifyLists, false, \
2008 "Do lots of (expensive) FreeListSpace verification") \
2009 \
2010 diagnostic(bool, FLSVerifyIndexTable, false, \
2011 "Do lots of (expensive) FLS index table verification") \
2012 \
2013 develop(bool, FLSVerifyDictionary, false, \
2014 "Do lots of (expensive) FLS dictionary verification") \
2015 \
2016 develop(bool, VerifyBlockOffsetArray, false, \
2017 "Do (expensive) block offset array verification") \
2018 \
2019 diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \
2020 "Maintain _unallocated_block in BlockOffsetArray " \
2021 "(currently applicable only to CMS collector)") \
2022 \
2023 develop(bool, TraceCMSState, false, \
2024 "Trace the state of the CMS collection") \
2025 \
2026 product(intx, RefDiscoveryPolicy, 0, \
2027 "Select type of reference discovery policy: " \
2028 "reference-based(0) or referent-based(1)") \
2029 range(ReferenceProcessor::DiscoveryPolicyMin, \
2030 ReferenceProcessor::DiscoveryPolicyMax) \
2031 \
2032 product(bool, ParallelRefProcEnabled, false, \
2033 "Enable parallel reference processing whenever possible") \
2034 \
2035 product(bool, ParallelRefProcBalancingEnabled, true, \
2036 "Enable balancing of reference processing queues") \
2037 \
2038 product(uintx, CMSTriggerRatio, 80, \
2039 "Percentage of MinHeapFreeRatio in CMS generation that is " \
2040 "allocated before a CMS collection cycle commences") \
2041 range(0, 100) \
2042 \
2043 product(uintx, CMSBootstrapOccupancy, 50, \
2044 "Percentage CMS generation occupancy at which to " \
2045 "initiate CMS collection for bootstrapping collection stats") \
2046 range(0, 100) \
2047 \
2048 product(intx, CMSInitiatingOccupancyFraction, -1, \
2049 "Percentage CMS generation occupancy to start a CMS collection " \
2050 "cycle. A negative value means that CMSTriggerRatio is used") \
2051 range(min_intx, 100) \
2052 \
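  /* When CMSInitiatingOccupancyFraction is negative, the initiating occupancy */ \
  /* is derived from the flags above, approximately                            */ \
  /* (100 - MinHeapFreeRatio) + CMSTriggerRatio * MinHeapFreeRatio / 100;      */ \
  /* e.g. with the defaults 40 and 80 that is about 92 percent (approximate;   */ \
  /* the exact expression is in the CMS collector code).                       */ \
                                                                                   \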
2053 product(uintx, InitiatingHeapOccupancyPercent, 45, \
2054 "Percentage of the (entire) heap occupancy to start a " \
2055 "concurrent GC cycle. It is used by GCs that trigger a " \
2056 "concurrent GC cycle based on the occupancy of the entire heap, " \
2057 "not just one of the generations (e.g., G1). A value of 0 " \
2058 "denotes 'do constant GC cycles'.") \
2059 range(0, 100) \
2060 \
2061 manageable(intx, CMSTriggerInterval, -1, \
2062 "Commence a CMS collection cycle (at least) every so many " \
2063 "milliseconds (0 permanently, -1 disabled)") \
2064 range(-1, max_intx) \
2065 \
2066 product(bool, UseCMSInitiatingOccupancyOnly, false, \
2067 "Only use occupancy as a criterion for starting a CMS collection")\
2068 \
2069 product(uintx, CMSIsTooFullPercentage, 98, \
2070 "An absolute ceiling above which CMS will always consider the " \
2071 "unloading of classes when class unloading is enabled") \
2072 range(0, 100) \
2073 \
2074 develop(bool, CMSTestInFreeList, false, \
2075 "Check if the coalesced range is already in the " \
2076 "free lists as claimed") \
2077 \
2078 notproduct(bool, CMSVerifyReturnedBytes, false, \
2079 "Check that all the garbage collected was returned to the " \
2080 "free lists") \
2081 \
2082 notproduct(bool, ScavengeALot, false, \
2083 "Force scavenge at every Nth exit from the runtime system " \
2084 "(N=ScavengeALotInterval)") \
2085 \
2086 develop(bool, FullGCALot, false, \
2087 "Force full gc at every Nth exit from the runtime system " \
2088 "(N=FullGCALotInterval)") \
2089 \
2090 notproduct(bool, GCALotAtAllSafepoints, false, \
2091 "Enforce ScavengeALot/GCALot at all potential safepoints") \
2092 \
2162 product(bool, TLABStats, true, \
2163 "Provide more detailed and expensive TLAB statistics " \
2164 "(with PrintTLAB)") \
2165 \
2166 product_pd(bool, NeverActAsServerClassMachine, \
2167 "Never act like a server-class machine") \
2168 \
2169 product(bool, AlwaysActAsServerClassMachine, false, \
2170 "Always act like a server-class machine") \
2171 \
2172 product_pd(uint64_t, MaxRAM, \
2173 "Real memory size (in bytes) used to set maximum heap size") \
2174 \
2175 product(size_t, ErgoHeapSizeLimit, 0, \
2176 "Maximum ergonomically set heap size (in bytes); zero means use " \
2177 "MaxRAM / MaxRAMFraction") \
2178 \
2179 product(uintx, MaxRAMFraction, 4, \
2180 "Maximum fraction (1/n) of real memory used for maximum heap " \
2181 "size") \
2182 range(1, max_uintx) \
2183 \
2184 product(uintx, DefaultMaxRAMFraction, 4, \
2185 "Maximum fraction (1/n) of real memory used for maximum heap " \
2186 "size; deprecated: to be renamed to MaxRAMFraction") \
2187 range(1, max_uintx) \
2188 \
2189 product(uintx, MinRAMFraction, 2, \
2190 "Minimum fraction (1/n) of real memory used for maximum heap " \
2191 "size on systems with small physical memory size") \
2192 range(1, max_uintx) \
2193 \
2194 product(uintx, InitialRAMFraction, 64, \
2195 "Fraction (1/n) of real memory used for initial heap size") \
2196 range(1, max_uintx) \
2197 \
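  /* Worked example of the fractions above: with 16G of real memory,          */ \
  /* MaxRAMFraction = 4 yields a default maximum heap of about 4G             */ \
  /* (MaxRAM / MaxRAMFraction) and InitialRAMFraction = 64 an initial heap of */ \
  /* about 256M, unless overridden by -Xmx/-Xms or by ErgoHeapSizeLimit.      */ \
                                                                                  \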
2198 develop(uintx, MaxVirtMemFraction, 2, \
2199 "Maximum fraction (1/n) of virtual memory used for ergonomically "\
2200 "determining maximum heap size") \
2201 \
2202 product(bool, UseAutoGCSelectPolicy, false, \
2203 "Use automatic collection selection policy") \
2204 \
2205 product(uintx, AutoGCSelectPauseMillis, 5000, \
2206 "Automatic GC selection pause threshold in milliseconds") \
2207 \
2208 product(bool, UseAdaptiveSizePolicy, true, \
2209 "Use adaptive generation sizing policies") \
2210 \
2211 product(bool, UsePSAdaptiveSurvivorSizePolicy, true, \
2212 "Use adaptive survivor sizing policies") \
2213 \
2214 product(bool, UseAdaptiveGenerationSizePolicyAtMinorCollection, true, \
2215 "Use adaptive young-old sizing policies at minor collections") \
2216 \
2235 develop(bool, PSAdjustTenuredGenForMinorPause, false, \
2236 "Adjust tenured generation to achieve a minor pause goal") \
2237 \
2238 develop(bool, PSAdjustYoungGenForMajorPause, false, \
2239 "Adjust young generation to achieve a major pause goal") \
2240 \
2241 product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \
2242           "Number of steps where heuristics are used before data is used")  \
2243 \
2244 develop(uintx, AdaptiveSizePolicyReadyThreshold, 5, \
2245 "Number of collections before the adaptive sizing is started") \
2246 \
2247 product(uintx, AdaptiveSizePolicyOutputInterval, 0, \
2248 "Collection interval for printing information; zero means never") \
2249 \
2250 product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \
2251 "Use adaptive minimum footprint as a goal") \
2252 \
2253 product(uintx, AdaptiveSizePolicyWeight, 10, \
2254 "Weight given to exponential resizing, between 0 and 100") \
2255 range(0, 100) \
2256 \
2257 product(uintx, AdaptiveTimeWeight, 25, \
2258 "Weight given to time in adaptive policy, between 0 and 100") \
2259 range(0, 100) \
2260 \
2261 product(uintx, PausePadding, 1, \
2262 "How much buffer to keep for pause time") \
2263 \
2264 product(uintx, PromotedPadding, 3, \
2265 "How much buffer to keep for promotion failure") \
2266 \
2267 product(uintx, SurvivorPadding, 3, \
2268 "How much buffer to keep for survivor overflow") \
2269 \
2270 product(uintx, ThresholdTolerance, 10, \
2271 "Allowed collection cost difference between generations") \
2272 range(0, 100) \
2273 \
2274 product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50, \
2275 "If collection costs are within margin, reduce both by full " \
2276 "delta") \
2277 \
2278 product(uintx, YoungGenerationSizeIncrement, 20, \
2279 "Adaptive size percentage change in young generation") \
2280 range(0, 100) \
2281 \
2282 product(uintx, YoungGenerationSizeSupplement, 80, \
2283           "Supplement to YoungGenerationSizeIncrement used at startup")     \
2284 range(0, 100) \
2285 \
2286 product(uintx, YoungGenerationSizeSupplementDecay, 8, \
2287           "Decay factor to YoungGenerationSizeSupplement")                  \
2288 range(1, max_uintx) \
2289 \
2290 product(uintx, TenuredGenerationSizeIncrement, 20, \
2291 "Adaptive size percentage change in tenured generation") \
2292 range(0, 100) \
2293 \
2294 product(uintx, TenuredGenerationSizeSupplement, 80, \
2295 "Supplement to TenuredGenerationSizeIncrement used at startup") \
2296 range(0, 100) \
2297 \
2298 product(uintx, TenuredGenerationSizeSupplementDecay, 2, \
2299 "Decay factor to TenuredGenerationSizeIncrement") \
2300 range(1, max_uintx) \
2301 \
2302 product(uintx, MaxGCPauseMillis, max_uintx, \
2303           "Adaptive size policy maximum GC pause time goal in milliseconds, "\
2304           "or (G1 only) the maximum GC time per MMU time slice")            \
2305 \
2306 product(uintx, GCPauseIntervalMillis, 0, \
2307 "Time slice for MMU specification") \
2308 \
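  /* Example MMU goal: MaxGCPauseMillis=200 with GCPauseIntervalMillis=1000   */ \
  /* asks G1 to spend at most ~200ms in GC pauses within any 1000ms time      */ \
  /* slice (a goal, not a guarantee).                                         */ \
                                                                                  \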
2309 product(uintx, MaxGCMinorPauseMillis, max_uintx, \
2310 "Adaptive size policy maximum GC minor pause time goal " \
2311           "in milliseconds")                                                \
2312 \
2313 product(uintx, GCTimeRatio, 99, \
2314 "Adaptive size policy application time to GC time ratio") \
2315 \
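  /* GCTimeRatio expresses a goal of roughly 1/(1+GCTimeRatio) of total time  */ \
  /* spent in GC; the default 99 therefore targets about 1% GC time           */ \
  /* (a goal, not a hard limit).                                              */ \
                                                                                  \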
2316 product(uintx, AdaptiveSizeDecrementScaleFactor, 4, \
2317 "Adaptive size scale down factor for shrinking") \
2318 range(1, max_uintx) \
2319 \
2320 product(bool, UseAdaptiveSizeDecayMajorGCCost, true, \
2321 "Adaptive size decays the major cost for long major intervals") \
2322 \
2323 product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10, \
2324 "Time scale over which major costs decay") \
2325 \
2326 product(uintx, MinSurvivorRatio, 3, \
2327 "Minimum ratio of young generation/survivor space size") \
2328 \
2329 product(uintx, InitialSurvivorRatio, 8, \
2330 "Initial ratio of young generation/survivor space size") \
2331 \
2332 product(size_t, BaseFootPrintEstimate, 256*M, \
2333 "Estimate of footprint other than Java Heap") \
2334 \
2335 product(bool, UseGCOverheadLimit, true, \
2336           "Use policy to limit the proportion of time spent in GC "         \
2337           "before an OutOfMemoryError is thrown")                           \
2338 \
2339 product(uintx, GCTimeLimit, 98, \
2340 "Limit of the proportion of time spent in GC before " \
2341 "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \
2342 range(0, 100) \
2343 \
2344 product(uintx, GCHeapFreeLimit, 2, \
2345 "Minimum percentage of free space after a full GC before an " \
2346 "OutOfMemoryError is thrown (used with GCTimeLimit)") \
2347 range(0, 100) \
2348 \
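  /* Overhead-limit example: with the defaults GCTimeLimit=98 and             */ \
  /* GCHeapFreeLimit=2, an OutOfMemoryError is thrown only when more than 98% */ \
  /* of time goes to GC and less than 2% of the heap is recovered, sustained  */ \
  /* over several collections (see AdaptiveSizePolicyGCTimeLimitThreshold     */ \
  /* below).                                                                  */ \
                                                                                  \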
2349 develop(uintx, AdaptiveSizePolicyGCTimeLimitThreshold, 5, \
2350 "Number of consecutive collections before gc time limit fires") \
2351 \
2352 product(bool, PrintAdaptiveSizePolicy, false, \
2353 "Print information about AdaptiveSizePolicy") \
2354 \
2355 product(intx, PrefetchCopyIntervalInBytes, -1, \
2356 "How far ahead to prefetch destination area (<= 0 means off)") \
2357 \
2358 product(intx, PrefetchScanIntervalInBytes, -1, \
2359 "How far ahead to prefetch scan area (<= 0 means off)") \
2360 \
2361 product(intx, PrefetchFieldsAhead, -1, \
2362 "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
2363 \
2364 diagnostic(bool, VerifySilently, false, \
2365 "Do not print the verification progress") \
2366 \
2367 diagnostic(bool, VerifyDuringStartup, false, \
2612 "compile native methods if supported by the compiler") \
2613 \
2614 develop_pd(bool, CICompileOSR, \
2615 "compile on stack replacement methods if supported by the " \
2616 "compiler") \
2617 \
2618 develop(bool, CIPrintMethodCodes, false, \
2619 "print method bytecodes of the compiled code") \
2620 \
2621 develop(bool, CIPrintTypeFlow, false, \
2622 "print the results of ciTypeFlow analysis") \
2623 \
2624 develop(bool, CITraceTypeFlow, false, \
2625 "detailed per-bytecode tracing of ciTypeFlow analysis") \
2626 \
2627 develop(intx, OSROnlyBCI, -1, \
2628 "OSR only at this bci. Negative values mean exclude that bci") \
2629 \
2630 /* compiler */ \
2631 \
2632 /* notice: the max range value here is max_jint, not max_intx */ \
2633 /* because of overflow issue */ \
2634 product(intx, CICompilerCount, CI_COMPILER_COUNT, \
2635 "Number of compiler threads to run") \
2636 range((intx)Arguments::get_min_number_of_compiler_threads(), \
2637 max_jint) \
2638 \
2639 product(intx, CompilationPolicyChoice, 0, \
2640 "which compilation policy (0-3)") \
2641 range(0, 3) \
2642 \
2643 develop(bool, UseStackBanging, true, \
2644 "use stack banging for stack overflow checks (required for " \
2645 "proper StackOverflow handling; disable only to measure cost " \
2646           "of stack banging)")                                              \
2647 \
2648 develop(bool, UseStrictFP, true, \
2649 "use strict fp if modifier strictfp is set") \
2650 \
2651 develop(bool, GenerateSynchronizationCode, true, \
2652 "generate locking/unlocking code for synchronized methods and " \
2653 "monitors") \
2654 \
2655 develop(bool, GenerateCompilerNullChecks, true, \
2656 "Generate explicit null checks for loads/stores/calls") \
2657 \
2658 develop(bool, GenerateRangeChecks, true, \
2659 "Generate range checks for array accesses") \
2660 \
2661 develop_pd(bool, ImplicitNullChecks, \
2738 \
2739 product(bool, PrintVMOptions, false, \
2740 "Print flags that appeared on the command line") \
2741 \
2742 product(bool, IgnoreUnrecognizedVMOptions, false, \
2743 "Ignore unrecognized VM options") \
2744 \
2745 product(bool, PrintCommandLineFlags, false, \
2746 "Print flags specified on command line or set by ergonomics") \
2747 \
2748 product(bool, PrintFlagsInitial, false, \
2749 "Print all VM flags before argument processing and exit VM") \
2750 \
2751 product(bool, PrintFlagsFinal, false, \
2752 "Print all VM flags after argument and ergonomic processing") \
2753 \
2754 notproduct(bool, PrintFlagsWithComments, false, \
2755 "Print all VM flags with default values and descriptions and " \
2756 "exit") \
2757 \
2758 product(bool, PrintFlagsRanges, false, \
2759 "Print VM flags and their ranges and exit VM") \
2760 \
2761 diagnostic(bool, SerializeVMOutput, true, \
2762 "Use a mutex to serialize output to tty and LogFile") \
2763 \
2764 diagnostic(bool, DisplayVMOutput, true, \
2765 "Display all VM output on the tty, independently of LogVMOutput") \
2766 \
2767 diagnostic(bool, LogVMOutput, false, \
2768 "Save VM output to LogFile") \
2769 \
2770 diagnostic(ccstr, LogFile, NULL, \
2771 "If LogVMOutput or LogCompilation is on, save VM output to " \
2772 "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\
2773 \
2774 product(ccstr, ErrorFile, NULL, \
2775 "If an error occurs, save the error data to this file " \
2776 "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \
2777 \
2778 product(bool, DisplayVMOutputToStderr, false, \
2779 "If DisplayVMOutput is true, display all VM output to stderr") \
2780 \
2979 develop(bool, TraceFrequencyInlining, false, \
2980 "Trace frequency based inlining") \
2981 \
2982 develop_pd(bool, InlineIntrinsics, \
2983 "Inline intrinsics that can be statically resolved") \
2984 \
2985 product_pd(bool, ProfileInterpreter, \
2986 "Profile at the bytecode level during interpretation") \
2987 \
2988 develop(bool, TraceProfileInterpreter, false, \
2989 "Trace profiling at the bytecode level during interpretation. " \
2990 "This outputs the profiling information collected to improve " \
2991           "JIT compilation.")                                               \
2992 \
2993 develop_pd(bool, ProfileTraps, \
2994 "Profile deoptimization traps at the bytecode level") \
2995 \
2996 product(intx, ProfileMaturityPercentage, 20, \
2997 "number of method invocations/branches (expressed as % of " \
2998 "CompileThreshold) before using the method's profile") \
2999 range(0, 100) \
3000 \
3001 diagnostic(bool, PrintMethodData, false, \
3002 "Print the results of +ProfileInterpreter at end of run") \
3003 \
3004 develop(bool, VerifyDataPointer, trueInDebug, \
3005 "Verify the method data pointer during interpreter profiling") \
3006 \
3007 develop(bool, VerifyCompiledCode, false, \
3008 "Include miscellaneous runtime verifications in nmethod code; " \
3009 "default off because it disturbs nmethod size heuristics") \
3010 \
3011 notproduct(bool, CrashGCForDumpingJavaThread, false, \
3012           "Manually make GC thread crash, then dump Java stack trace; "     \
3013           "test only")                                                      \
3014 \
3015 /* compilation */ \
3016 product(bool, UseCompiler, true, \
3017 "Use Just-In-Time compilation") \
3018 \
3019 develop(bool, TraceCompilationPolicy, false, \
3040 "Do not compile methods > HugeMethodLimit") \
3041 \
3042 /* Bytecode escape analysis estimation. */ \
3043 product(bool, EstimateArgEscape, true, \
3044 "Analyze bytecodes to estimate escape state of arguments") \
3045 \
3046 product(intx, BCEATraceLevel, 0, \
3047 "How much tracing to do of bytecode escape analysis estimates") \
3048 \
3049 product(intx, MaxBCEAEstimateLevel, 5, \
3050 "Maximum number of nested calls that are analyzed by BC EA") \
3051 \
3052 product(intx, MaxBCEAEstimateSize, 150, \
3053 "Maximum bytecode size of a method to be analyzed by BC EA") \
3054 \
3055 product(intx, AllocatePrefetchStyle, 1, \
3056 "0 = no prefetch, " \
3057 "1 = prefetch instructions for each allocation, " \
3058 "2 = use TLAB watermark to gate allocation prefetch, " \
3059 "3 = use BIS instruction on Sparc for allocation prefetch") \
3060 range(0, 3) \
3061 \
3062 product(intx, AllocatePrefetchDistance, -1, \
3063 "Distance to prefetch ahead of allocation pointer") \
3064 \
3065 product(intx, AllocatePrefetchLines, 3, \
3066 "Number of lines to prefetch ahead of array allocation pointer") \
3067 \
3068 product(intx, AllocateInstancePrefetchLines, 1, \
3069 "Number of lines to prefetch ahead of instance allocation " \
3070 "pointer") \
3071 \
3072 product(intx, AllocatePrefetchStepSize, 16, \
3073 "Step size in bytes of sequential prefetch instructions") \
3074 \
3075 product(intx, AllocatePrefetchInstr, 0, \
3076 "Prefetch instruction to prefetch ahead of allocation pointer") \
3077 \
3078 /* deoptimization */ \
3079 develop(bool, TraceDeoptimization, false, \
3080 "Trace deoptimization") \
3087 "(0 means off)") \
3088 \
3089 product(intx, MaxJavaStackTraceDepth, 1024, \
3090 "The maximum number of lines in the stack trace for Java " \
3091 "exceptions (0 means all)") \
3092 \
3093 NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \
3094 "Guarantee a safepoint (at least) every so many milliseconds " \
3095 "(0 means none)")) \
3096 \
3097 EMBEDDED_ONLY(product(intx, GuaranteedSafepointInterval, 0, \
3098 "Guarantee a safepoint (at least) every so many milliseconds " \
3099 "(0 means none)")) \
3100 \
3101 product(intx, SafepointTimeoutDelay, 10000, \
3102 "Delay in milliseconds for option SafepointTimeout") \
3103 \
3104 product(intx, NmethodSweepActivity, 10, \
3105 "Removes cold nmethods from code cache if > 0. Higher values " \
3106 "result in more aggressive sweeping") \
3107 range(0, 2000) \
3108 \
3109 notproduct(bool, LogSweeper, false, \
3110 "Keep a ring buffer of sweeper activity") \
3111 \
3112 notproduct(intx, SweeperLogEntries, 1024, \
3113 "Number of records in the ring buffer of sweeper activity") \
3114 \
3115 notproduct(intx, MemProfilingInterval, 500, \
3116 "Time between each invocation of the MemProfiler") \
3117 \
3118 develop(intx, MallocCatchPtr, -1, \
3119 "Hit breakpoint when mallocing/freeing this pointer") \
3120 \
3121 notproduct(ccstrlist, SuppressErrorAt, "", \
3122 "List of assertions (file:line) to muzzle") \
3123 \
3124 notproduct(size_t, HandleAllocationLimit, 1024, \
3125 "Threshold for HandleMark allocation when +TraceHandleAllocation "\
3126 "is used") \
3127 \
3216 diagnostic(intx, MallocVerifyInterval, 0, \
3217 "If non-zero, verify C heap after every N calls to " \
3218 "malloc/realloc/free") \
3219 \
3220 diagnostic(intx, MallocVerifyStart, 0, \
3221 "If non-zero, start verifying C heap after Nth call to " \
3222 "malloc/realloc/free") \
3223 \
3224 diagnostic(uintx, MallocMaxTestWords, 0, \
3225 "If non-zero, maximum number of words that malloc/realloc can " \
3226 "allocate (for testing only)") \
3227 \
3228 product(intx, TypeProfileWidth, 2, \
3229 "Number of receiver types to record in call/cast profile") \
3230 \
3231 develop(intx, BciProfileWidth, 2, \
3232 "Number of return bci's to record in ret profile") \
3233 \
3234 product(intx, PerMethodRecompilationCutoff, 400, \
3235 "After recompiling N times, stay in the interpreter (-1=>'Inf')") \
3236 range(-1, max_intx) \
3237 \
3238 product(intx, PerBytecodeRecompilationCutoff, 200, \
3239 "Per-BCI limit on repeated recompilation (-1=>'Inf')") \
3240 range(-1, max_intx) \
3241 \
3242 product(intx, PerMethodTrapLimit, 100, \
3243 "Limit on traps (of one kind) in a method (includes inlines)") \
3244 \
3245 experimental(intx, PerMethodSpecTrapLimit, 5000, \
3246 "Limit on speculative traps (of one kind) in a method " \
3247 "(includes inlines)") \
3248 \
3249 product(intx, PerBytecodeTrapLimit, 4, \
3250 "Limit on traps (of one kind) at a particular BCI") \
3251 \
3252 experimental(intx, SpecTrapLimitExtraEntries, 3, \
3253 "Extra method data trap entries for speculation") \
3254 \
3255 develop(intx, InlineFrequencyRatio, 20, \
3256 "Ratio of call site execution to caller method invocation") \
3257 \
3258 develop_pd(intx, InlineFrequencyCount, \
3259 "Count of call site execution necessary to trigger frequent " \
3260 "inlining") \
3261 \
3262 develop(intx, InlineThrowCount, 50, \
3263 "Force inlining of interpreted methods that throw this often") \
3264 \
3265 develop(intx, InlineThrowMaxSize, 200, \
3266 "Force inlining of throwing methods smaller than this") \
3267 \
3280 \
3281 product(size_t, OldSize, ScaleForWordSize(4*M), \
3282 "Initial tenured generation size (in bytes)") \
3283 \
3284 product(size_t, NewSize, ScaleForWordSize(1*M), \
3285 "Initial new generation size (in bytes)") \
3286 \
3287 product(size_t, MaxNewSize, max_uintx, \
3288 "Maximum new generation size (in bytes), max_uintx means set " \
3289 "ergonomically") \
3290 \
3291 product(size_t, PretenureSizeThreshold, 0, \
3292 "Maximum size in bytes of objects allocated in DefNew " \
3293 "generation; zero means no maximum") \
3294 \
3295 product(size_t, TLABSize, 0, \
3296 "Starting TLAB size (in bytes); zero means set ergonomically") \
3297 \
3298 product(size_t, MinTLABSize, 2*K, \
3299 "Minimum allowed TLAB size (in bytes)") \
3300 range(1, max_uintx) \
3301 \
3302 product(uintx, TLABAllocationWeight, 35, \
3303 "Allocation averaging weight") \
3304 range(0, 100) \
3305 \
3306 /* Limit the lower bound of this flag to 1 as it is used */ \
3307 /* in a division expression. */ \
3308 product(uintx, TLABWasteTargetPercent, 1, \
3309 "Percentage of Eden that can be wasted") \
3310 range(1, 100) \
3311 \
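  /* The division mentioned above is, approximately,                          */ \
  /* target_refills = 100 / (2 * TLABWasteTargetPercent), so the default of 1 */ \
  /* aims for ~50 refills per epoch per thread (assumed form; the exact       */ \
  /* expression lives in the TLAB sizing code).                               */ \
                                                                                  \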
3312 product(uintx, TLABRefillWasteFraction, 64, \
3313 "Maximum TLAB waste at a refill (internal fragmentation)") \
3314 range(1, max_uintx) \
3315 \
3316 product(uintx, TLABWasteIncrement, 4, \
3317 "Increment allowed waste at slow allocation") \
3318 \
3319 product(uintx, SurvivorRatio, 8, \
3320 "Ratio of eden/survivor space size") \
3321 \
3322 product(uintx, NewRatio, 2, \
3323 "Ratio of old/new generation sizes") \
3324 \
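  /* Sizing example from the two ratios above: NewRatio=2 makes the young gen */ \
  /* roughly 1/3 of the heap, and SurvivorRatio=8 divides the young gen       */ \
  /* roughly as eden:from:to = 8:1:1, i.e. each survivor space is about 1/10  */ \
  /* of the young gen.                                                        */ \
                                                                                  \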
3325 product_pd(size_t, NewSizeThreadIncrease, \
3326 "Additional size added to desired new generation size per " \
3327 "non-daemon thread (in bytes)") \
3328 \
3329 product_pd(size_t, MetaspaceSize, \
3330 "Initial size of Metaspaces (in bytes)") \
3331 \
3332 product(size_t, MaxMetaspaceSize, max_uintx, \
3333 "Maximum size of Metaspaces (in bytes)") \
3334 \
3335 product(size_t, CompressedClassSpaceSize, 1*G, \
3336 "Maximum size of class area in Metaspace when compressed " \
3337 "class pointers are used") \
3338 range(1*M, 3*G) \
3339 \
3340 manageable(uintx, MinHeapFreeRatio, 40, \
3341 "The minimum percentage of heap free after GC to avoid expansion."\
3342 " For most GCs this applies to the old generation. In G1 and" \
3343 " ParallelGC it applies to the whole heap.") \
3344 range(0, 100) \
3345 constraint(MinHeapFreeRatioConstraintFunc) \
3346 \
3347 manageable(uintx, MaxHeapFreeRatio, 70, \
3348 "The maximum percentage of heap free after GC to avoid shrinking."\
3349 " For most GCs this applies to the old generation. In G1 and" \
3350 " ParallelGC it applies to the whole heap.") \
3351 range(0, 100) \
3352 constraint(MaxHeapFreeRatioConstraintFunc) \
3353 \
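  /* Example: with MinHeapFreeRatio=40 and MaxHeapFreeRatio=70, the managed   */ \
  /* generation (or the whole heap for G1/ParallelGC) is expanded when less   */ \
  /* than 40% is free after a GC and shrunk when more than 70% is free.       */ \
                                                                                  \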
3354 product(intx, SoftRefLRUPolicyMSPerMB, 1000, \
3355 "Number of milliseconds per MB of free space in the heap") \
3356 \
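  /* Soft reference example: with the default of 1000 and 512M of free heap,  */ \
  /* a softly reachable object is kept for roughly 1000 * 512 ms (~8.5 min)   */ \
  /* since its last access before becoming eligible for clearing              */ \
  /* (approximate; the reference policy implementation decides).              */ \
                                                                                  \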
3357 product(size_t, MinHeapDeltaBytes, ScaleForWordSize(128*K), \
3358 "The minimum change in heap space due to GC (in bytes)") \
3359 \
3360 product(size_t, MinMetaspaceExpansion, ScaleForWordSize(256*K), \
3361 "The minimum expansion of Metaspace (in bytes)") \
3362 \
3363 product(uintx, MaxMetaspaceFreeRatio, 70, \
3364 "The maximum percentage of Metaspace free after GC to avoid " \
3365 "shrinking") \
3366 range(0, 100) \
3367 constraint(MaxMetaspaceFreeRatioConstraintFunc) \
3368 \
3369 product(uintx, MinMetaspaceFreeRatio, 40, \
3370 "The minimum percentage of Metaspace free after GC to avoid " \
3371 "expansion") \
3372 range(0, 99) \
3373 constraint(MinMetaspaceFreeRatioConstraintFunc) \
3374 \
3375 product(size_t, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
3376 "The maximum expansion of Metaspace without full GC (in bytes)") \
3377 \
3378 product(uintx, QueuedAllocationWarningCount, 0, \
3379 "Number of times an allocation that queues behind a GC " \
3380 "will retry before printing a warning") \
3381 \
3382 diagnostic(uintx, VerifyGCStartAt, 0, \
3383 "GC invoke count where +VerifyBefore/AfterGC kicks in") \
3384 \
3385 diagnostic(intx, VerifyGCLevel, 0, \
3386 "Generation level at which to start +VerifyBefore/AfterGC") \
3387 \
3388 product(uintx, MaxTenuringThreshold, 15, \
3389 "Maximum value for tenuring threshold") \
3390 range(0, markOopDesc::max_age + 1) \
3391 constraint(MaxTenuringThresholdConstraintFunc) \
3392 \
3393 product(uintx, InitialTenuringThreshold, 7, \
3394 "Initial value for tenuring threshold") \
3395 range(0, markOopDesc::max_age + 1) \
3396 constraint(InitialTenuringThresholdConstraintFunc) \
3397 \
3398 product(uintx, TargetSurvivorRatio, 50, \
3399 "Desired percentage of survivor space used after scavenge") \
3400 range(0, 100) \
3401 \
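  /* Tenuring example: objects are promoted once their age exceeds the        */ \
  /* current tenuring threshold (bounded by MaxTenuringThreshold); when       */ \
  /* survivor occupancy after a scavenge exceeds TargetSurvivorRatio percent, */ \
  /* the threshold is lowered so that younger objects are promoted sooner.    */ \
                                                                                  \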
3402 product(uintx, MarkSweepDeadRatio, 5, \
3403 "Percentage (0-100) of the old gen allowed as dead wood. " \
3404 "Serial mark sweep treats this as both the minimum and maximum " \
3405 "value. " \
3406 "CMS uses this value only if it falls back to mark sweep. " \
3407 "Par compact uses a variable scale based on the density of the " \
3408 "generation and treats this as the maximum value when the heap " \
3409 "is either completely full or completely empty. Par compact " \
3410 "also has a smaller default value; see arguments.cpp.") \
3411 range(0, 100) \
3412 \
3413 product(uintx, MarkSweepAlwaysCompactCount, 4, \
3414 "How often should we fully compact the heap (ignoring the dead " \
3415 "space parameters)") \
3416 range(1, max_uintx) \
3417 \
3418 product(intx, PrintCMSStatistics, 0, \
3419 "Statistics for CMS") \
3420 \
3421 product(bool, PrintCMSInitiationStatistics, false, \
3422 "Statistics for initiating a CMS collection") \
3423 \
3424 product(intx, PrintFLSStatistics, 0, \
3425 "Statistics for CMS' FreeListSpace") \
3426 \
3427 product(intx, PrintFLSCensus, 0, \
3428 "Census for CMS' FreeListSpace") \
3429 \
3430 develop(uintx, GCExpandToAllocateDelayMillis, 0, \
3431 "Delay between expansion and allocation (in milliseconds)") \
3432 \
3433 develop(uintx, GCWorkerDelayMillis, 0, \
3434 "Delay in scheduling GC workers (in milliseconds)") \
3435 \
3436 product(intx, DeferThrSuspendLoopCount, 4000, \
3437 "(Unstable) Number of times to iterate in safepoint loop " \
3438 "before blocking VM threads ") \
3439 \
3440 product(intx, DeferPollingPageLoopCount, -1, \
3441 "(Unsafe,Unstable) Number of iterations in safepoint loop " \
3442 "before changing safepoint polling page to RO ") \
3443 \
3444 product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \
3445 \
3446 product(bool, PSChunkLargeArrays, true, \
3447 "Process large arrays in chunks") \
3448 \
3449 product(uintx, GCDrainStackTargetSize, 64, \
3450 "Number of entries we will try to leave on the stack " \
3451 "during parallel gc") \
3452 \
3453 /* stack parameters */ \
3454 product_pd(intx, StackYellowPages, \
3455 "Number of yellow zone (recoverable overflows) pages") \
3456 range(1, max_intx) \
3457 \
3458 product_pd(intx, StackRedPages, \
3459 "Number of red zone (unrecoverable overflows) pages") \
3460 range(1, max_intx) \
3461 \
3462 /* greater stack shadow pages can't generate instruction to bang stack */ \
3463 product_pd(intx, StackShadowPages, \
3464           "Number of shadow zone (for overflow checking) pages; "           \
3465 "this should exceed the depth of the VM and native call stack") \
3466 range(1, 50) \
3467 \
3468 product_pd(intx, ThreadStackSize, \
3469 "Thread Stack Size (in Kbytes)") \
3470 \
3471 product_pd(intx, VMThreadStackSize, \
3472 "Non-Java Thread Stack Size (in Kbytes)") \
3473 \
3474 product_pd(intx, CompilerThreadStackSize, \
3475 "Compiler Thread Stack Size (in Kbytes)") \
3476 \
3477 develop_pd(size_t, JVMInvokeMethodSlack, \
3478 "Stack space (bytes) required for JVM_InvokeMethod to complete") \
3479 \
3480 product(size_t, ThreadSafetyMargin, 50*M, \
3481 "Thread safety margin is used on fixed-stack LinuxThreads (on " \
3482 "Linux/x86 only) to prevent heap-stack collision. Set to 0 to " \
3483 "disable this feature") \
3484 \
3485 /* code cache parameters */ \
3486 /* ppc64/tiered compilation has large code-entry alignment. */ \
3487 develop(uintx, CodeCacheSegmentSize, 64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64)),\
3488 "Code cache segment size (in bytes) - smallest unit of " \
3489 "allocation") \
3490 range(1, 1024) \
3491 \
3492 develop_pd(intx, CodeEntryAlignment, \
3493 "Code entry alignment for generated code (in bytes)") \
3494 \
3495 product_pd(intx, OptoLoopAlignment, \
3496 "Align inner loops to zero relative to this modulus") \
3497 \
3498 product_pd(uintx, InitialCodeCacheSize, \
3499 "Initial code cache size (in bytes)") \
3500 \
3501 develop_pd(uintx, CodeCacheMinimumUseSpace, \
3502 "Minimum code cache size (in bytes) required to start VM.") \
3503 \
3504 product(bool, SegmentedCodeCache, false, \
3505 "Use a segmented code cache") \
3506 \
3507 product_pd(uintx, ReservedCodeCacheSize, \
3508 "Reserved code cache size (in bytes) - maximum code cache size") \
3509 \
3510 product_pd(uintx, NonProfiledCodeHeapSize, \
3511 "Size of code heap with non-profiled methods (in bytes)") \
3512 \
3513 product_pd(uintx, ProfiledCodeHeapSize, \
3514 "Size of code heap with profiled methods (in bytes)") \
3515 \
3516 product_pd(uintx, NonNMethodCodeHeapSize, \
3517 "Size of code heap with non-nmethods (in bytes)") \
3518 \
3519 product_pd(uintx, CodeCacheExpansionSize, \
3520 "Code cache expansion size (in bytes)") \
3521 \
3522 develop_pd(uintx, CodeCacheMinBlockLength, \
3523 "Minimum number of segments in a code cache block") \
3524 range(1, 100) \
3525 \
3526 notproduct(bool, ExitOnFullCodeCache, false, \
3527 "Exit the VM if we fill the code cache") \
3528 \
3529 product(bool, UseCodeCacheFlushing, true, \
3530 "Remove cold/old nmethods from the code cache") \
3531 \
3532 product(uintx, StartAggressiveSweepingAt, 10, \
3533           "Start aggressive sweeping if X[%] of the code cache is free. "   \
3534           "Segmented code cache: X[%] of the non-profiled heap. "           \
3535           "Non-segmented code cache: X[%] of the total code cache")         \
3536 range(0, 100) \
3537 \
3538 /* interpreter debugging */ \
3539 develop(intx, BinarySwitchThreshold, 5, \
3540 "Minimal number of lookupswitch entries for rewriting to binary " \
3541 "switch") \
3542 \
3543 develop(intx, StopInterpreterAt, 0, \
3544 "Stop interpreter execution at specified bytecode number") \
3545 \
3546 develop(intx, TraceBytecodesAt, 0, \
3547 "Trace bytecodes starting with specified bytecode number") \
3548 \
3549 /* compiler interface */ \
3550 develop(intx, CIStart, 0, \
3551 "The id of the first compilation to permit") \
3552 \
3553 develop(intx, CIStop, max_jint, \
3554 "The id of the last compilation to permit") \
3555 \
3556 develop(intx, CIStartOSR, 0, \
3577 "Prepend to .hotspot_compiler; e.g. log,java/lang/String.<init>") \
3578 \
3579 develop(bool, ReplayCompiles, false, \
3580 "Enable replay of compilations from ReplayDataFile") \
3581 \
3582 product(ccstr, ReplayDataFile, NULL, \
3583           "File containing compilation replay information "                 \
3584 "[default: ./replay_pid%p.log] (%p replaced with pid)") \
3585 \
3586 product(ccstr, InlineDataFile, NULL, \
3587           "File containing inlining replay information "                    \
3588 "[default: ./inline_pid%p.log] (%p replaced with pid)") \
3589 \
3590 develop(intx, ReplaySuppressInitializers, 2, \
3591 "Control handling of class initialization during replay: " \
3592 "0 - don't do anything special; " \
3593 "1 - treat all class initializers as empty; " \
3594 "2 - treat class initializers for application classes as empty; " \
3595 "3 - allow all class initializers to run during bootstrap but " \
3596 " pretend they are empty after starting replay") \
3597 range(0, 3) \
3598 \
3599 develop(bool, ReplayIgnoreInitErrors, false, \
3600 "Ignore exceptions thrown during initialization for replay") \
3601 \
3602 product(bool, DumpReplayDataOnError, true, \
3603 "Record replay data for crashing compiler threads") \
3604 \
3605 product(bool, CICompilerCountPerCPU, false, \
3606 "1 compiler thread for log(N CPUs)") \
3607 \
3608 develop(intx, CIFireOOMAt, -1, \
3609 "Fire OutOfMemoryErrors throughout CI for testing the compiler " \
3610 "(non-negative value throws OOM after this many CI accesses " \
3611 "in each compile)") \
3612 notproduct(intx, CICrashAt, -1, \
3613 "id of compilation to trigger assert in compiler thread for " \
3614 "the purpose of testing, e.g. generation of replay data") \
3615 notproduct(bool, CIObjectFactoryVerify, false, \
3616 "enable potentially expensive verification in ciObjectFactory") \
3617 \
3622 "0 : Normal. "\
3623 " VM chooses priorities that are appropriate for normal "\
3624 " applications. On Solaris NORM_PRIORITY and above are mapped "\
3625 " to normal native priority. Java priorities below " \
3626 " NORM_PRIORITY map to lower native priority values. On "\
3627 " Windows applications are allowed to use higher native "\
3628 " priorities. However, with ThreadPriorityPolicy=0, VM will "\
3629 " not use the highest possible native priority, "\
3630 " THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with "\
3631 " system threads. On Linux thread priorities are ignored "\
3632 " because the OS does not support static priority in "\
3633 " SCHED_OTHER scheduling class which is the only choice for "\
3634 " non-root, non-realtime applications. "\
3635 "1 : Aggressive. "\
3636 " Java thread priorities map over to the entire range of "\
3637 " native thread priorities. Higher Java thread priorities map "\
3638 " to higher native thread priorities. This policy should be "\
3639 " used with care, as sometimes it can cause performance "\
3640 " degradation in the application and/or the entire system. On "\
3641 " Linux this policy requires root privilege.") \
3642 range(0, 1) \
3643 \
3644 product(bool, ThreadPriorityVerbose, false, \
3645 "Print priority changes") \
3646 \
3647 product(intx, CompilerThreadPriority, -1, \
3648 "The native priority at which compiler threads should run " \
3649 "(-1 means no change)") \
3650 \
3651 product(intx, VMThreadPriority, -1, \
3652 "The native priority at which the VM thread should run " \
3653 "(-1 means no change)") \
3654 \
3655 product(bool, CompilerThreadHintNoPreempt, true, \
3656           "(Solaris only) Give compiler threads an extra quantum")          \
3657 \
3658 product(bool, VMThreadHintNoPreempt, false, \
3659           "(Solaris only) Give the VM thread an extra quantum")             \
3660 \
3661 product(intx, JavaPriority1_To_OSPriority, -1, \
3662 "Map Java priorities to OS priorities") \
3806 "reaches this amount per compiler thread") \
3807 \
3808 product(intx, Tier4LoadFeedback, 3, \
3809 "Tier 4 thresholds will increase twofold when C2 queue size " \
3810 "reaches this amount per compiler thread") \
3811 \
3812 product(intx, TieredCompileTaskTimeout, 50, \
3813 "Kill compile task if method was not used within " \
3814 "given timeout in milliseconds") \
3815 \
3816 product(intx, TieredStopAtLevel, 4, \
3817 "Stop at given compilation level") \
3818 \
3819 product(intx, Tier0ProfilingStartPercentage, 200, \
3820 "Start profiling in interpreter if the counters exceed tier 3 " \
3821 "thresholds by the specified percentage") \
3822 \
3823 product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \
3824 "Increase the compile threshold for C1 compilation if the code " \
3825 "cache is filled by the specified percentage") \
3826 range(0, 99) \
3827 \
3828 product(intx, TieredRateUpdateMinTime, 1, \
3829 "Minimum rate sampling interval (in milliseconds)") \
3830 \
3831 product(intx, TieredRateUpdateMaxTime, 25, \
3832 "Maximum rate sampling interval (in milliseconds)") \
3833 \
3834 product_pd(bool, TieredCompilation, \
3835 "Enable tiered compilation") \
3836 \
3837 product(bool, PrintTieredEvents, false, \
3838 "Print tiered events notifications") \
3839 \
3840 product_pd(intx, OnStackReplacePercentage, \
3841 "NON_TIERED number of method invocations/branches (expressed as " \
3842 "% of CompileThreshold) before (re-)compiling OSR code") \
3843 \
3844 product(intx, InterpreterProfilePercentage, 33, \
3845 "NON_TIERED number of method invocations/branches (expressed as " \
3846 "% of CompileThreshold) before profiling in the interpreter") \
3847 range(0, 100) \
3848 \
3849 develop(intx, MaxRecompilationSearchLength, 10, \
3850 "The maximum number of frames to inspect when searching for " \
3851 "recompilee") \
3852 \
3853 develop(intx, MaxInterpretedSearchLength, 3, \
3854 "The maximum number of interpreted frames to skip when searching "\
3855 "for recompilee") \
3856 \
3857 develop(intx, DesiredMethodLimit, 8000, \
3858 "The desired maximum method size (in bytecodes) after inlining") \
3859 \
3860 develop(intx, HugeMethodLimit, 8000, \
3861 "Don't compile methods larger than this if " \
3862 "+DontCompileHugeMethods") \
3863 \
3864 /* New JDK 1.4 reflection implementation */ \
3865 \
3866 develop(intx, FastSuperclassLimit, 8, \
3867 "Depth of hardwired instanceof accelerator array") \
3907 \
3908 product(bool, PerfDisableSharedMem, false, \
3909 "Store performance data in standard memory") \
3910 \
3911 product(intx, PerfDataMemorySize, 32*K, \
3912 "Size of performance data memory region. Will be rounded " \
3913           "up to a multiple of the native OS page size.")                   \
3914 \
3915 product(intx, PerfMaxStringConstLength, 1024, \
3916 "Maximum PerfStringConstant string length before truncation") \
3917 \
3918 product(bool, PerfAllowAtExitRegistration, false, \
3919 "Allow registration of atexit() methods") \
3920 \
3921 product(bool, PerfBypassFileSystemCheck, false, \
3922 "Bypass Win32 file system criteria checks (Windows Only)") \
3923 \
3924 product(intx, UnguardOnExecutionViolation, 0, \
3925 "Unguard page and retry on no-execute fault (Win32 only) " \
3926 "0=off, 1=conservative, 2=aggressive") \
3927 range(0, 2) \
3928 \
3929 /* Serviceability Support */ \
3930 \
3931 product(bool, ManagementServer, false, \
3932 "Create JMX Management Server") \
3933 \
3934 product(bool, DisableAttachMechanism, false, \
3935 "Disable mechanism that allows tools to attach to this VM") \
3936 \
3937 product(bool, StartAttachListener, false, \
3938 "Always start Attach Listener at VM startup") \
3939 \
3940 manageable(bool, PrintConcurrentLocks, false, \
3941 "Print java.util.concurrent locks in thread dump") \
3942 \
3943 product(bool, TransmitErrorReport, false, \
3944 "Enable error report transmission on erroneous termination") \
3945 \
3946 product(ccstr, ErrorReportServer, NULL, \
3947 "Override built-in error report server address") \
4028 diagnostic(bool, PauseAtExit, false, \
4029 "Pause and wait for keypress on exit if a debugger is attached") \
4030 \
4031 product(bool, ExtendedDTraceProbes, false, \
4032 "Enable performance-impacting dtrace probes") \
4033 \
4034 product(bool, DTraceMethodProbes, false, \
4035 "Enable dtrace probes for method-entry and method-exit") \
4036 \
4037 product(bool, DTraceAllocProbes, false, \
4038 "Enable dtrace probes for object allocation") \
4039 \
4040 product(bool, DTraceMonitorProbes, false, \
4041 "Enable dtrace probes for monitor events") \
4042 \
4043 product(bool, RelaxAccessControlCheck, false, \
4044 "Relax the access control checks in the verifier") \
4045 \
4046 product(uintx, StringTableSize, defaultStringTableSize, \
4047 "Number of buckets in the interned String table") \
4048 range(minimumStringTableSize, 111*defaultStringTableSize) \
4049 \
4050 experimental(uintx, SymbolTableSize, defaultSymbolTableSize, \
4051 "Number of buckets in the JVM internal Symbol table") \
4052 range(minimumSymbolTableSize, 111*defaultSymbolTableSize) \
4053 \
4054 product(bool, UseStringDeduplication, false, \
4055 "Use string deduplication") \
4056 \
4057 product(bool, PrintStringDeduplicationStatistics, false, \
4058 "Print string deduplication statistics") \
4059 \
4060 product(uintx, StringDeduplicationAgeThreshold, 3, \
4061 "A string must reach this age (or be promoted to an old region) " \
4062 "to be considered for deduplication") \
4063 range(1, markOopDesc::max_age) \
4064 \
4065 diagnostic(bool, StringDeduplicationResizeALot, false, \
4066 "Force table resize every time the table is scanned") \
4067 \
4068 diagnostic(bool, StringDeduplicationRehashALot, false, \
4069 "Force table rehash every time the table is scanned") \
4070 \
4071 develop(bool, TraceDefaultMethods, false, \
4072 "Trace the default method processing steps") \
4073 \
4074 develop(bool, VerifyGenericSignatures, false, \
4075 "Abort VM on erroneous or inconsistent generic signatures") \
4076 \
4077 diagnostic(bool, WhiteBoxAPI, false, \
4078 "Enable internal testing APIs") \
4079 \
4080 product(bool, PrintGCCause, true, \
4081 "Include GC cause in GC logging") \
4082 \
4083 experimental(intx, SurvivorAlignmentInBytes, 0, \
4084 "Default survivor space alignment in bytes") \
4085 constraint(SurvivorAlignmentInBytesConstraintFunc) \
4086 \
4087     product(bool, AllowNonVirtualCalls, false,                                \
4088 "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \
4089 \
4090 product(ccstr, DumpLoadedClassList, NULL, \
4091           "Dump the names of all loaded classes that could be stored into " \
4092 "the CDS archive, in the specified file") \
4093 \
4094 product(ccstr, SharedClassListFile, NULL, \
4095 "Override the default CDS class list") \
4096 \
4097 diagnostic(ccstr, SharedArchiveFile, NULL, \
4098 "Override the default location of the CDS archive file") \
4099 \
4100 product(ccstr, ExtraSharedClassListFile, NULL, \
4101 "Extra classlist for building the CDS archive file") \
4102 \
4103 experimental(size_t, ArrayAllocatorMallocLimit, \
4104 SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1), \
4105 "Allocation less than this value will be allocated " \
4123
4124 /*
4125 * Macros for factoring of globals
4126 */
4127
4128 // Interface macros
4129 #define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
4130 #define DECLARE_PD_PRODUCT_FLAG(type, name, doc) extern "C" type name;
4131 #define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name;
4132 #define DECLARE_EXPERIMENTAL_FLAG(type, name, value, doc) extern "C" type name;
4133 #define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name;
4134 #define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name;
4135 #ifdef PRODUCT
4136 #define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type CONST_##name; const type name = value;
4137 #define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type CONST_##name; const type name = pd_##name;
4138 #define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type CONST_##name;
4139 #else
4140 #define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type name;
4141 #define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name;
4142 #define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name;
4143 #endif // PRODUCT
4144 // Special LP64 flags, product only needed for now.
4145 #ifdef _LP64
4146 #define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
4147 #else
4148 #define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) const type name = value;
4149 #endif // _LP64
4150
4151 // Implementation macros
4152 #define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value;
4153 #define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc) type name = pd_##name;
4154 #define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value;
4155 #define MATERIALIZE_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value;
4156 #define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value;
4157 #define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value;
4158 #ifdef PRODUCT
4159 #define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type CONST_##name = value;
4160 #define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type CONST_##name = pd_##name;
4161 #define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type CONST_##name = value;
4162 #else
4163 #define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value;
4164 #define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name;
4165 #define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
4166 #endif // PRODUCT
4167 #ifdef _LP64
4168 #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
4169 #else
4170 #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
4171 #endif // _LP64
4172
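// For example, a table entry of the form
//   product(uintx, GCTimeRatio, 99, "Adaptive size policy application time to GC time ratio")
// expands through DECLARE_PRODUCT_FLAG to
//   extern "C" uintx GCTimeRatio;
// in this header, and through MATERIALIZE_PRODUCT_FLAG to
//   uintx GCTimeRatio = 99;
// in the implementation file that applies the MATERIALIZE_* macros. In PRODUCT
// builds, develop entries instead become compile-time constants and notproduct
// entries are reduced to the CONST_##name placeholders (see the #ifdef PRODUCT
// variants above).
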
4173 // Only materialize src code for range checking when required, ignore otherwise
4174 #define IGNORE_RANGE(a, b)
4175 // Only materialize src code for constraint checking when required, ignore otherwise
4176 #define IGNORE_CONSTRAINT(func)
4177
4178 RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, \
4179 DECLARE_PD_DEVELOPER_FLAG, \
4180 DECLARE_PRODUCT_FLAG, \
4181 DECLARE_PD_PRODUCT_FLAG, \
4182 DECLARE_DIAGNOSTIC_FLAG, \
4183 DECLARE_EXPERIMENTAL_FLAG, \
4184 DECLARE_NOTPRODUCT_FLAG, \
4185 DECLARE_MANAGEABLE_FLAG, \
4186 DECLARE_PRODUCT_RW_FLAG, \
4187 DECLARE_LP64_PRODUCT_FLAG, \
4188 IGNORE_RANGE, \
4189 IGNORE_CONSTRAINT)
4190
4191 RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, \
4192 DECLARE_PD_DEVELOPER_FLAG, \
4193 DECLARE_PRODUCT_FLAG, \
4194 DECLARE_PD_PRODUCT_FLAG, \
4195 DECLARE_DIAGNOSTIC_FLAG, \
4196 DECLARE_NOTPRODUCT_FLAG, \
4197 IGNORE_RANGE, \
4198 IGNORE_CONSTRAINT)
4199
4200 ARCH_FLAGS(DECLARE_DEVELOPER_FLAG, \
4201 DECLARE_PRODUCT_FLAG, \
4202 DECLARE_DIAGNOSTIC_FLAG, \
4203 DECLARE_EXPERIMENTAL_FLAG, \
4204 DECLARE_NOTPRODUCT_FLAG, \
4205 IGNORE_RANGE, \
4206 IGNORE_CONSTRAINT)
4207
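// The same table can be expanded with other argument macros; the declaration
// blocks above and the MATERIALIZE_* application in the implementation file
// follow this X-macro pattern. A minimal sketch (hypothetical names, not part
// of this header) that counts the product flags:
//
//   #define COUNT_FLAG(type, name, value, doc)   + 1
//   #define COUNT_PD_FLAG(type, name, doc)       + 1
//   #define SKIP_FLAG(type, name, value, doc)
//   #define SKIP_PD_FLAG(type, name, doc)
//
//   static const int num_product_flags = 0
//       RUNTIME_FLAGS(SKIP_FLAG, SKIP_PD_FLAG, COUNT_FLAG, COUNT_PD_FLAG,
//                     SKIP_FLAG, SKIP_FLAG, SKIP_FLAG, SKIP_FLAG, SKIP_FLAG,
//                     SKIP_FLAG, IGNORE_RANGE, IGNORE_CONSTRAINT);
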
4208 // Extensions
4209
4210 #include "runtime/globals_ext.hpp"
4211
4212 #endif // SHARE_VM_RUNTIME_GLOBALS_HPP
|