40
41 // CPU feature flags.
// NOTE(review): _features looks like a capability bitmask with
// _features_string as its printable rendering -- confirm in the platform .cpp.
42 static uint64_t _features;
43 static const char* _features_string;
44
45 // These are set by machine-dependent initializations
// Runtime-detected fallback read by supports_cx8() when the platform does
// not define SUPPORTS_NATIVE_CX8 at compile time.
46 static bool _supports_cx8;
// Runtime-detected support for 4-/8-byte atomic get-and-set and
// get-and-add; read by the supports_atomic_get* accessors below.
47 static bool _supports_atomic_getset4;
48 static bool _supports_atomic_getset8;
49 static bool _supports_atomic_getadd4;
50 static bool _supports_atomic_getadd8;
51 static unsigned int _logical_processors_per_package;
52 static unsigned int _L1_data_cache_line_size;
// Components of the VM version number.
53 static int _vm_major_version;
54 static int _vm_minor_version;
55 static int _vm_security_version;
56 static int _vm_patch_version;
57 static int _vm_build_number;
// Cached result of parallel_worker_threads(); the bool records whether
// the cached value has been computed yet.
58 static unsigned int _parallel_worker_threads;
59 static bool _parallel_worker_threads_initialized;
// Tail space reserved in a TLAB for allocation-prefetch instructions
// (see reserve_for_allocation_prefetch() below).
60 static int _reserve_for_allocation_prefetch;
61
// Helper used when sizing the parallel worker pool; declaration only --
// the policy lives in the implementation file.
62 static unsigned int nof_parallel_worker_threads(unsigned int num,
63 unsigned int dem,
64 unsigned int switch_pt);
65 public:
66 // Called as part of the runtime services initialization which is
67 // called from the management module initialization (via init_globals())
68 // after argument parsing and attaching of the main thread has
69 // occurred. Examines a variety of the hardware capabilities of
70 // the platform to determine which features can be used to execute the
71 // program.
72 static void initialize();
73
74 // This allows for early initialization of VM_Version information
75 // that may be needed later in the initialization sequence but before
76 // full VM_Version initialization is possible. It can not depend on any
77 // other part of the VM being initialized when called. Platforms that
78 // need to specialize this define VM_Version::early_initialize().
// Intentionally a no-op by default.
79 static void early_initialize() { }
80
// Does the hardware support an 8-byte compare-exchange?
// Resolved at compile time (always true) when the platform defines
// SUPPORTS_NATIVE_CX8; otherwise falls back to the runtime-detected flag.
// NOTE(review): cx8 presumably refers to cmpxchg8b-style 8-byte CAS --
// confirm against the platform-specific definitions.
120 static bool supports_cx8() {
121 #ifdef SUPPORTS_NATIVE_CX8
122 return true;
123 #else
124 return _supports_cx8;
125 #endif
126 }
127 // does HW support atomic get-and-set or atomic get-and-add? Used
128 // to guide intrinsification decisions for Unsafe atomic ops
129 static bool supports_atomic_getset4() {return _supports_atomic_getset4;}
130 static bool supports_atomic_getset8() {return _supports_atomic_getset8;}
131 static bool supports_atomic_getadd4() {return _supports_atomic_getadd4;}
132 static bool supports_atomic_getadd8() {return _supports_atomic_getadd8;}
133
134 static unsigned int logical_processors_per_package() {
135 return _logical_processors_per_package;
136 }
137
138 static unsigned int L1_data_cache_line_size() {
139 return _L1_data_cache_line_size;
140 }
141
142 // Need a space at the end of TLAB for prefetch instructions
143 // which may fault when accessing memory outside of heap.
144 static int reserve_for_allocation_prefetch() {
145 return _reserve_for_allocation_prefetch;
146 }
147
// Architecture-specific policy for BiasedLocking; the default
// implementation enables it unconditionally.
static bool use_biased_locking() {
  return true;
}
150
151 // Number of page sizes efficiently supported by the hardware. Most chips now
152 // support two sizes, thus this default implementation. Processor-specific
153 // subclasses should define new versions to hide this one as needed. Note
154 // that the O/S may support more sizes, but at most this many are used.
155 static uint page_size_count() { return 2; }
156
157 // Returns the number of parallel threads to be used for VM
158 // work. If that number has not been calculated, do so and
159 // save it. Returns ParallelGCThreads if it is set on the
160 // command line.
// The computed value is cached in _parallel_worker_threads (guarded by
// _parallel_worker_threads_initialized).
161 static unsigned int parallel_worker_threads();
162 // Calculates and returns the number of parallel threads. May
163 // be VM version specific.
164 static unsigned int calc_parallel_worker_threads();
165
|
40
41 // CPU feature flags.
// NOTE(review): _features looks like a capability bitmask with
// _features_string as its printable rendering -- confirm in the platform .cpp.
42 static uint64_t _features;
43 static const char* _features_string;
44
45 // These are set by machine-dependent initializations
// Runtime-detected fallback read by supports_cx8() when the platform does
// not define SUPPORTS_NATIVE_CX8 at compile time.
46 static bool _supports_cx8;
// Runtime-detected support for 4-/8-byte atomic get-and-set and
// get-and-add; read by the supports_atomic_get* accessors below.
47 static bool _supports_atomic_getset4;
48 static bool _supports_atomic_getset8;
49 static bool _supports_atomic_getadd4;
50 static bool _supports_atomic_getadd8;
51 static unsigned int _logical_processors_per_package;
52 static unsigned int _L1_data_cache_line_size;
// Components of the VM version number.
53 static int _vm_major_version;
54 static int _vm_minor_version;
55 static int _vm_security_version;
56 static int _vm_patch_version;
57 static int _vm_build_number;
// Cached result of parallel_worker_threads(); the bool records whether
// the cached value has been computed yet.
58 static unsigned int _parallel_worker_threads;
59 static bool _parallel_worker_threads_initialized;
60
// Helper used when sizing the parallel worker pool; declaration only --
// the policy lives in the implementation file.
61 static unsigned int nof_parallel_worker_threads(unsigned int num,
62 unsigned int dem,
63 unsigned int switch_pt);
64 public:
65 // Called as part of the runtime services initialization which is
66 // called from the management module initialization (via init_globals())
67 // after argument parsing and attaching of the main thread has
68 // occurred. Examines a variety of the hardware capabilities of
69 // the platform to determine which features can be used to execute the
70 // program.
71 static void initialize();
72
73 // This allows for early initialization of VM_Version information
74 // that may be needed later in the initialization sequence but before
75 // full VM_Version initialization is possible. It can not depend on any
76 // other part of the VM being initialized when called. Platforms that
77 // need to specialize this define VM_Version::early_initialize().
// Intentionally a no-op by default.
78 static void early_initialize() { }
79
// Does the hardware support an 8-byte compare-exchange?
// Resolved at compile time (always true) when the platform defines
// SUPPORTS_NATIVE_CX8; otherwise falls back to the runtime-detected flag.
// NOTE(review): cx8 presumably refers to cmpxchg8b-style 8-byte CAS --
// confirm against the platform-specific definitions.
119 static bool supports_cx8() {
120 #ifdef SUPPORTS_NATIVE_CX8
121 return true;
122 #else
123 return _supports_cx8;
124 #endif
125 }
126 // does HW support atomic get-and-set or atomic get-and-add? Used
127 // to guide intrinsification decisions for Unsafe atomic ops
128 static bool supports_atomic_getset4() {return _supports_atomic_getset4;}
129 static bool supports_atomic_getset8() {return _supports_atomic_getset8;}
130 static bool supports_atomic_getadd4() {return _supports_atomic_getadd4;}
131 static bool supports_atomic_getadd8() {return _supports_atomic_getadd8;}
132
133 static unsigned int logical_processors_per_package() {
134 return _logical_processors_per_package;
135 }
136
137 static unsigned int L1_data_cache_line_size() {
138 return _L1_data_cache_line_size;
139 }
140
// Architecture-specific policy for BiasedLocking; the default
// implementation enables it unconditionally.
static bool use_biased_locking() {
  return true;
}
143
144 // Number of page sizes efficiently supported by the hardware. Most chips now
145 // support two sizes, thus this default implementation. Processor-specific
146 // subclasses should define new versions to hide this one as needed. Note
147 // that the O/S may support more sizes, but at most this many are used.
148 static uint page_size_count() { return 2; }
149
150 // Returns the number of parallel threads to be used for VM
151 // work. If that number has not been calculated, do so and
152 // save it. Returns ParallelGCThreads if it is set on the
153 // command line.
// The computed value is cached in _parallel_worker_threads (guarded by
// _parallel_worker_threads_initialized).
154 static unsigned int parallel_worker_threads();
155 // Calculates and returns the number of parallel threads. May
156 // be VM version specific.
157 static unsigned int calc_parallel_worker_threads();
158
|