64 address uep;
65 int line;
66
  // Dump this record to the tty as a single line. The fields mirror the
  // values captured by NMethodSweeper::record_sweep().
  void print() {
      // NOTE(review): "%ld" assumes traversal_mark is a 'long'; on LLP64
      // targets (Windows) 'long' is 32-bit, so JLONG_FORMAT would be more
      // portable if the field is a jlong -- the declaration is not visible
      // in this chunk, confirm before changing.
      tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                    PTR_FORMAT " state = %d traversal_mark %ld line = %d",
                    traversal,
                    compile_id,
                    kind == NULL ? "" : kind,  // kind may be NULL; print empty string instead
                    p2i(uep),
                    p2i(vep),
                    state,
                    traversal_mark,
                    line);
  }
79 };
80
// Ring-buffer cursor: index of the next SweeperRecord slot to be overwritten
// (and therefore also the oldest record in the buffer once it has wrapped).
static int _sweep_index = 0;
// Ring buffer of SweeperLogEntries records; stays NULL unless sweeper logging
// is enabled, in which case it is allocated lazily from the C heap.
static SweeperRecord* _records = NULL;
83
84 void NMethodSweeper::report_events(int id, address entry) {
85 if (_records != NULL) {
86 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
87 if (_records[i].uep == entry ||
88 _records[i].vep == entry ||
89 _records[i].compile_id == id) {
90 _records[i].print();
91 }
92 }
93 for (int i = 0; i < _sweep_index; i++) {
94 if (_records[i].uep == entry ||
95 _records[i].vep == entry ||
96 _records[i].compile_id == id) {
97 _records[i].print();
98 }
99 }
100 }
101 }
102
103 void NMethodSweeper::report_events() {
104 if (_records != NULL) {
105 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
106 // skip empty records
107 if (_records[i].vep == NULL) continue;
108 _records[i].print();
109 }
110 for (int i = 0; i < _sweep_index; i++) {
111 // skip empty records
112 if (_records[i].vep == NULL) continue;
113 _records[i].print();
114 }
115 }
116 }
117
118 void NMethodSweeper::record_sweep(CompiledMethod* nm, int line) {
119 if (_records != NULL) {
120 _records[_sweep_index].traversal = _traversals;
121 _records[_sweep_index].traversal_mark = nm->is_nmethod() ? ((nmethod*)nm)->stack_traversal_mark() : 0;
122 _records[_sweep_index].compile_id = nm->compile_id();
123 _records[_sweep_index].kind = nm->compile_kind();
124 _records[_sweep_index].state = nm->get_state();
125 _records[_sweep_index].vep = nm->verified_entry_point();
126 _records[_sweep_index].uep = nm->entry_point();
127 _records[_sweep_index].line = line;
128 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
129 }
130 }
131
132 void NMethodSweeper::init_sweeper_log() {
133 if (LogSweeper && _records == NULL) {
134 // Create the ring buffer for the logging code
135 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
136 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
137 }
192 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
193 }
194 return _hotness_counter_reset_val;
195 }
196 bool NMethodSweeper::wait_for_stack_scanning() {
197 return _current.end();
198 }
199
200 class NMethodMarkingClosure : public HandshakeClosure {
201 private:
202 CodeBlobClosure* _cl;
203 public:
204 NMethodMarkingClosure(CodeBlobClosure* cl) : HandshakeClosure("NMethodMarking"), _cl(cl) {}
205 void do_thread(Thread* thread) {
206 if (thread->is_Java_thread() && ! thread->is_Code_cache_sweeper_thread()) {
207 JavaThread* jt = (JavaThread*) thread;
208 jt->nmethods_do(_cl);
209 }
210 }
211 };
212
213 class NMethodMarkingTask : public AbstractGangTask {
214 private:
215 NMethodMarkingClosure* _cl;
216 public:
217 NMethodMarkingTask(NMethodMarkingClosure* cl) :
218 AbstractGangTask("Parallel NMethod Marking"),
219 _cl(cl) {
220 Threads::change_thread_claim_token();
221 }
222
223 ~NMethodMarkingTask() {
224 Threads::assert_all_threads_claimed();
225 }
226
227 void work(uint worker_id) {
228 Threads::possibly_parallel_threads_do(true, _cl);
229 }
230 };
231
232 /**
233 * Scans the stacks of all Java threads and marks activations of not-entrant methods.
234 * No need to synchronize access, since 'mark_active_nmethods' is always executed at a
235 * safepoint.
236 */
237 void NMethodSweeper::mark_active_nmethods() {
238 CodeBlobClosure* cl = prepare_mark_active_nmethods();
239 if (cl != NULL) {
240 WorkGang* workers = Universe::heap()->get_safepoint_workers();
241 if (workers != NULL) {
242 NMethodMarkingClosure tcl(cl);
243 NMethodMarkingTask task(&tcl);
244 workers->run_task(&task);
245 } else {
246 Threads::nmethods_do(cl);
247 }
248 }
249 }
250
251 CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
252 #ifdef ASSERT
253 assert(Thread::current()->is_Code_cache_sweeper_thread(), "must be executed under CodeCache_lock and in sweeper thread");
254 assert_lock_strong(CodeCache_lock);
255 #endif
256
257 // If we do not want to reclaim not-entrant or zombie methods there is no need
258 // to scan stacks
259 if (!MethodFlushing) {
260 return NULL;
261 }
262
263 // Increase time so that we can estimate when to invoke the sweeper again.
264 _time_counter++;
265
266 // Check for restart
267 assert(_current.method() == NULL, "should only happen between sweeper cycles");
268 assert(wait_for_stack_scanning(), "should only happen between sweeper cycles");
269
|
64 address uep;
65 int line;
66
  // Dump this record to the tty as a single line. The fields mirror the
  // values captured by NMethodSweeper::record_sweep().
  void print() {
      // NOTE(review): "%ld" assumes traversal_mark is a 'long'; on LLP64
      // targets (Windows) 'long' is 32-bit, so JLONG_FORMAT would be more
      // portable if the field is a jlong -- the declaration is not visible
      // in this chunk, confirm before changing.
      tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                    PTR_FORMAT " state = %d traversal_mark %ld line = %d",
                    traversal,
                    compile_id,
                    kind == NULL ? "" : kind,  // kind may be NULL; print empty string instead
                    p2i(uep),
                    p2i(vep),
                    state,
                    traversal_mark,
                    line);
  }
79 };
80
// Ring-buffer cursor: index of the next SweeperRecord slot to be overwritten
// (and therefore also the oldest record in the buffer once it has wrapped).
static int _sweep_index = 0;
// Ring buffer of SweeperLogEntries records; stays NULL unless sweeper logging
// is enabled, in which case it is allocated lazily from the C heap.
static SweeperRecord* _records = NULL;
83
84 void NMethodSweeper::record_sweep(CompiledMethod* nm, int line) {
85 if (_records != NULL) {
86 _records[_sweep_index].traversal = _traversals;
87 _records[_sweep_index].traversal_mark = nm->is_nmethod() ? ((nmethod*)nm)->stack_traversal_mark() : 0;
88 _records[_sweep_index].compile_id = nm->compile_id();
89 _records[_sweep_index].kind = nm->compile_kind();
90 _records[_sweep_index].state = nm->get_state();
91 _records[_sweep_index].vep = nm->verified_entry_point();
92 _records[_sweep_index].uep = nm->entry_point();
93 _records[_sweep_index].line = line;
94 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
95 }
96 }
97
98 void NMethodSweeper::init_sweeper_log() {
99 if (LogSweeper && _records == NULL) {
100 // Create the ring buffer for the logging code
101 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
102 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
103 }
158 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
159 }
160 return _hotness_counter_reset_val;
161 }
162 bool NMethodSweeper::wait_for_stack_scanning() {
163 return _current.end();
164 }
165
166 class NMethodMarkingClosure : public HandshakeClosure {
167 private:
168 CodeBlobClosure* _cl;
169 public:
170 NMethodMarkingClosure(CodeBlobClosure* cl) : HandshakeClosure("NMethodMarking"), _cl(cl) {}
171 void do_thread(Thread* thread) {
172 if (thread->is_Java_thread() && ! thread->is_Code_cache_sweeper_thread()) {
173 JavaThread* jt = (JavaThread*) thread;
174 jt->nmethods_do(_cl);
175 }
176 }
177 };
178
179 CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
180 #ifdef ASSERT
181 assert(Thread::current()->is_Code_cache_sweeper_thread(), "must be executed under CodeCache_lock and in sweeper thread");
182 assert_lock_strong(CodeCache_lock);
183 #endif
184
185 // If we do not want to reclaim not-entrant or zombie methods there is no need
186 // to scan stacks
187 if (!MethodFlushing) {
188 return NULL;
189 }
190
191 // Increase time so that we can estimate when to invoke the sweeper again.
192 _time_counter++;
193
194 // Check for restart
195 assert(_current.method() == NULL, "should only happen between sweeper cycles");
196 assert(wait_for_stack_scanning(), "should only happen between sweeper cycles");
197
|