/*
 * Copyright (c) 2019, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "memory/resourceArea.hpp"

ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops) :
  _nm(nm), _oops(NULL), _oops_count(0), _unregistered(false) {

  if (!oops.is_empty()) {
    _oops_count = oops.length();
    _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    for (int c = 0; c < _oops_count; c++) {
      _oops[c] = oops.at(c);
    }
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}

ShenandoahNMethod::~ShenandoahNMethod() {
  if (_oops != NULL) {
    FREE_C_HEAP_ARRAY(oop*, _oops);
  }
}

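// Closure that visits the oops referenced by an nmethod and records
// whether any of them point into the current collection set.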
class ShenandoahHasCSetOopClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;
  bool                  _has_cset_oops;

public:
  ShenandoahHasCSetOopClosure(ShenandoahHeap *heap) :
    _heap(heap),
    _has_cset_oops(false) {
  }

  bool has_cset_oops() const {
    return _has_cset_oops;
  }

  void do_oop(oop* p) {
    oop value = RawAccess<>::oop_load(p);
    if (!_has_cset_oops && _heap->in_collection_set(value)) {
      _has_cset_oops = true;
    }
  }

  void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) {
  ShenandoahHasCSetOopClosure cl(heap);
  oops_do(&cl);
  return cl.has_cset_oops();
}

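// Re-detect the immediate oops in the nmethod and refresh the recorded
// oop* locations, resizing the backing array if their number changed.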
void ShenandoahNMethod::update() {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;

  detect_reloc_oops(nm(), oops, non_immediate_oops);
  if (oops.length() != _oops_count) {
    if (_oops != NULL) {
      FREE_C_HEAP_ARRAY(oop*, _oops);
      _oops = NULL;
    }

    _oops_count = oops.length();
    if (_oops_count > 0) {
      _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    }
  }

  for (int index = 0; index < _oops_count; index ++) {
    _oops[index] = oops.at(index);
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}

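// Visit both the recorded immediate oop locations and the oops in the
// nmethod's oop section, skipping non-oop words. Optionally fix oop
// relocations in the code stream when non-immediate oops are present.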
void ShenandoahNMethod::oops_do(OopClosure* oops, bool fix_relocations) {
  for (int c = 0; c < _oops_count; c ++) {
    oops->do_oop(_oops[c]);
  }

  oop* const begin = _nm->oops_begin();
  oop* const end = _nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      oops->do_oop(p);
    }
  }

  if (fix_relocations && _has_non_immed_oops) {
    _nm->fix_oop_relocations();
  }
}

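// Walk the nmethod's relocation entries: collect the addresses of non-NULL
// immediate oops and note whether any non-immediate oops exist.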
void ShenandoahNMethod::detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops, bool& has_non_immed_oops) {
  has_non_immed_oops = false;
  // Find all oop relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();
    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      has_non_immed_oops = true;
      continue;
    }

    oop value = r->oop_value();
    if (value != NULL) {
      oop* addr = r->oop_addr();
      shenandoah_assert_correct(addr, value);
      shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
      shenandoah_assert_not_forwarded(addr, value);
      // Non-NULL immediate oop found. NULL oops can safely be
      // ignored since the method will be re-registered if they
      // are later patched to be non-NULL.
      oops.push(addr);
    }
  }
}

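// Create GC data for the nmethod. Returns NULL when concurrent class
// unloading is off and the nmethod embeds no oops at all, since such
// nmethods do not need to be tracked.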
ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;

  detect_reloc_oops(nm, oops, non_immediate_oops);

  // No embedded oops
  if (!ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() &&
      oops.is_empty() && nm->oops_begin() >= nm->oops_end()) {
    return NULL;
  }

  return new ShenandoahNMethod(nm, oops, non_immediate_oops);
}

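// Keeps objects referenced from an nmethod alive during concurrent marking
// by enqueueing them through the barrier set, resolving forwardees first
// when the heap has forwarded objects.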
template <bool HAS_FWD>
class ShenandoahKeepNMethodMetadataAliveClosure : public OopClosure {
private:
  ShenandoahBarrierSet* const _bs;
public:
  ShenandoahKeepNMethodMetadataAliveClosure() :
    _bs(static_cast<ShenandoahBarrierSet*>(BarrierSet::barrier_set())) {
  }

  virtual void do_oop(oop* p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      if (HAS_FWD) {
        obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      }
      _bs->enqueue(obj);
    }
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

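// Fix up the oops embedded in an nmethod according to the current GC phase:
// keep them alive during concurrent mark, evacuate/update them during
// concurrent weak root processing; otherwise the GC must have been cancelled.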
void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
  ShenandoahNMethod* data = gc_data(nm);
  assert(data != NULL, "Sanity");
  assert(data->lock()->owned_by_self(), "Must hold the lock");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->is_concurrent_mark_in_progress()) {
    if (heap->has_forwarded_objects()) {
      ShenandoahKeepNMethodMetadataAliveClosure<true> cl;
      data->oops_do(&cl);
    } else {
      ShenandoahKeepNMethodMetadataAliveClosure<false> cl;
      data->oops_do(&cl);
    }
  } else if (heap->is_concurrent_weak_root_in_progress()) {
    ShenandoahEvacOOMScope evac_scope;
    ShenandoahEvacuateUpdateRootsClosure<> cl;
    data->oops_do(&cl, true /*fix relocation*/);
  } else {
    // It is possible that the GC was cancelled when it reached final mark.
    // In that case, the concurrent root phase is skipped and a degenerated GC
    // follows, which disarms the nmethods.
    assert(heap->cancelled_gc(), "What else?");
  }
}

#ifdef ASSERT
void ShenandoahNMethod::assert_alive_and_correct() {
  assert(_nm->is_alive(), "only alive nmethods here");
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  for (int c = 0; c < _oops_count; c++) {
    oop *loc = _oops[c];
    assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
    oop o = RawAccess<>::oop_load(loc);
    shenandoah_assert_correct_except(loc, o, o == NULL || heap->is_full_gc_move_in_progress());
  }

  oop* const begin = _nm->oops_begin();
  oop* const end = _nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      oop o = RawAccess<>::oop_load(p);
      shenandoah_assert_correct_except(p, o, o == NULL || heap->is_full_gc_move_in_progress());
    }
  }
}

class ShenandoahNMethodOopDetector : public OopClosure {
private:
  ResourceMark rm; // For growable array allocation below.
  GrowableArray<oop*> _oops;

public:
  ShenandoahNMethodOopDetector() : _oops(10) {};

  void do_oop(oop* o) {
    _oops.append(o);
  }
  void do_oop(narrowOop* o) {
    fatal("NMethods should not have compressed oops embedded.");
  }

  GrowableArray<oop*>* oops() {
    return &_oops;
  }

  bool has_oops() {
    return !_oops.is_empty();
  }
};

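// Verify that the recorded oop locations match what a full nmethod oops_do
// reports; dump both sets in the fatal message if they diverge.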
void ShenandoahNMethod::assert_same_oops(bool allow_dead) {
  ShenandoahNMethodOopDetector detector;
  nm()->oops_do(&detector, allow_dead);

  GrowableArray<oop*>* oops = detector.oops();

  int count = _oops_count;
  for (int index = 0; index < _oops_count; index ++) {
    assert(oops->contains(_oops[index]), "Must contain this oop");
  }

  for (oop* p = nm()->oops_begin(); p < nm()->oops_end(); p ++) {
    if (*p == Universe::non_oop_word()) continue;
    count++;
    assert(oops->contains(p), "Must contain this oop");
  }

  if (oops->length() < count) {
    stringStream debug_stream;
    debug_stream.print_cr("detected locs: %d", oops->length());
    for (int i = 0; i < oops->length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(oops->at(i)));
    }
    debug_stream.print_cr("recorded oops: %d", _oops_count);
    for (int i = 0; i < _oops_count; i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(_oops[i]));
    }
    GrowableArray<oop*> check;
    bool non_immed;
    detect_reloc_oops(nm(), check, non_immed);
    debug_stream.print_cr("check oops: %d", check.length());
    for (int i = 0; i < check.length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(check.at(i)));
    }
    fatal("Must match #detected: %d, #recorded: %d, #total: %d, begin: " PTR_FORMAT ", end: " PTR_FORMAT "\n%s",
          oops->length(), _oops_count, count, p2i(nm()->oops_begin()), p2i(nm()->oops_end()), debug_stream.as_string());
  }
}

void ShenandoahNMethod::assert_no_oops(nmethod* nm, bool allow_dead) {
  ShenandoahNMethodOopDetector detector;
  nm->oops_do(&detector, allow_dead);
  assert(detector.oops()->length() == 0, "Should not have oops");
}
#endif

ShenandoahNMethodTable::ShenandoahNMethodTable() :
  _heap(ShenandoahHeap::heap()),
  _index(0),
  _itr_cnt(0) {
  _list = new ShenandoahNMethodList(minSize);
}

ShenandoahNMethodTable::~ShenandoahNMethodTable() {
  assert(_list != NULL, "Sanity");
  _list->release();
}

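// Register an nmethod with the table: create and attach GC data on first
// registration, or refresh the existing data when the nmethod is registered
// again.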
void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  ShenandoahReentrantLocker data_locker(data != NULL ? data->lock() : NULL);

  if (data != NULL) {
    assert(contain(nm), "Must have been registered");
    assert(nm == data->nm(), "Must be same nmethod");
    data->update();
  } else {
    data = ShenandoahNMethod::for_nmethod(nm);
    if (data == NULL) {
      assert(!ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(),
             "Only possible when concurrent class unloading is off");
      return;
    }
    ShenandoahNMethod::attach_gc_data(nm, data);
    ShenandoahLocker locker(&_lock);
    log_register_nmethod(nm);
    append(data);
  }
  // Disarm new nmethod
  ShenandoahNMethod::disarm_nmethod(nm);
}

void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  if (data == NULL) {
    assert(!ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(),
           "Only possible when concurrent class unloading is off");
    ShenandoahNMethod::assert_no_oops(nm, true /*allow_dead*/);
    return;
  }

  if (Thread::current()->is_Code_cache_sweeper_thread()) {
    wait_until_concurrent_iteration_done();
  }
  log_unregister_nmethod(nm);
  ShenandoahLocker locker(&_lock);
  assert(contain(nm), "Must have been registered");

  ShenandoahReentrantLocker data_locker(data->lock());
  data->mark_unregistered();
}

void ShenandoahNMethodTable::flush_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(Thread::current()->is_Code_cache_sweeper_thread(), "Must be called from the sweeper thread");
  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  assert(data != NULL || !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(),
         "Only possible when concurrent class unloading is off");
  if (data == NULL) {
    ShenandoahNMethod::assert_no_oops(nm, true /*allow_dead*/);
    return;
  }

  // Cannot alter the array while iteration is in progress
  wait_until_concurrent_iteration_done();
  log_flush_nmethod(nm);

  ShenandoahLocker locker(&_lock);
  int idx = index_of(nm);
  assert(idx >= 0 && idx < _index, "Invalid index");
  ShenandoahNMethod::attach_gc_data(nm, NULL);
  remove(idx);
}

bool ShenandoahNMethodTable::contain(nmethod* nm) const {
  return index_of(nm) != -1;
}

ShenandoahNMethod* ShenandoahNMethodTable::at(int index) const {
  assert(index >= 0 && index < _index, "Out of bounds");
  return _list->at(index);
}

int ShenandoahNMethodTable::index_of(nmethod* nm) const {
  for (int index = 0; index < length(); index ++) {
    if (at(index)->nm() == nm) {
      return index;
    }
  }
  return -1;
}

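// Remove the entry at idx by moving the last entry into its slot and
// shrinking the table; only allowed when no concurrent iteration is active.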
void ShenandoahNMethodTable::remove(int idx) {
  shenandoah_assert_locked_or_safepoint(CodeCache_lock);
  assert(!iteration_in_progress(), "Cannot happen");
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  assert(idx >= 0 && idx < _index, "Out of bounds");
  ShenandoahNMethod* snm = _list->at(idx);
  ShenandoahNMethod* tmp = _list->at(_index - 1);
  _list->set(idx, tmp);
  _index --;

  delete snm;
}

void ShenandoahNMethodTable::wait_until_concurrent_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  while (iteration_in_progress()) {
    CodeCache_lock->wait_without_safepoint_check();
  }
}

void ShenandoahNMethodTable::append(ShenandoahNMethod* snm) {
  if (is_full()) {
    int new_size = 2 * _list->size();
    // Rebuild table and replace current one
    rebuild(new_size);
  }

  _list->set(_index++, snm);
  assert(_index >= 0 && _index <= _list->size(), "Sanity");
}

void ShenandoahNMethodTable::rebuild(int size) {
  ShenandoahNMethodList* new_list = new ShenandoahNMethodList(size);
  new_list->transfer(_list, _index);

  // Release old list
  _list->release();
  _list = new_list;
}

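// Concurrent iteration works against a reference-counted snapshot of the
// list; the iteration counter keeps sweeping and flushing from mutating
// the table while iterators are active.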
ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration() {
  _itr_cnt++;
  return new ShenandoahNMethodTableSnapshot(this);
}

void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
  assert(iteration_in_progress(), "Why are we here?");
  assert(snapshot != NULL, "No snapshot");
  _itr_cnt--;

  delete snapshot;
}

void ShenandoahNMethodTable::log_register_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Register NMethod: %s.%s [" PTR_FORMAT "] (%s)",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name());
}

void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Unregister NMethod: %s.%s [" PTR_FORMAT "]",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm));
}

void ShenandoahNMethodTable::log_flush_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Flush NMethod: (" PTR_FORMAT ")", p2i(nm));
}

#ifdef ASSERT
void ShenandoahNMethodTable::assert_nmethods_alive_and_correct() {
  assert_locked_or_safepoint(CodeCache_lock);

  for (int index = 0; index < length(); index ++) {
    ShenandoahNMethod* m = _list->at(index);
    // Concurrent unloading may leave dead nmethods to be cleaned up by the sweeper
    if (m->is_unregistered()) continue;
    m->assert_alive_and_correct();
  }
}
#endif

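// Reference-counted backing array for the nmethod table. Snapshots acquire
// a reference so the array remains valid even after the table grows and
// replaces it with a larger one.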
ShenandoahNMethodList::ShenandoahNMethodList(int size) :
  _size(size), _ref_count(1) {
  _list = NEW_C_HEAP_ARRAY(ShenandoahNMethod*, size, mtGC);
}

ShenandoahNMethodList::~ShenandoahNMethodList() {
  assert(_list != NULL, "Sanity");
  assert(_ref_count == 0, "Must be");
  FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _list);
}

void ShenandoahNMethodList::transfer(ShenandoahNMethodList* const list, int limit) {
  assert(limit <= size(), "Sanity");
  ShenandoahNMethod** old_list = list->list();
  for (int index = 0; index < limit; index++) {
    _list[index] = old_list[index];
  }
}

ShenandoahNMethodList* ShenandoahNMethodList::acquire() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  _ref_count++;
  return this;
}

void ShenandoahNMethodList::release() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  _ref_count--;
  if (_ref_count == 0) {
    delete this;
  }
}

ShenandoahNMethodTableSnapshot::ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table) :
  _heap(ShenandoahHeap::heap()), _list(table->_list->acquire()), _limit(table->_index), _claimed(0) {
}

ShenandoahNMethodTableSnapshot::~ShenandoahNMethodTableSnapshot() {
  _list->release();
}

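// Claim strides of the snapshot with an atomic counter so that multiple
// GC workers can walk the nmethods in parallel without overlapping.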
void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** list = _list->list();
  size_t max = (size_t)_limit;
  while (_claimed < max) {
    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* data = list[idx];
      assert(data != NULL, "Should not be NULL");
      if (!data->is_unregistered()) {
        cl->do_nmethod(data->nm());
      }
    }
  }
}

ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
  _table(table), _table_snapshot(NULL) {
}

void ShenandoahConcurrentNMethodIterator::nmethods_do_begin() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  assert(ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(),
         "Only for concurrent class unloading");
  _table_snapshot = _table->snapshot_for_iteration();
}

void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
  assert(_table_snapshot != NULL, "Must first call nmethods_do_begin()");
  _table_snapshot->concurrent_nmethods_do(cl);
}

void ShenandoahConcurrentNMethodIterator::nmethods_do_end() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  assert(ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(),
         "Only for concurrent class unloading");
  _table->finish_iteration(_table_snapshot);
  CodeCache_lock->notify_all();
}