< prev index next >

src/share/vm/runtime/advancedThresholdPolicy.cpp

Print this page


   1 /*
   2  * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCache.hpp"

  27 #include "runtime/advancedThresholdPolicy.hpp"
  28 #include "runtime/simpleThresholdPolicy.inline.hpp"
  29 
  30 #ifdef TIERED
  31 // Print an event.
   32 void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
   33                                              int bci, CompLevel level) {
        // Append policy-specific details to a tiered-compilation event line:
        // the method's measured event rate and the load-feedback scale
        // factors (k) applied to the tier-3 and tier-4 thresholds.
   34   tty->print(" rate=");
   35   if (mh->prev_time() == 0) tty->print("n/a");  // no previous sample: rate not yet computed
   36   else tty->print("%f", mh->rate());
   37 
   38   tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
   39                                threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
   40 
   41 }
  42 
  43 void AdvancedThresholdPolicy::initialize() {
  44   // Turn on ergonomic compiler count selection
  45   if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
  46     FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);


 145       if (weight(x) > weight(y)) {
 146         return true;
 147       }
 148     }
 149   return false;
 150 }
 151 
  152 // Is method profiled enough?
        // Returns true if the method's MethodData has accumulated enough
        // invocation/backedge activity (since the last profiling period) to
        // satisfy the full-profile call predicate with a scale of 1.
  153 bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  154   MethodData* mdo = method->method_data();
  155   if (mdo != NULL) {
  156     int i = mdo->invocation_count_delta();
  157     int b = mdo->backedge_count_delta();
  158     return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
  159   }
  160   return false;  // no MethodData allocated yet => not profiled
  161 }
 162 
  163 // Called with the queue locked and with at least one element
        // Scan the queue once: refresh each method's event rate, prune stale
        // entries, and return the task whose method compares highest under
        // compare_methods(). May demote the winner's level in place (below).
  164 CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {



  165   CompileTask *max_task = NULL;
  166   Method* max_method = NULL;
  167   jlong t = os::javaTimeMillis();
  168   // Iterate through the queue and find a method with a maximum rate.
  169   for (CompileTask* task = compile_queue->first(); task != NULL;) {
  170     CompileTask* next_task = task->next();  // saved first: task may be unlinked below
  171     Method* method = task->method();
  172     update_rate(t, method);
  173     if (max_task == NULL) {
  174       max_task = task;
  175       max_method = method;
  176     } else {
  177       // If a method has been stale for some time, remove it from the queue.
  178       if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
  179         if (PrintTieredEvents) {
  180           print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
  181         }

  182         compile_queue->remove_and_mark_stale(task);
  183         method->clear_queued_for_compilation();  // let the method be re-enqueued later
  184         task = next_task;
  185         continue;
  186       }
  187 
  188       // Select a method with a higher rate
  189       if (compare_methods(method, max_method)) {
  190         max_task = task;
  191         max_method = method;
  192       }
  193     }
  194     task = next_task;
  195   }
  196 









        // If the chosen task was queued for full profiling (tier 3) but the
        // method is already sufficiently profiled, compile at limited profile
        // instead: per the comment in common(), fully profiled C1 code is
        // notably slower than limited-profile code. Only applies when tiers
        // above full-profile are enabled (TieredStopAtLevel check).
  197   if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
  198       && is_method_profiled(max_method)) {
  199     max_task->set_comp_level(CompLevel_limited_profile);
  200     if (PrintTieredEvents) {
  201       print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
  202     }
  203   }
  204 
  205   return max_task;
  206 }
 207 
 208 double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
 209   double queue_size = CompileBroker::queue_size(level);
 210   int comp_count = compiler_count(level);
 211   double k = queue_size / (feedback_k * comp_count) + 1;
 212 
 213   // Increase C1 compile threshold when the code cache is filled more
 214   // than specified by IncreaseFirstTierCompileThresholdAt percentage.
 215   // The main intention is to keep enough free space for C2 compiled code
 216   // to achieve peak performance if the code cache is under stress.


 337  * Note that since state 0 can be reached from any other state via deoptimization different loops
 338  * are possible.
 339  *
 340  */
 341 
 342 // Common transition function. Given a predicate determines if a method should transition to another level.
 343 CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
 344   CompLevel next_level = cur_level;
 345   int i = method->invocation_count();
 346   int b = method->backedge_count();
 347 
 348   if (is_trivial(method)) {
 349     next_level = CompLevel_simple;
 350   } else {
 351     switch(cur_level) {
 352     case CompLevel_none:
 353       // If we were at full profile level, would we switch to full opt?
 354       if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
 355         next_level = CompLevel_full_optimization;
 356       } else if ((this->*p)(i, b, cur_level, method)) {








 357         // C1-generated fully profiled code is about 30% slower than the limited profile
 358         // code that has only invocation and backedge counters. The observation is that
 359         // if C2 queue is large enough we can spend too much time in the fully profiled code
 360         // while waiting for C2 to pick the method from the queue. To alleviate this problem
 361         // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
 362         // we choose to compile a limited profiled version and then recompile with full profiling
 363         // when the load on C2 goes down.
 364         if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
 365                                  Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
 366           next_level = CompLevel_limited_profile;
 367         } else {
 368           next_level = CompLevel_full_profile;
 369         }
 370       }
 371       break;
 372     case CompLevel_limited_profile:
 373       if (is_method_profiled(method)) {
 374         // Special case: we got here because this method was fully profiled in the interpreter.
 375         next_level = CompLevel_full_optimization;
 376       } else {


   1 /*
   2  * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCache.hpp"
  27 #include "compiler/compileTask.hpp"
  28 #include "runtime/advancedThresholdPolicy.hpp"
  29 #include "runtime/simpleThresholdPolicy.inline.hpp"
  30 
  31 #ifdef TIERED
  32 // Print an event.
   33 void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
   34                                              int bci, CompLevel level) {
        // Append policy-specific details to a tiered-compilation event line:
        // the method's measured event rate and the load-feedback scale
        // factors (k) applied to the tier-3 and tier-4 thresholds.
   35   tty->print(" rate=");
   36   if (mh->prev_time() == 0) tty->print("n/a");  // no previous sample: rate not yet computed
   37   else tty->print("%f", mh->rate());
   38 
   39   tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
   40                                threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
   41 
   42 }
  43 
  44 void AdvancedThresholdPolicy::initialize() {
  45   // Turn on ergonomic compiler count selection
  46   if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
  47     FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);


 146       if (weight(x) > weight(y)) {
 147         return true;
 148       }
 149     }
 150   return false;
 151 }
 152 
  153 // Is method profiled enough?
        // Returns true if the method's MethodData has accumulated enough
        // invocation/backedge activity (since the last profiling period) to
        // satisfy the full-profile call predicate with a scale of 1.
  154 bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  155   MethodData* mdo = method->method_data();
  156   if (mdo != NULL) {
  157     int i = mdo->invocation_count_delta();
  158     int b = mdo->backedge_count_delta();
  159     return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
  160   }
  161   return false;  // no MethodData allocated yet => not profiled
  162 }
 163 
  164 // Called with the queue locked and with at least one element
        // Scan the queue once: refresh each method's event rate, prune stale
        // entries, and return the task whose method compares highest under
        // compare_methods(). May demote the winner's level in place (below).
  165 CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
  166 #if INCLUDE_JVMCI
        // NOTE(review): max_non_jvmci_task is declared here and tested after
        // the scan loop, but it is never assigned in the visible body of this
        // function, so the UseJVMCICompiler fallback below can never fire.
        // Confirm whether an assignment inside the loop was dropped.
  167   CompileTask *max_non_jvmci_task = NULL;
  168 #endif
  169   CompileTask *max_task = NULL;
  170   Method* max_method = NULL;
  171   jlong t = os::javaTimeMillis();
  172   // Iterate through the queue and find a method with a maximum rate.
  173   for (CompileTask* task = compile_queue->first(); task != NULL;) {
  174     CompileTask* next_task = task->next();  // saved first: task may be unlinked below
  175     Method* method = task->method();
  176     update_rate(t, method);
  177     if (max_task == NULL) {
  178       max_task = task;
  179       max_method = method;
  180     } else {
  181       // If a method has been stale for some time, remove it from the queue.
  182       if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
  183         if (PrintTieredEvents) {
  184           print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
  185         }
  186         task->log_task_dequeued("stale");
  187         compile_queue->remove_and_mark_stale(task);
  188         method->clear_queued_for_compilation();  // let the method be re-enqueued later
  189         task = next_task;
  190         continue;
  191       }
  192 
  193       // Select a method with a higher rate
  194       if (compare_methods(method, max_method)) {
  195         max_task = task;
  196         max_method = method;
  197       }
  198     }
  199     task = next_task;
  200   }
  201 
  202 #if INCLUDE_JVMCI
  203   if (UseJVMCICompiler) {
  204     if (max_non_jvmci_task != NULL) {
  205       max_task = max_non_jvmci_task;
  206       max_method = max_task->method();
  207     }
  208   }
  209 #endif
  210 
        // If the chosen task was queued for full profiling (tier 3) but the
        // method is already sufficiently profiled, compile at limited profile
        // instead: per the comment in common(), fully profiled C1 code is
        // notably slower than limited-profile code. Only applies when tiers
        // above full-profile are enabled (TieredStopAtLevel check).
  211   if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
  212       && is_method_profiled(max_method)) {
  213     max_task->set_comp_level(CompLevel_limited_profile);
  214     if (PrintTieredEvents) {
  215       print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
  216     }
  217   }
  218 
  219   return max_task;
  220 }
 221 
 222 double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
 223   double queue_size = CompileBroker::queue_size(level);
 224   int comp_count = compiler_count(level);
 225   double k = queue_size / (feedback_k * comp_count) + 1;
 226 
 227   // Increase C1 compile threshold when the code cache is filled more
 228   // than specified by IncreaseFirstTierCompileThresholdAt percentage.
 229   // The main intention is to keep enough free space for C2 compiled code
 230   // to achieve peak performance if the code cache is under stress.


 351  * Note that since state 0 can be reached from any other state via deoptimization different loops
 352  * are possible.
 353  *
 354  */
 355 
 356 // Common transition function. Given a predicate determines if a method should transition to another level.
 357 CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
 358   CompLevel next_level = cur_level;
 359   int i = method->invocation_count();
 360   int b = method->backedge_count();
 361 
 362   if (is_trivial(method)) {
 363     next_level = CompLevel_simple;
 364   } else {
 365     switch(cur_level) {
 366     case CompLevel_none:
 367       // If we were at full profile level, would we switch to full opt?
 368       if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
 369         next_level = CompLevel_full_optimization;
 370       } else if ((this->*p)(i, b, cur_level, method)) {
 371 #if INCLUDE_JVMCI
 372         if (UseJVMCICompiler) {
 373           // Since JVMCI takes a while to warm up, its queue inevitably backs up during
 374           // early VM execution.
 375           next_level = CompLevel_full_profile;
 376           break;
 377         }
 378 #endif
 379         // C1-generated fully profiled code is about 30% slower than the limited profile
 380         // code that has only invocation and backedge counters. The observation is that
 381         // if C2 queue is large enough we can spend too much time in the fully profiled code
 382         // while waiting for C2 to pick the method from the queue. To alleviate this problem
 383         // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
 384         // we choose to compile a limited profiled version and then recompile with full profiling
 385         // when the load on C2 goes down.
 386         if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
 387             Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
 388           next_level = CompLevel_limited_profile;
 389         } else {
 390           next_level = CompLevel_full_profile;
 391         }
 392       }
 393       break;
 394     case CompLevel_limited_profile:
 395       if (is_method_profiled(method)) {
 396         // Special case: we got here because this method was fully profiled in the interpreter.
 397         next_level = CompLevel_full_optimization;
 398       } else {


< prev index next >