src/share/vm/oops/methodData.hpp

@@ -538,10 +538,14 @@
 protected:
   enum {
     // null_seen:
     //  saw a null operand (cast/aastore/instanceof)
     null_seen_flag              = DataLayout::first_flag + 0
+#if INCLUDE_JVMCI
+    // bytecode threw any exception
+    , exception_seen_flag         = null_seen_flag + 1
+#endif
   };
   enum { bit_cell_count = 0 };  // no additional data fields needed.
 public:
   BitData(DataLayout* layout) : ProfileData(layout) {
   }

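A note on the new flag: DataLayout flags are bit positions within the header's flags byte, so exception_seen_flag costs a single bit rather than a data cell. A minimal sketch of the numbering, assuming first_flag behaves as in DataLayout (standalone, illustrative only):

    // Illustrative sketch, not the DataLayout declaration.
    enum {
      first_flag          = 0,                  // assumed base value
      null_seen_flag      = first_flag + 0,     // bit 0: saw a null operand
      exception_seen_flag = null_seen_flag + 1  // bit 1: bytecode threw (JVMCI)
    };
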
@@ -561,10 +565,15 @@
   // The null_seen flag bit is specially known to the interpreter.
   // Consulting it allows the compiler to avoid setting up null_check traps.
   bool null_seen()     { return flag_at(null_seen_flag); }
   void set_null_seen()    { set_flag_at(null_seen_flag); }
 
+#if INCLUDE_JVMCI
+  // true if an exception was thrown at the specific BCI
+  bool exception_seen() { return flag_at(exception_seen_flag); }
+  void set_exception_seen() { set_flag_at(exception_seen_flag); }
+#endif
 
   // Code generation support
   static int null_seen_byte_constant() {
     return flag_number_to_byte_constant(null_seen_flag);
   }

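flag_number_to_byte_constant lets generated code test a flag with a plain byte compare against the flags byte. A hedged sketch of the equivalent mask computation (the helper name is illustrative, not the DataLayout API):

    // Sketch: a one-bit flag in a u1 flags field is tested by AND-ing
    // the byte with this mask. Illustrative helper only.
    static int flag_mask_sketch(int flag_number) {
      return 1 << flag_number;
    }
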
@@ -1164,11 +1173,26 @@
 // that the check is reached, and a series of (Klass*, count) pairs
 // which are used to store a type profile for the receiver of the check.
 class ReceiverTypeData : public CounterData {
 protected:
   enum {
+#if INCLUDE_JVMCI
+    // Description of the different counters
+    // ReceiverTypeData for instanceof/checkcast/aastore:
+    //   C1/C2: count is incremented on type overflow and decremented for failed type checks
+    //   JVMCI: count decremented for failed type checks and nonprofiled_count is incremented on type overflow
+    //          TODO (chaeubl): in fact, JVMCI should also increment the count for failed type checks to mimic the C1/C2 behavior
+    // VirtualCallData for invokevirtual/invokeinterface:
+    //   C1/C2: count is incremented on type overflow
+    //   JVMCI: count is incremented on type overflow, nonprofiled_count is incremented on method overflow
+
+    // JVMCI is interested in knowing the percentage of type checks involving a type not explicitly in the profile
+    nonprofiled_count_off_set = counter_cell_count,
+    receiver0_offset,
+#else
     receiver0_offset = counter_cell_count,
+#endif
     count0_offset,
     receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
   };
 
 public:

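Under INCLUDE_JVMCI the nonprofiled_count cell sits between the shared count cell and the first receiver row, which is why receiver0_offset is declared relative to it. A sketch of the resulting layout, in cell indices rather than byte offsets (derived from the enum above):

    // Without JVMCI: [count][recv0][cnt0][recv1][cnt1]...
    // With JVMCI:    [count][nonprofiled_count][recv0][cnt0]...
    // Either way each row spans two cells:
    //   receiver_type_row_cell_count == (count0_offset + 1) - receiver0_offset == 2
    static int receiver_cell_index_sketch(int receiver0_offset, int row) {
      return receiver0_offset + row * 2;  // 2 == receiver_type_row_cell_count
    }
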
@@ -1179,11 +1203,11 @@
   }
 
   virtual bool is_ReceiverTypeData() const { return true; }
 
   static int static_cell_count() {
-    return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
+    return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count JVMCI_ONLY(+ 1);
   }
 
   virtual int cell_count() const {
     return static_cell_count();
   }

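The JVMCI_ONLY(+ 1) adds the nonprofiled_count cell without an #if block. The macro family comes from utilities/macros.hpp and expands roughly as follows (paraphrased, not copied from this changeset); NOT_JVMCI_RETURN, used further down, belongs to the same family:

    #if INCLUDE_JVMCI
    #define JVMCI_ONLY(code) code
    #define NOT_JVMCI(code)
    #define NOT_JVMCI_RETURN        /* next token must be ; */
    #else
    #define JVMCI_ONLY(code)
    #define NOT_JVMCI(code) code
    #define NOT_JVMCI_RETURN {}
    #endif
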
@@ -1241,19 +1265,37 @@
     // We sort the profiling info (ciCallProfile) for compilation.
     //
     set_count(0);
     set_receiver(row, NULL);
     set_receiver_count(row, 0);
+#if INCLUDE_JVMCI
+    if (!this->is_VirtualCallData()) {
+      // if this is a ReceiverTypeData for JVMCI, the nonprofiled_count
+      // must also be reset (see "Description of the different counters" above)
+      set_nonprofiled_count(0);
+    }
+#endif
   }
 
   // Code generation support
   static ByteSize receiver_offset(uint row) {
     return cell_offset(receiver_cell_index(row));
   }
   static ByteSize receiver_count_offset(uint row) {
     return cell_offset(receiver_count_cell_index(row));
   }
+#if INCLUDE_JVMCI
+  static ByteSize nonprofiled_receiver_count_offset() {
+    return cell_offset(nonprofiled_count_off_set);
+  }
+  uint nonprofiled_count() const {
+    return uint_at(nonprofiled_count_off_set);
+  }
+  void set_nonprofiled_count(uint count) {
+    set_uint_at(nonprofiled_count_off_set, count);
+  }
+#endif // INCLUDE_JVMCI
   static ByteSize receiver_type_data_size() {
     return cell_offset(static_cell_count());
   }
 
   // GC support

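Per the "Description of the different counters" above, JVMCI bumps nonprofiled_count when a type check sees a type that no longer fits the profile. A hypothetical sketch of that bookkeeping (profile_type_overflow and the jvmci flag are illustrative, not HotSpot code):

    // Hypothetical update path, for illustration only.
    static void profile_type_overflow(ReceiverTypeData* data, bool jvmci) {
      if (jvmci) {
        // JVMCI: track overflow separately so the share of type checks
        // hitting unprofiled types can be computed against the rows.
        data->set_nonprofiled_count(data->nonprofiled_count() + 1);
      } else {
        // C1/C2: fold overflow into the shared count cell.
        data->set_count(data->count() + 1);
      }
    }
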
@@ -1314,11 +1356,11 @@
   virtual bool is_VirtualCallData() const { return true; }
 
   static int static_cell_count() {
     // At this point we could add more profile state, e.g., for arguments.
     // But for now it's the same size as the base record type.
-    return ReceiverTypeData::static_cell_count();
+    return ReceiverTypeData::static_cell_count() JVMCI_ONLY(+ (uint) MethodProfileWidth * receiver_type_row_cell_count);
   }
 
   virtual int cell_count() const {
     return static_cell_count();
   }

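Under JVMCI a VirtualCallData record therefore holds TypeProfileWidth receiver rows followed by MethodProfileWidth method rows, each a (pointer, count) pair, plus the nonprofiled cell. The cell budget, spelled out as a sketch (both widths are VM options):

    //   counter_cell_count               invocation counter
    // + 1                                nonprofiled_count (JVMCI)
    // + TypeProfileWidth   * 2           (receiver, count) rows
    // + MethodProfileWidth * 2           (method, count) rows
    static int jvmci_virtual_call_cells_sketch(int counter_cells,
                                               int type_width,
                                               int method_width) {
      return counter_cells + 1 + (type_width + method_width) * 2;
    }
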
@@ -1336,10 +1378,66 @@
   static DataLayout* advance(DataLayout* layout) {
     return (DataLayout*) (((address)layout) + (ssize_t)VirtualCallData::virtual_call_data_size_in_bytes());
   }
 #endif // CC_INTERP
 
+#if INCLUDE_JVMCI
+  static ByteSize method_offset(uint row) {
+    return cell_offset(method_cell_index(row));
+  }
+  static ByteSize method_count_offset(uint row) {
+    return cell_offset(method_count_cell_index(row));
+  }
+  static int method_cell_index(uint row) {
+    return receiver0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count;
+  }
+  static int method_count_cell_index(uint row) {
+    return count0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count;
+  }
+  static uint method_row_limit() {
+    return MethodProfileWidth;
+  }
+
+  Method* method(uint row) const {
+    assert(row < method_row_limit(), "oob");
+
+    Method* method = (Method*)intptr_at(method_cell_index(row));
+    assert(method == NULL || method->is_method(), "must be");
+    return method;
+  }
+
+  uint method_count(uint row) const {
+    assert(row < method_row_limit(), "oob");
+    return uint_at(method_count_cell_index(row));
+  }
+
+  void set_method(uint row, Method* m) {
+    assert((uint)row < method_row_limit(), "oob");
+    set_intptr_at(method_cell_index(row), (uintptr_t)m);
+  }
+
+  void set_method_count(uint row, uint count) {
+    assert(row < method_row_limit(), "oob");
+    set_uint_at(method_count_cell_index(row), count);
+  }
+
+  void clear_method_row(uint row) {
+    assert(row < method_row_limit(), "oob");
+    // Clear total count - indicator of polymorphic call site (see comment for clear_row() in ReceiverTypeData).
+    set_nonprofiled_count(0);
+    set_method(row, NULL);
+    set_method_count(row, 0);
+  }
+
+  // GC support
+  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
+
+  // Redefinition support
+  virtual void clean_weak_method_links();
+#endif // INCLUDE_JVMCI
+
+  void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
   void print_data_on(outputStream* st, const char* extra = NULL) const;
 };
 
 // VirtualCallTypeData
 //

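Reading the method rows added above mirrors the receiver rows. A usage sketch, assuming a valid VirtualCallData* and outputStream* (the print format is illustrative):

    // Illustrative only: walk the JVMCI method profile rows.
    static void print_method_rows_sketch(VirtualCallData* vcd, outputStream* st) {
      for (uint row = 0; row < VirtualCallData::method_row_limit(); row++) {
        Method* m = vcd->method(row);
        if (m != NULL) {
          st->print_cr("row %u: count %u", row, vcd->method_count(row));
        }
      }
    }
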
@@ -2051,14 +2149,15 @@
 public:
   static MethodData* allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS);
   MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData
 
   bool is_methodData() const volatile { return true; }
+  void initialize();
 
   // Whole-method sticky bits and flags
   enum {
-    _trap_hist_limit    = 22,   // decoupled from Deoptimization::Reason_LIMIT
+    _trap_hist_limit    = 22 JVMCI_ONLY(+5),   // decoupled from Deoptimization::Reason_LIMIT
     _trap_hist_mask     = max_jubyte,
     _extra_data_count   = 4     // extra DataLayout headers, for trap history
   }; // Public flag values
 private:
   uint _nof_decompiles;             // count of all nmethod removals

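With the five extra JVMCI deoptimization reasons the histogram grows to 27 slots, and the asserts below accept reasons up to twice _trap_hist_limit; the backing _trap_hist._array is presumably sized with the same JVMCI_ONLY(2*) factor where it is declared. The arithmetic, assuming the values in this hunk:

    // 22 baseline reasons, +5 JVMCI-specific ones when INCLUDE_JVMCI:
    //   _trap_hist_limit = 22 JVMCI_ONLY(+5)       // 27 under JVMCI
    // Index range accepted by trap_count()/inc_trap_count():
    //   reason < JVMCI_ONLY(2*) _trap_hist_limit   // 54 under JVMCI
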
@@ -2102,10 +2201,15 @@
   short             _num_blocks;
   // Does this method contain anything worth profiling?
   enum WouldProfile {unknown, no_profile, profile};
   WouldProfile      _would_profile;
 
+#if INCLUDE_JVMCI
+  // Support for HotSpotMethodData.setCompiledIRSize(int)
+  int               _jvmci_ir_size;
+#endif
+
   // Size of _data array in bytes.  (Excludes header and extra_data fields.)
   int _data_size;
 
   // data index for the area dedicated to parameters. -1 if no
   // parameter profiling.

@@ -2380,31 +2484,27 @@
   int extra_data_size() const          { return (address)extra_data_limit() - (address)extra_data_base(); }
   static DataLayout* next_extra(DataLayout* dp);
 
   // Return (uint)-1 for overflow.
   uint trap_count(int reason) const {
-    assert((uint)reason < _trap_hist_limit, "oob");
+    assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob");
     return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
   }
   // For loops:
   static uint trap_reason_limit() { return _trap_hist_limit; }
   static uint trap_count_limit()  { return _trap_hist_mask; }
   uint inc_trap_count(int reason) {
     // Count another trap, anywhere in this method.
     assert(reason >= 0, "must be single trap");
-    if ((uint)reason < _trap_hist_limit) {
-      uint cnt1 = 1 + _trap_hist._array[reason];
-      if ((cnt1 & _trap_hist_mask) != 0) {  // if no counter overflow...
-        _trap_hist._array[reason] = cnt1;
-        return cnt1;
-      } else {
-        return _trap_hist_mask + (++_nof_overflow_traps);
-      }
-    } else {
-      // Could not represent the count in the histogram.
-      return (++_nof_overflow_traps);
-    }
+    assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob");
+    uint cnt1 = 1 + _trap_hist._array[reason];
+    if ((cnt1 & _trap_hist_mask) != 0) {  // if no counter overflow...
+      _trap_hist._array[reason] = cnt1;
+      return cnt1;
+    } else {
+      return _trap_hist_mask + (++_nof_overflow_traps);
+    }
   }
 
   uint overflow_trap_count() const {
     return _nof_overflow_traps;
   }

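The per-reason counters saturate rather than wrap: a slot holds at most _trap_hist_mask (max_jubyte), and once the increment masks to zero the shared _nof_overflow_traps counter takes over. A standalone sketch of the same scheme:

    // Minimal sketch of the saturating counter logic above;
    // 'overflow' stands in for _nof_overflow_traps.
    static unsigned inc_saturating(unsigned char* slot, unsigned mask,
                                   unsigned* overflow) {
      unsigned cnt1 = 1u + *slot;
      if ((cnt1 & mask) != 0) {       // no counter overflow
        *slot = (unsigned char) cnt1;
        return cnt1;
      } else {
        return mask + (++*overflow);
      }
    }
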
@@ -2444,10 +2544,14 @@
   // Support for code generation
   static ByteSize data_offset() {
     return byte_offset_of(MethodData, _data[0]);
   }
 
+  static ByteSize trap_history_offset() {
+    return byte_offset_of(MethodData, _trap_hist._array);
+  }
+
   static ByteSize invocation_counter_offset() {
     return byte_offset_of(MethodData, _invocation_counter);
   }
 
   static ByteSize backedge_counter_offset() {