/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_OOPRECORDER_HPP
#define SHARE_VM_CODE_OOPRECORDER_HPP

#include "memory/universe.hpp"
#include "runtime/handles.hpp"
#include "utilities/growableArray.hpp"

// Recording and retrieval of either oop relocations or metadata in compiled code.

class CodeBlob;

template <class T> class ValueRecorder : public StackObj {
 public:
  // A two-way mapping from positive indexes to oop handles.
  // The zero index is reserved for a constant (sharable) null.
  // Indexes may not be negative.

  // Use the given arena to manage storage, if not NULL.
  // By default, uses the current ResourceArea.
  ValueRecorder(Arena* arena = NULL);
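  //
  // Usage sketch (illustrative only; 'my_arena' is a hypothetical arena, and
  // the surrounding compiler code normally supplies the ResourceMark):
  //
  //   ResourceMark rm;
  //   ValueRecorder<jobject> vr;                 // storage from the resource area
  //   // ValueRecorder<jobject> vr2(my_arena);   // or from an explicit arena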

  // Generate a new index on which nmethod::oop_addr_at will work.
  // allocate_index and find_index never return the same index,
  // and allocate_index never returns the same index twice.
  // In fact, two successive calls to allocate_index return successive ints.
  int allocate_index(T h) {
    return add_handle(h, false);
  }

  // For a given jobject or Metadata*, this will return the same index
  // repeatedly. The index can later be given to nmethod::oop_at or
  // metadata_at to retrieve the oop or metadata.
  // However, the value must not be changed via nmethod::oop_addr_at.
  int find_index(T h) {
    int index = maybe_find_index(h);
    if (index < 0) {  // previously unallocated
      index = add_handle(h, true);
    }
    return index;
  }
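  //
  // Usage sketch (illustrative only; 'rec', 'h1' and 'h2' are hypothetical):
  //
  //   int a = rec.find_index(h1);      // first use of h1 allocates an index
  //   int b = rec.find_index(h1);      // same handle => same index (a == b)
  //   int c = rec.allocate_index(h2);  // always a fresh index, never shared;
  //                                    //  suitable for patching via oop_addr_at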

  // Returns the size of the generated oop/metadata table, for sizing the
  // CodeBlob. Must be called after all values are allocated!
  int size();

  // Retrieve the value at a given index.
  T at(int index);

  int count() {
    if (_handles == NULL) return 0;
    // the NULL at index 0 is always virtually present as the first entry
    return _handles->length() + first_index;
  }

  // Helper function; returns false for NULL or Universe::non_oop_word().
  bool is_real(T h) {
    return h != NULL && h != (T)Universe::non_oop_word();
  }

  // copy the generated table to the nmethod
  void copy_values_to(nmethod* nm);

  bool is_unused() { return _handles == NULL && !_complete; }
#ifdef ASSERT
  bool is_complete() { return _complete; }
#endif

 private:
  // variant of find_index which does not allocate if not found (yields -1)
  int maybe_find_index(T h);

  // leaky hash table of handle => index, to help detect duplicate insertion
  template <class X> class IndexCache : public ResourceObj {
    // This class is only used by the ValueRecorder class.
    friend class ValueRecorder;
    enum {
      _log_cache_size = 9,
      _cache_size = (1<<_log_cache_size),
      // Index entries are ints.  The LSBit is a collision indicator.
      _collision_bit_shift = 0,
      _collision_bit = 1,
      _index_shift = _collision_bit_shift+1
    };
    int _cache[_cache_size];
    static juint cache_index(X handle) {
      juint ci = (int) (intptr_t) handle;
      ci ^= ci >> (BitsPerByte*2);
      ci += ci >> (BitsPerByte*1);
      return ci & (_cache_size-1);
    }
    int* cache_location(X handle) {
      return &_cache[ cache_index(handle) ];
    }
    static bool cache_location_collision(int* cloc) {
      return ((*cloc) & _collision_bit) != 0;
    }
    static int cache_location_index(int* cloc) {
      return (*cloc) >> _index_shift;
    }
    static void set_cache_location_index(int* cloc, int index) {
      int cval0 = (*cloc);
      int cval1 = (index << _index_shift);
      if (cval0 != 0 && cval1 != cval0)  cval1 += _collision_bit;
      (*cloc) = cval1;
    }
    IndexCache();
  };
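
  // Worked example of the cache encoding (illustrative values only; the
  // lookup code that consults the cache lives in oopRecorder.cpp):
  //   cache_index folds the handle's pointer bits (xor in bits 16 and up,
  //   add bits 8 and up) and masks to the low _log_cache_size bits, picking
  //   one of the 512 slots in _cache.
  //   set_cache_location_index(cloc, 5) on an empty slot stores 5 << 1 = 10.
  //   A later set_cache_location_index(cloc, 7) stores (7 << 1) | 1 = 15:
  //   index 7 with the collision bit set, so the cached index is only a hint
  //   and the handle list must be rechecked.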

  void maybe_initialize();
  int add_handle(T h, bool make_findable);

  enum { null_index = 0, first_index = 1, index_cache_threshold = 20 };

  GrowableArray<T>*    _handles;   // ordered list (first is always NULL)
  GrowableArray<int>*  _no_finds;  // all unfindable indexes; usually empty
  IndexCache<T>*       _indexes;   // map: handle -> its probable index
  Arena*               _arena;
  bool                 _complete;

#ifdef ASSERT
  static int _find_index_calls, _hit_indexes, _missed_indexes;
#endif
};

class OopRecorder : public ResourceObj {
 private:
  ValueRecorder<jobject>      _oops;
  ValueRecorder<Metadata*>    _metadata;
 public:
  OopRecorder(Arena* arena = NULL): _oops(arena), _metadata(arena) {}

  int allocate_oop_index(jobject h) {
    return _oops.allocate_index(h);
  }
  int find_index(jobject h) {
    return _oops.find_index(h);
  }
  jobject oop_at(int index) {
    return _oops.at(index);
  }
  int oop_size() {
    return _oops.size();
  }
  int oop_count() {
    return _oops.count();
  }
  bool is_real(jobject h) {
    return _oops.is_real(h);
  }

  int allocate_metadata_index(Metadata* h) {
    return _metadata.allocate_index(h);
  }
  int find_index(Metadata* h) {
    return _metadata.find_index(h);
  }
  Metadata* metadata_at(int index) {
    return _metadata.at(index);
  }
  int metadata_size() {
    return _metadata.size();
  }
  int metadata_count() {
    return _metadata.count();
  }
  bool is_real(Metadata* h) {
    return _metadata.is_real(h);
  }

  bool is_unused() {
    return _oops.is_unused() && _metadata.is_unused();
  }

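  // Calling size() marks each underlying table complete; no further values
  // should be added after this point.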
  void freeze() {
    _oops.size();
    _metadata.size();
  }

  void copy_values_to(nmethod* nm) {
    if (!_oops.is_unused()) {
      _oops.copy_values_to(nm);
    }
    if (!_metadata.is_unused()) {
      _metadata.copy_values_to(nm);
    }
  }

#ifdef ASSERT
  bool is_complete() {
    assert(_oops.is_complete() == _metadata.is_complete(), "must agree");
    return _oops.is_complete();
  }
#endif
};
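
// Typical flow (a sketch, not taken from this file; 'recorder', 'arena',
// 'obj', 'm' and 'nm' are hypothetical, and real call sites in the
// compilers differ):
//
//   OopRecorder* recorder = new OopRecorder(arena);
//   int oop_i  = recorder->find_index(obj);    // jobject   -> stable index
//   int meta_i = recorder->find_index(m);      // Metadata* -> stable index
//   ...                                        // emit code/relocations using them
//   int nbytes = recorder->oop_size() + recorder->metadata_size();
//                                              // freezes the tables; used in sizing
//   recorder->copy_values_to(nm);              // install both tables into the nmethod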


#endif // SHARE_VM_CODE_OOPRECORDER_HPP