/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_MARKOOP_HPP
#define SHARE_VM_OOPS_MARKOOP_HPP

#include "oops/oop.hpp"

// The markOop describes the header of an object.
//
// Note that the mark is not a real oop but just a word.
// It is placed in the oop hierarchy for historical reasons.
//
// Bit-format of an object header (most significant first, big endian layout below):
//
//  32 bits:
//  --------
//             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
//             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
//             "1" :23        epoch:2 age:4    biased_lock:1 lock:2 (biased always locked object)
//             size:32 ------------------------------------------>| (CMS free block)
//             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
//
//  64 bits:
//  --------
//  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
//  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
//  "1" :54        epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased always locked object)
//  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
//  size:64 ----------------------------------------------------->| (CMS free block)
//
//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
//
//  - hash contains the identity hash value: largest value is
//    31 bits, see os::random().  Also, 64-bit vm's require
//    a hash value no bigger than 32 bits because they will not
//    properly generate a mask larger than that: see library_call.cpp
//    and c1_CodePatterns_sparc.cpp.
//
//  - the biased lock pattern is used to bias a lock toward a given
//    thread. When this pattern is set in the low three bits, the lock
//    is either biased toward a given thread or "anonymously" biased,
//    indicating that it is possible for it to be biased. When the
//    lock is biased toward a given thread, locking and unlocking can
//    be performed by that thread without using atomic operations.
//    When a lock's bias is revoked, it reverts back to the normal
//    locking scheme described below.
//
//    Note that we are overloading the meaning of the "unlocked" state
//    of the header. Because we steal a bit from the age we can
//    guarantee that the bias pattern will never be seen for a truly
//    unlocked object.
//
//    Note also that the biased state contains the age bits normally
//    contained in the object header. Large increases in scavenge
//    times were seen when these bits were absent and an arbitrary age
//    assigned to all biased objects, because they tended to consume a
//    significant fraction of the eden semispaces and were not
//    promoted promptly, causing an increase in the amount of copying
//    performed. The runtime system aligns all JavaThread* pointers to
//    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
//    to make room for the age bits & the epoch bits (used in support of
//    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
//
//    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
//    [0           | epoch | age | 1 | 01]       lock is anonymously biased
//
//  - the two lock bits are used to describe three states: locked, unlocked, and monitor.
//
//    [ptr             | 00]  locked             ptr points to real header on stack
//    [header      | 0 | 01]  unlocked           regular object header
//    [ptr             | 10]  monitor            inflated lock (header is swapped out)
//    [ptr             | 11]  marked             used by markSweep to mark an object
//                                               not valid at any other time
//
//    We assume that stack/thread pointers have the lowest two bits cleared.
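//
//    For illustration only (not part of this header): a minimal sketch of
//    how a runtime might dispatch on the low-order bits of a raw mark word
//    "m", following the bit patterns above:
//
//      uintptr_t m = ...;                    // raw header word
//      if ((m & 0x7) == 0x5) {
//        // biased_lock:1 over lock:01 => biased (or anonymously biased)
//      } else switch (m & 0x3) {
//        case 0: /* stack-locked: m points to the displaced header */ break;
//        case 1: /* unlocked (neutral) */                             break;
//        case 2: /* inflated: m encodes an ObjectMonitor* */          break;
//        case 3: /* marked by GC */                                   break;
//      }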
//
// Always locked: since displaced and monitor references require memory at a
// fixed address, and the hash code can be displaced, the most efficient way
// to provide a *permanent lock* is to specialize the biased pattern (even
// when biased locking isn't enabled). Since biased_lock_alignment for the
// thread reference doesn't use the lowest bit ("2 << thread_shift"), we can
// use this illegal thread pointer alignment to denote the "always locked"
// pattern.
//
//    [ <unused> | larval |1| epoch | age | 1 | 01]       permanently locked
//
//    A private buffered value is always locked and can be in a larval state.
//

class BasicLock;
class ObjectMonitor;
class JavaThread;

class markOopDesc: public oopDesc {
 private:
  // Conversion
  uintptr_t value() const { return (uintptr_t) this; }

 public:
  // Constants
  enum { age_bits                 = 4,
         lock_bits                = 2,
         biased_lock_bits         = 1,
         max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
         hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
         cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
         epoch_bits               = 2,
         always_locked_bits       = 1,
         larval_bits              = 1
  };
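
  // Worked example of the constants above (illustrative, not normative):
  // on a 64-bit VM, BitsPerWord == 64, so
  //   max_hash_bits = 64 - 4 - 2 - 1 = 57, clamped to hash_bits = 31;
  // on a 32-bit VM, BitsPerWord == 32, so
  //   max_hash_bits = 32 - 4 - 2 - 1 = 25 and hash_bits = 25,
  // which matches the hash:25 field of the 32-bit layout diagram above.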
  // The biased locking code currently requires that the age bits be
  // contiguous to the lock bits.
  enum { lock_shift               = 0,
         biased_lock_shift        = lock_bits,
         age_shift                = lock_bits + biased_lock_bits,
         cms_shift                = age_shift + age_bits,
         hash_shift               = cms_shift + cms_bits,
         epoch_shift              = hash_shift,
         thread_shift             = epoch_shift + epoch_bits,
         larval_shift             = thread_shift + always_locked_bits
  };

  enum { lock_mask                = right_n_bits(lock_bits),
         lock_mask_in_place       = lock_mask << lock_shift,
         biased_lock_mask         = right_n_bits(lock_bits + biased_lock_bits),
         biased_lock_mask_in_place= biased_lock_mask << lock_shift,
         biased_lock_bit_in_place = 1 << biased_lock_shift,
         age_mask                 = right_n_bits(age_bits),
         age_mask_in_place        = age_mask << age_shift,
         epoch_mask               = right_n_bits(epoch_bits),
         epoch_mask_in_place      = epoch_mask << epoch_shift,
         cms_mask                 = right_n_bits(cms_bits),
         cms_mask_in_place        = cms_mask << cms_shift,
#ifndef _WIN64
         hash_mask                = right_n_bits(hash_bits),
         hash_mask_in_place       = (address_word)hash_mask << hash_shift,
#endif
         larval_mask              = right_n_bits(larval_bits),
         larval_mask_in_place     = larval_mask << larval_shift
  };

  // Alignment of JavaThread pointers encoded in object header required by biased locking
  enum { biased_lock_alignment    = 2 << thread_shift
  };

#ifdef _WIN64
  // These values are too big for an enum on Win64 (where enumerators are
  // 32-bit ints), so they are defined as static constants instead.
  const static uintptr_t hash_mask = right_n_bits(hash_bits);
  const static uintptr_t hash_mask_in_place  =
                            (address_word)hash_mask << hash_shift;
#endif

  enum { locked_value             = 0,
         unlocked_value           = 1,
         monitor_value            = 2,
         marked_value             = 3,
         biased_lock_pattern      = 5,
         always_locked_pattern    = 1 << thread_shift | biased_lock_pattern
  };

  enum { no_hash                  = 0 };  // no hash value assigned

  enum { no_hash_in_place         = (address_word)no_hash << hash_shift,
         no_lock_in_place         = unlocked_value
  };

  enum { max_age                  = age_mask };

  enum { max_bias_epoch           = epoch_mask };

  enum { larval_state_pattern     = (1 << larval_shift) };
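
  // Worked values on a 64-bit VM (cms_bits == 1), for illustration only:
  //   age_shift = 3, cms_shift = 7, hash_shift = epoch_shift = 8,
  //   thread_shift = 10, larval_shift = 11
  //   biased_lock_pattern   = 0b101  (biased_lock:1 over lock:01)
  //   always_locked_pattern = 0x405  ((1 << 10) | 0b101)
  //   larval_state_pattern  = 0x800  (1 << 11)
  //   biased_lock_alignment = 0x800  (2 << 10), so a JavaThread* encoded in
  //   a mark word leaves the low 11 bits free for the pattern bits above.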
  static markOop always_locked_prototype() {
    return markOop(always_locked_pattern);
  }

  bool is_always_locked() const { return mask_bits(value(), always_locked_pattern) == always_locked_pattern; }

  // Biased Locking accessors.
  // These must be checked by all code which calls into the
  // ObjectSynchronizer and other code. The biasing is not understood
  // by the lower-level CAS-based locking code, although the runtime
  // fixes up biased locks to be compatible with it when a bias is
  // revoked.
  bool has_bias_pattern() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
  }
  JavaThread* biased_locker() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    assert(!is_always_locked(), "invariant");
    return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
  }
  // Indicates that the mark has the bias bit set but that it has not
  // yet been biased toward a particular thread
  bool is_biased_anonymously() const {
    return (has_bias_pattern() && (biased_locker() == NULL));
  }
  // Indicates the epoch in which this bias was acquired. If the epoch
  // changes due to too many bias revocations occurring, the biases
  // from the previous epochs are all considered invalid.
  int bias_epoch() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
  }
  markOop set_bias_epoch(int epoch) {
    assert(has_bias_pattern(), "should not call this otherwise");
    assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
    assert(!is_always_locked(), "Rebias needs to fail");
    return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
  }
  markOop incr_bias_epoch() {
    return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
  }
  // Prototype mark for initialization
  static markOop biased_locking_prototype() {
    return markOop( biased_lock_pattern );
  }

  // lock accessors (note that these assume lock_shift == 0)
  bool is_locked()   const {
    return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
  }
  bool is_unlocked() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
  }
  bool is_marked()   const {
    return (mask_bits(value(), lock_mask_in_place) == marked_value);
  }
  bool is_neutral()  const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }

  // Special temporary state of the markOop while being inflated.
  // Code that looks at marks outside a lock needs to take this into account.
  bool is_being_inflated() const { return (value() == 0); }

  // Distinguished markword value - used when inflating over
  // an existing stacklock.  0 indicates the markword is "BUSY".
  // Lockword mutators that use a LD...CAS idiom should always
  // check for and avoid overwriting a 0 value installed by some
  // other thread.  (They should spin or block instead.  The 0 value
  // is transient and *should* be short-lived).
  static markOop INFLATING() { return (markOop) 0; }    // inflate-in-progress
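
  // Illustrative sketch (not part of this class) of the LD...CAS idiom the
  // comment above describes; the retry loop and mark computation are assumed:
  //
  //   markOop observed = obj->mark();                      // LD
  //   if (observed == markOopDesc::INFLATING()) {
  //     continue;                 // 0 is transient: spin/retry, don't overwrite
  //   }
  //   markOop desired = ...;     // new mark computed from the observed value
  //   if (obj->cas_set_mark(desired, observed) == observed) {
  //     // success: no other thread changed the mark in between
  //   }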
  // Should this header be preserved during GC?
  inline bool must_be_preserved(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias(oop obj_containing_mark) const;

  // Should this header (including its age bits) be preserved in the
  // case of a promotion failure during scavenge?
  // Note that we special case this situation. We want to avoid
  // calling BiasedLocking::preserve_marks()/restore_marks() (which
  // decrease the number of mark words that need to be preserved
  // during GC) during each scavenge. During scavenges in which there
  // is no promotion failure, we actually don't need to call the above
  // routines at all, since we don't mutate and re-initialize the
  // marks of promoted objects using init_mark(). However, during
  // scavenges which result in promotion failure, we do re-initialize
  // the mark words of objects, meaning that we should have called
  // these mark word preservation routines. Currently there's no good
  // place in which to call them in any of the scavengers (although
  // guarded by appropriate locks we could make one), but the
  // observation is that promotion failures are quite rare and
  // reducing the number of mark words preserved during them isn't a
  // high priority.
  inline bool must_be_preserved_for_promotion_failure(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const;

  // Should this header be preserved during a scavenge where CMS is
  // the old generation?
  // (This is basically the same body as must_be_preserved_for_promotion_failure(),
  // but takes the Klass* as argument instead)
  inline bool must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const;
  inline bool must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const;

  // WARNING: The following routines are used EXCLUSIVELY by
  // synchronization functions. They are not really gc safe.
  // They must get updated if the markOop layout gets changed.
  markOop set_unlocked() const {
    return markOop(value() | unlocked_value);
  }
  bool has_locker() const {
    return ((value() & lock_mask_in_place) == locked_value);
  }
  BasicLock* locker() const {
    assert(has_locker(), "check");
    return (BasicLock*) value();
  }
  bool has_monitor() const {
    return ((value() & monitor_value) != 0);
  }
  ObjectMonitor* monitor() const {
    assert(has_monitor(), "check");
    // Use xor instead of &~ to provide one extra tag-bit check.
    return (ObjectMonitor*) (value() ^ monitor_value);
  }
  bool has_displaced_mark_helper() const {
    return ((value() & unlocked_value) == 0);
  }
  markOop displaced_mark_helper() const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    return *(markOop*)ptr;
  }
  void set_displaced_mark_helper(markOop m) const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    *(markOop*)ptr = m;
  }
  markOop copy_set_hash(intptr_t hash) const {
    intptr_t tmp = value() & (~hash_mask_in_place);
    tmp |= ((hash & hash_mask) << hash_shift);
    return (markOop)tmp;
  }
  // used only as a value stored into a BasicLock to indicate
  // that the lock is using a heavyweight monitor
  static markOop unused_mark() {
    return (markOop) marked_value;
  }
  // the following three functions create the markOop to be stored
  // into the object header; they encode BasicLock, monitor, or bias
  // information
  static markOop encode(BasicLock* lock) {
    return (markOop) lock;
  }
  static markOop encode(ObjectMonitor* monitor) {
    intptr_t tmp = (intptr_t) monitor;
    return (markOop) (tmp | monitor_value);
  }
  static markOop encode(JavaThread* thread, uint age, int bias_epoch) {
    intptr_t tmp = (intptr_t) thread;
    assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
    assert(age <= max_age, "age too large");
    assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
    return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
  }

  // used to encode pointers during GC
  markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }

  // GC marking operations
  markOop set_marked()   { return markOop((value() & ~lock_mask_in_place) | marked_value); }
  markOop set_unmarked() { return markOop((value() & ~lock_mask_in_place) | unlocked_value); }

  // age operations
  uint    age()               const { return mask_bits(value() >> age_shift, age_mask); }
  markOop set_age(uint v) const {
    assert((v & ~age_mask) == 0, "shouldn't overflow age field");
    return markOop((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
  }
  markOop incr_age()          const { return age() == max_age ? markOop(this) : set_age(age() + 1); }

  // hash operations
  intptr_t hash() const {
    return mask_bits(value() >> hash_shift, hash_mask);
  }

  bool has_no_hash() const {
    return hash() == no_hash;
  }
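
  // Illustrative sketch (not part of this class): lazily installing an
  // identity hash into a neutral mark, assuming a non-zero generated hash:
  //
  //   markOop mark = obj->mark();
  //   if (mark->is_neutral() && mark->has_no_hash()) {
  //     intptr_t hash = ...;                       // e.g. from os::random()
  //     markOop temp = mark->copy_set_hash(hash);
  //     if (obj->cas_set_mark(temp, mark) != mark) {
  //       // lost the race: re-read and use the hash installed by the winner
  //     }
  //   }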
  // private buffered value operations
  markOop enter_larval_state() const {
    return markOop((value() & ~larval_mask_in_place) | larval_state_pattern);
  }
  markOop exit_larval_state() const {
    return markOop(value() & ~larval_mask_in_place);
  }
  bool is_larval_state() const {
    return (value() & larval_mask_in_place) == larval_state_pattern;
  }

  // Prototype mark for initialization
  static markOop prototype() {
    return markOop( no_hash_in_place | no_lock_in_place );
  }

  // Helper function for restoration of unmarked mark oops during GC
  static inline markOop prototype_for_object(oop obj);

  // Debugging
  void print_on(outputStream* st) const;

  // Prepare address of oop for placement into mark
  inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }

  // Recover address of oop from encoded form used in mark
  inline void* decode_pointer() { if (has_bias_pattern()) return NULL; return clear_lock_bits(); }

  // These markOops indicate cms free chunk blocks and not objects.
  // In 64 bit, the markOop is set to distinguish them from oops.
  // These are defined in 32 bit mode for vmStructs.
  const static uintptr_t cms_free_chunk_pattern  = 0x1;

  // Constants for the size field.
  enum { size_shift  = cms_shift + cms_bits,
         size_bits   = 35    // needed to span a 32G heap with compressed oops
  };
  // These values are too big for an enum on Win64, so they are defined
  // as static constants instead.
  const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
                                     NOT_LP64(0);
  const static uintptr_t size_mask_in_place =
                                     (address_word)size_mask << size_shift;

#ifdef _LP64
  static markOop cms_free_prototype() {
    return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
                   ((cms_free_chunk_pattern & cms_mask) << cms_shift));
  }
  uintptr_t cms_encoding() const {
    return mask_bits(value() >> cms_shift, cms_mask);
  }
  bool is_cms_free_chunk() const {
    return is_neutral() &&
           (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern;
  }

  size_t get_size() const       { return (size_t)(value() >> size_shift); }
  static markOop set_size_and_free(size_t size) {
    assert((size & ~size_mask) == 0, "shouldn't overflow size field");
    return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
                   (((intptr_t)size & size_mask) << size_shift));
  }
#endif // _LP64
};

#endif // SHARE_VM_OOPS_MARKOOP_HPP