/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1StringDedupTable.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "memory/padded.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/mutexLocker.hpp"

//
// List of deduplication table entries. Links table
// entries together using their _next fields.
//
class G1StringDedupEntryList : public CHeapObj<mtGC> {
private:
  G1StringDedupEntry* _list;
  size_t              _length;

public:
  G1StringDedupEntryList() :
    _list(NULL),
    _length(0) {
  }

  void add(G1StringDedupEntry* entry) {
    entry->set_next(_list);
    _list = entry;
    _length++;
  }

  G1StringDedupEntry* remove() {
    G1StringDedupEntry* entry = _list;
    if (entry != NULL) {
      _list = entry->next();
      _length--;
    }
    return entry;
  }

  G1StringDedupEntry* remove_all() {
    G1StringDedupEntry* list = _list;
    _list = NULL;
    return list;
  }

  size_t length() {
    return _length;
  }
};

//
// Cache of deduplication table entries. This cache provides fast allocation and
// reuse of table entries to lower the pressure on the underlying allocator.
// But more importantly, it provides fast/deferred freeing of table entries. This
// is important because freeing of table entries is done during stop-the-world
// phases and it is not uncommon for large numbers of entries to be freed at once.
// Table entries that are freed during these phases are placed onto a freelist in
// the cache. The deduplication thread, which executes in a concurrent phase, will
// later reuse or free the underlying memory for these entries.
//
// The cache allows for single-threaded allocations and multi-threaded frees.
// Allocations are synchronized by StringDedupTable_lock as part of a table
// modification.
//
class G1StringDedupEntryCache : public CHeapObj<mtGC> {
private:
  // One cache/overflow list per GC worker to allow lock-less freeing of
  // entries while doing a parallel scan of the table. Using PaddedEnd to
  // avoid false sharing.
  size_t                             _nlists;
  size_t                             _max_list_length;
  PaddedEnd<G1StringDedupEntryList>* _cached;
  PaddedEnd<G1StringDedupEntryList>* _overflowed;

public:
  G1StringDedupEntryCache(size_t max_size);
  ~G1StringDedupEntryCache();

  // Set max number of table entries to cache.
  void set_max_size(size_t max_size);

  // Get a table entry from the cache, or allocate a new entry if the cache is empty.
  G1StringDedupEntry* alloc();

  // Insert a table entry into the cache.
  void free(G1StringDedupEntry* entry, uint worker_id);

  // Returns current number of entries in the cache.
  size_t size();

  // Deletes overflowed entries.
  void delete_overflowed();
};

G1StringDedupEntryCache::G1StringDedupEntryCache(size_t max_size) :
  _nlists(ParallelGCThreads),
  _max_list_length(0),
  _cached(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)),
  _overflowed(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)) {
  set_max_size(max_size);
}

G1StringDedupEntryCache::~G1StringDedupEntryCache() {
  ShouldNotReachHere();
}

void G1StringDedupEntryCache::set_max_size(size_t size) {
  _max_list_length = size / _nlists;
}

G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
  for (size_t i = 0; i < _nlists; i++) {
    G1StringDedupEntry* entry = _cached[i].remove();
    if (entry != NULL) {
      return entry;
    }
  }
  return new G1StringDedupEntry();
}

void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) {
  assert(entry->obj() != NULL, "Double free");
  assert(worker_id < _nlists, "Invalid worker id");

  entry->set_obj(NULL);
  entry->set_hash(0);

  if (_cached[worker_id].length() < _max_list_length) {
    // Cache is not full
    _cached[worker_id].add(entry);
  } else {
    // Cache is full, add to overflow list for later deletion
    _overflowed[worker_id].add(entry);
  }
}

size_t G1StringDedupEntryCache::size() {
  size_t size = 0;
  for (size_t i = 0; i < _nlists; i++) {
    size += _cached[i].length();
  }
  return size;
}

void G1StringDedupEntryCache::delete_overflowed() {
  double start = os::elapsedTime();
  uintx count = 0;

  for (size_t i = 0; i < _nlists; i++) {
    G1StringDedupEntry* entry;

    {
      // The overflow list can be modified during safepoints, therefore
      // we temporarily join the suspendible thread set while removing
      // all entries from the list.
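      // While this thread is joined to the suspendible thread set no safepoint
      // can be in progress, and the overflow lists are only pushed to during
      // safepoints, so remove_all() below cannot race with concurrent frees.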
      SuspendibleThreadSetJoiner sts_join;
      entry = _overflowed[i].remove_all();
    }

    // Delete all entries
    while (entry != NULL) {
      G1StringDedupEntry* next = entry->next();
      delete entry;
      entry = next;
      count++;
    }
  }

  double end = os::elapsedTime();
  log_trace(gc, stringdedup)("Deleted " UINTX_FORMAT " entries, " G1_STRDEDUP_TIME_FORMAT_MS,
                             count, G1_STRDEDUP_TIME_PARAM_MS(end - start));
}

G1StringDedupTable*      G1StringDedupTable::_table = NULL;
G1StringDedupEntryCache* G1StringDedupTable::_entry_cache = NULL;

const size_t G1StringDedupTable::_min_size = (1 << 10);   // 1024
const size_t G1StringDedupTable::_max_size = (1 << 24);   // 16777216
const double G1StringDedupTable::_grow_load_factor = 2.0; // Grow table at 200% load
const double G1StringDedupTable::_shrink_load_factor = _grow_load_factor / 3.0; // Shrink table at 67% load
const double G1StringDedupTable::_max_cache_factor = 0.1; // Cache a maximum of 10% of the table size
const uintx  G1StringDedupTable::_rehash_multiple = 60;   // Hash bucket has 60 times more collisions than expected
const uintx  G1StringDedupTable::_rehash_threshold = (uintx)(_rehash_multiple * _grow_load_factor);

uintx G1StringDedupTable::_entries_added = 0;
uintx G1StringDedupTable::_entries_removed = 0;
uintx G1StringDedupTable::_resize_count = 0;
uintx G1StringDedupTable::_rehash_count = 0;

G1StringDedupTable::G1StringDedupTable(size_t size, jint hash_seed) :
  _size(size),
  _entries(0),
  _grow_threshold((uintx)(size * _grow_load_factor)),
  _shrink_threshold((uintx)(size * _shrink_load_factor)),
  _rehash_needed(false),
  _hash_seed(hash_seed) {
  assert(is_power_of_2(size), "Table size must be a power of 2");
  _buckets = NEW_C_HEAP_ARRAY(G1StringDedupEntry*, _size, mtGC);
  memset(_buckets, 0, _size * sizeof(G1StringDedupEntry*));
}

G1StringDedupTable::~G1StringDedupTable() {
  FREE_C_HEAP_ARRAY(G1StringDedupEntry*, _buckets);
}

void G1StringDedupTable::create() {
  assert(_table == NULL, "One string deduplication table allowed");
  _entry_cache = new G1StringDedupEntryCache(_min_size * _max_cache_factor);
  _table = new G1StringDedupTable(_min_size);
}

void G1StringDedupTable::add(typeArrayOop value, bool latin1, unsigned int hash, G1StringDedupEntry** list) {
  G1StringDedupEntry* entry = _entry_cache->alloc();
  entry->set_obj(value);
  entry->set_hash(hash);
  entry->set_latin1(latin1);
  entry->set_next(*list);
  *list = entry;
  _entries++;
}

void G1StringDedupTable::remove(G1StringDedupEntry** pentry, uint worker_id) {
  G1StringDedupEntry* entry = *pentry;
  *pentry = entry->next();
  _entry_cache->free(entry, worker_id);
}

void G1StringDedupTable::transfer(G1StringDedupEntry** pentry, G1StringDedupTable* dest) {
  G1StringDedupEntry* entry = *pentry;
  *pentry = entry->next();
  unsigned int hash = entry->hash();
  size_t index = dest->hash_to_index(hash);
  G1StringDedupEntry** list = dest->bucket(index);
  entry->set_next(*list);
  *list = entry;
}

// Returns true if the two character arrays are either the same object
// or have the same length and identical contents.
bool G1StringDedupTable::equals(typeArrayOop value1, typeArrayOop value2) {
  return (oopDesc::equals(value1, value2) ||
          (value1->length() == value2->length() &&
           (!memcmp(value1->base(T_BYTE),
                    value2->base(T_BYTE),
                    value1->length() * sizeof(jbyte)))));
}

typeArrayOop G1StringDedupTable::lookup(typeArrayOop value, bool latin1, unsigned int hash,
                                        G1StringDedupEntry** list, uintx &count) {
  for (G1StringDedupEntry* entry = *list; entry != NULL; entry = entry->next()) {
    if (entry->hash() == hash && entry->latin1() == latin1) {
      typeArrayOop existing_value = entry->obj();
      if (equals(value, existing_value)) {
        // Match found
        return existing_value;
      }
    }
    count++;
  }

  // Not found
  return NULL;
}

typeArrayOop G1StringDedupTable::lookup_or_add_inner(typeArrayOop value, bool latin1, unsigned int hash) {
  size_t index = hash_to_index(hash);
  G1StringDedupEntry** list = bucket(index);
  uintx count = 0;

  // Lookup in list
  typeArrayOop existing_value = lookup(value, latin1, hash, list, count);

  // Check if rehash is needed
  if (count > _rehash_threshold) {
    _rehash_needed = true;
  }

  if (existing_value == NULL) {
    // Not found, add new entry
    add(value, latin1, hash, list);

    // Update statistics
    _entries_added++;
  }

  return existing_value;
}

unsigned int G1StringDedupTable::hash_code(typeArrayOop value, bool latin1) {
  unsigned int hash;
  int length = value->length();
  if (latin1) {
    const jbyte* data = (jbyte*)value->base(T_BYTE);
    if (use_java_hash()) {
      hash = java_lang_String::hash_code(data, length);
    } else {
      hash = AltHashing::murmur3_32(_table->_hash_seed, data, length);
    }
  } else {
    length /= sizeof(jchar) / sizeof(jbyte); // Convert number of bytes to number of chars
    const jchar* data = (jchar*)value->base(T_CHAR);
    if (use_java_hash()) {
      hash = java_lang_String::hash_code(data, length);
    } else {
      hash = AltHashing::murmur3_32(_table->_hash_seed, data, length);
    }
  }

  return hash;
}

void G1StringDedupTable::deduplicate(oop java_string, G1StringDedupStat& stat) {
  assert(java_lang_String::is_instance(java_string), "Must be a string");
  NoSafepointVerifier nsv;

  stat.inc_inspected();

  typeArrayOop value = java_lang_String::value(java_string);
  if (value == NULL) {
    // String has no value
    stat.inc_skipped();
    return;
  }

  bool latin1 = java_lang_String::is_latin1(java_string);
  unsigned int hash = 0;

  if (use_java_hash()) {
    // Get hash code from cache
    hash = java_lang_String::hash(java_string);
  }

  if (hash == 0) {
    // Compute hash
    hash = hash_code(value, latin1);
    stat.inc_hashed();

    if (use_java_hash() && hash != 0) {
      // Store hash code in cache
      java_lang_String::set_hash(java_string, hash);
    }
  }

  typeArrayOop existing_value = lookup_or_add(value, latin1, hash);
  if (oopDesc::equals(existing_value, value)) {
    // Same value, already known
    stat.inc_known();
    return;
  }

  // Get size of value array
  uintx size_in_bytes = value->size() * HeapWordSize;
  stat.inc_new(size_in_bytes);

  if (existing_value != NULL) {
    // Enqueue the reference to make sure it is kept alive. Concurrent mark might
    // otherwise declare it dead if there are no other strong references to this object.
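    // (Enqueuing pushes the oop onto the SATB mark queue, the same mechanism
    // used by the G1 pre-write barrier, so the current marking cycle treats
    // existing_value as live.)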
    G1SATBCardTableModRefBS::enqueue(existing_value);

    // Existing value found, deduplicate string
    java_lang_String::set_value(java_string, typeArrayOop(existing_value));
    if (UseG1GC) {
      if (G1CollectedHeap::heap()->is_in_young(value)) {
        stat.inc_deduped_young(size_in_bytes);
      } else {
        stat.inc_deduped_old(size_in_bytes);
      }
    } else {
      stat.inc_deduped_young(size_in_bytes);
    }
  }
}

G1StringDedupTable* G1StringDedupTable::prepare_resize() {
  size_t size = _table->_size;

  // Check if the hashtable needs to be resized
  if (_table->_entries > _table->_grow_threshold) {
    // Grow table, double the size
    size *= 2;
    if (size > _max_size) {
      // Too big, don't resize
      return NULL;
    }
  } else if (_table->_entries < _table->_shrink_threshold) {
    // Shrink table, halve the size
    size /= 2;
    if (size < _min_size) {
      // Too small, don't resize
      return NULL;
    }
  } else if (StringDeduplicationResizeALot) {
    // Force grow
    size *= 2;
    if (size > _max_size) {
      // Too big, force shrink instead
      size /= 4;
    }
  } else {
    // Resize not needed
    return NULL;
  }

  // Update statistics
  _resize_count++;

  // Update max cache size
  _entry_cache->set_max_size(size * _max_cache_factor);

  // Allocate the new table. The new table will be populated by workers
  // calling unlink_or_oops_do() and finally installed by finish_resize().
  return new G1StringDedupTable(size, _table->_hash_seed);
}

void G1StringDedupTable::finish_resize(G1StringDedupTable* resized_table) {
  assert(resized_table != NULL, "Invalid table");

  resized_table->_entries = _table->_entries;

  // Free old table
  delete _table;

  // Install new table
  _table = resized_table;
}

void G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id) {
  // The table is divided into partitions to allow lock-less parallel processing by
  // multiple worker threads. A worker thread first claims a partition, which ensures
  // exclusive access to that part of the table, then continues to process it. To allow
  // shrinking of the table in parallel we also need to make sure that the same worker
  // thread processes all partitions where entries will hash to the same destination
  // partition. Since the table size is always a power of two and we always shrink by
  // dividing the table in half, we know that for a given partition there is only one
  // other partition whose entries will hash to the same destination partition. That
  // other partition is always the sibling partition in the second half of the table.
  // For example, if the table is divided into 8 partitions, the sibling of partition 0
  // is partition 4, the sibling of partition 1 is partition 5, etc.
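  // As a concrete illustration (assuming 4K pages and 8-byte bucket pointers,
  // giving partitions of 512 buckets): a worker that claims buckets [0, 512) of
  // a 2048-bucket table also scans the sibling range [1024, 1536), since entries
  // in both ranges hash to buckets [0, 512) if the table is shrunk to 1024 buckets.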
  size_t table_half = _table->_size / 2;

  // Let each partition be one page worth of buckets
  size_t partition_size = MIN2(table_half, os::vm_page_size() / sizeof(G1StringDedupEntry*));
  assert(table_half % partition_size == 0, "Invalid partition size");

  // Number of entries removed during the scan
  uintx removed = 0;

  for (;;) {
    // Grab next partition to scan
    size_t partition_begin = cl->claim_table_partition(partition_size);
    size_t partition_end = partition_begin + partition_size;
    if (partition_begin >= table_half) {
      // End of table
      break;
    }

    // Scan the partition followed by the sibling partition in the second half of the table
    removed += unlink_or_oops_do(cl, partition_begin, partition_end, worker_id);
    removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id);
  }

  // Delayed update to avoid contention on the table lock
  if (removed > 0) {
    MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
    _table->_entries -= removed;
    _entries_removed += removed;
  }
}

uintx G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl,
                                            size_t partition_begin,
                                            size_t partition_end,
                                            uint worker_id) {
  uintx removed = 0;
  for (size_t bucket = partition_begin; bucket < partition_end; bucket++) {
    G1StringDedupEntry** entry = _table->bucket(bucket);
    while (*entry != NULL) {
      oop* p = (oop*)(*entry)->obj_addr();
      if (cl->is_alive(*p)) {
        cl->keep_alive(p);
        if (cl->is_resizing()) {
          // We are resizing the table, transfer entry to the new table
          _table->transfer(entry, cl->resized_table());
        } else {
          if (cl->is_rehashing()) {
            // We are rehashing the table, rehash the entry but keep it
            // in the table. We can't transfer entries into the new table
            // at this point since we don't have exclusive access to all
            // destination partitions. finish_rehash() will do a single-threaded
            // transfer of all entries.
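            // The recomputed hash uses the new hash seed installed by
            // prepare_rehash(), so transfer() in finish_rehash() will place
            // the entry in its new bucket.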
            typeArrayOop value = (typeArrayOop)*p;
            bool latin1 = (*entry)->latin1();
            unsigned int hash = hash_code(value, latin1);
            (*entry)->set_hash(hash);
          }

          // Move to next entry
          entry = (*entry)->next_addr();
        }
      } else {
        // Not alive, remove entry from table
        _table->remove(entry, worker_id);
        removed++;
      }
    }
  }

  return removed;
}

G1StringDedupTable* G1StringDedupTable::prepare_rehash() {
  if (!_table->_rehash_needed && !StringDeduplicationRehashALot) {
    // Rehash not needed
    return NULL;
  }

  // Update statistics
  _rehash_count++;

  // Compute new hash seed
  _table->_hash_seed = AltHashing::compute_seed();

  // Allocate the new table, same size and hash seed
  return new G1StringDedupTable(_table->_size, _table->_hash_seed);
}

void G1StringDedupTable::finish_rehash(G1StringDedupTable* rehashed_table) {
  assert(rehashed_table != NULL, "Invalid table");

  // Move all newly rehashed entries into the correct buckets in the new table
  for (size_t bucket = 0; bucket < _table->_size; bucket++) {
    G1StringDedupEntry** entry = _table->bucket(bucket);
    while (*entry != NULL) {
      _table->transfer(entry, rehashed_table);
    }
  }

  rehashed_table->_entries = _table->_entries;

  // Free old table
  delete _table;

  // Install new table
  _table = rehashed_table;
}

void G1StringDedupTable::verify() {
  for (size_t bucket = 0; bucket < _table->_size; bucket++) {
    // Verify entries
    G1StringDedupEntry** entry = _table->bucket(bucket);
    while (*entry != NULL) {
      typeArrayOop value = (*entry)->obj();
      guarantee(value != NULL, "Object must not be NULL");
      guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap");
      guarantee(!value->is_forwarded(), "Object must not be forwarded");
      guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
      bool latin1 = (*entry)->latin1();
      unsigned int hash = hash_code(value, latin1);
      guarantee((*entry)->hash() == hash, "Table entry has incorrect hash");
      guarantee(_table->hash_to_index(hash) == bucket, "Table entry has incorrect index");
      entry = (*entry)->next_addr();
    }

    // Verify that we do not have entries with identical oops or identical arrays.
    // We only need to compare entries in the same bucket. If the same oop or an
    // identical array has been inserted more than once into different/incorrect
    // buckets the verification step above will catch that.
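    // entry2 starts at entry1's successor, so each pair of entries in the
    // bucket is compared exactly once.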
    G1StringDedupEntry** entry1 = _table->bucket(bucket);
    while (*entry1 != NULL) {
      typeArrayOop value1 = (*entry1)->obj();
      bool latin1_1 = (*entry1)->latin1();
      G1StringDedupEntry** entry2 = (*entry1)->next_addr();
      while (*entry2 != NULL) {
        typeArrayOop value2 = (*entry2)->obj();
        bool latin1_2 = (*entry2)->latin1();
        guarantee(latin1_1 != latin1_2 || !equals(value1, value2), "Table entries must not have identical arrays");
        entry2 = (*entry2)->next_addr();
      }
      entry1 = (*entry1)->next_addr();
    }
  }
}

void G1StringDedupTable::clean_entry_cache() {
  _entry_cache->delete_overflowed();
}

void G1StringDedupTable::print_statistics() {
  Log(gc, stringdedup) log;
  log.debug("  Table");
  log.debug("    Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS,
            G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)));
  log.debug("    Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT, _table->_size, _min_size, _max_size);
  log.debug("    Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT,
            _table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed);
  log.debug("    Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")",
            _resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0);
  log.debug("    Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x", _rehash_count, _rehash_threshold, _table->_hash_seed);
  log.debug("    Age Threshold: " UINTX_FORMAT, StringDeduplicationAgeThreshold);
}