--- old/src/hotspot/share/classfile/classFileParser.cpp	2020-02-21 02:14:22.118544158 -0800
+++ new/src/hotspot/share/classfile/classFileParser.cpp	2020-02-21 02:14:21.685908507 -0800
@@ -83,6 +83,7 @@
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/resourceHash.hpp"
+#include "utilities/stringUtils.hpp"
 #include "utilities/utf8.hpp"
 
 #if INCLUDE_CDS
@@ -943,10 +944,17 @@
 }
 
 // Side-effects: populates the _local_interfaces field
-void ClassFileParser::parse_interfaces(const ClassFileStream* const stream,
-                                       const int itfs_len,
-                                       ConstantPool* const cp,
+void ClassFileParser::parse_interfaces(const ClassFileStream* stream,
+                                       int itfs_len,
+                                       ConstantPool* cp,
                                        bool* const has_nonstatic_concrete_methods,
+                                       // FIXME: lots of these functions
+                                       // declare their parameters as const,
+                                       // which adds only noise to the code.
+                                       // Remove the spurious const modifiers.
+                                       // Many are of the form "const int x"
+                                       // or "T* const x".
+                                       bool* const is_declared_atomic,
                                        TRAPS) {
   assert(stream != NULL, "invariant");
   assert(cp != NULL, "invariant");
@@ -994,10 +1002,14 @@
         interf->class_in_module_of_loader()));
     }
 
-    if (InstanceKlass::cast(interf)->has_nonstatic_concrete_methods()) {
+    InstanceKlass* ik = InstanceKlass::cast(interf);
+    if (ik->has_nonstatic_concrete_methods()) {
       *has_nonstatic_concrete_methods = true;
     }
-    _local_interfaces->at_put(index, InstanceKlass::cast(interf));
+    if (ik->is_declared_atomic()) {
+      *is_declared_atomic = true;
+    }
+    _local_interfaces->at_put(index, ik);
   }
 
   if (!_need_verify || itfs_len <= 1) {
@@ -4346,6 +4358,7 @@
   Klass** nonstatic_value_type_klasses = NULL;
   unsigned int value_type_oop_map_count = 0;
   int not_flattened_value_types = 0;
+  int not_atomic_value_types = 0;
 
   int max_nonstatic_value_type = fac->count[NONSTATIC_FLATTENABLE] + 1;
 
@@ -4380,7 +4393,16 @@
       }
       ValueKlass* vk = ValueKlass::cast(klass);
       // Conditions to apply flattening or not should be defined in a single place
-      if ((ValueFieldMaxFlatSize < 0) || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize) {
+      bool too_big_to_flatten = (ValueFieldMaxFlatSize >= 0 &&
+                                 (vk->size_helper() * HeapWordSize) > ValueFieldMaxFlatSize);
+      bool too_atomic_to_flatten = vk->is_declared_atomic();
+      bool too_volatile_to_flatten = fs.access_flags().is_volatile();
+      if (vk->is_naturally_atomic()) {
+        too_atomic_to_flatten = false;
+        //too_volatile_to_flatten = false; //FIXME
+        // volatile fields are currently never flattened, this could change in the future
+      }
+      if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten)) {
         nonstatic_value_type_indexes[nonstatic_value_type_count] = fs.index();
         nonstatic_value_type_klasses[nonstatic_value_type_count] = klass;
         nonstatic_value_type_count++;
@@ -4390,6 +4412,9 @@
           value_type_oop_map_count += vklass->nonstatic_oop_map_count();
         }
         fs.set_flattened(true);
+        if (!vk->is_atomic()) {  // flat and non-atomic: take note
+          not_atomic_value_types++;
+        }
       } else {
         not_flattened_value_types++;
         fs.set_flattened(false);
@@ -4848,6 +4873,19 @@
   info->_static_field_size = static_field_size;
   info->_nonstatic_field_size = nonstatic_field_size;
   info->_has_nonstatic_fields = has_nonstatic_fields;
+
+  // A value type is naturally atomic if it has just one field, and
+  // that field is simple enough.
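+  // For example (editor's illustration, not asserted by this change):
+  // an inline type wrapping a single 'int' occupies one 32-bit slot,
+  // so ordinary loads and stores of its instances are already atomic.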
+  info->_is_naturally_atomic = (is_value_type() &&
+                                !super_has_nonstatic_fields &&
+                                (nonstatic_fields_count <= 1) &&
+                                (not_atomic_value_types == 0) &&
+                                (nonstatic_contended_count == 0));
+  // This may be too restrictive, since if all the fields fit in 64
+  // bits we could make the decision to align instances of this class
+  // to 64-bit boundaries, and load and store them as single words.
+  // And on machines which supported larger atomics we could similarly
+  // allow larger values to be atomic, if properly aligned.
 }
 
 void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) {
@@ -5983,6 +6021,7 @@
   }
 }
 
+// Called from a factory method in KlassFactory, not from this file.
 InstanceKlass* ClassFileParser::create_instance_klass(bool changed_by_loadhook, TRAPS) {
   if (_klass != NULL) {
     return _klass;
@@ -6052,6 +6091,9 @@
   // Not yet: supers are done below to support the new subtype-checking fields
   ik->set_nonstatic_field_size(_field_info->_nonstatic_field_size);
   ik->set_has_nonstatic_fields(_field_info->_has_nonstatic_fields);
+  if (_field_info->_is_naturally_atomic && ik->is_value()) {
+    ik->set_is_naturally_atomic();
+  }
   if (_is_empty_value) {
     ik->set_is_empty_value();
   }
@@ -6101,6 +6143,9 @@
   ik->set_major_version(_major_version);
   ik->set_has_nonstatic_concrete_methods(_has_nonstatic_concrete_methods);
   ik->set_declares_nonstatic_concrete_methods(_declares_nonstatic_concrete_methods);
+  if (_is_declared_atomic) {
+    ik->set_is_declared_atomic();
+  }
 
   if (_unsafe_anonymous_host != NULL) {
     assert (ik->is_unsafe_anonymous(), "should be the same");
@@ -6433,6 +6478,8 @@
   _has_contended_fields(false),
   _has_flattenable_fields(false),
   _is_empty_value(false),
+  _is_naturally_atomic(false),
+  _is_declared_atomic(false),
   _has_finalizer(false),
   _has_empty_finalizer(false),
   _has_vanilla_constructor(false),
@@ -6772,6 +6819,7 @@
                    _itfs_len,
                    cp,
                    &_has_nonstatic_concrete_methods,
+                   &_is_declared_atomic,
                    CHECK);
 
   assert(_local_interfaces != NULL, "invariant");
@@ -6779,8 +6827,8 @@
   // Fields (offsets are filled in later)
   _fac = new FieldAllocationCount();
   parse_fields(stream,
-               _access_flags.is_interface(),
-               _access_flags.is_value_type(),
+               is_interface(),
+               is_value_type(),
                _fac,
                cp,
                cp_size,
@@ -6792,8 +6840,8 @@
   // Methods
   AccessFlags promoted_flags;
   parse_methods(stream,
-                _access_flags.is_interface(),
-                _access_flags.is_value_type(),
+                is_interface(),
+                is_value_type(),
                 &promoted_flags,
                 &_has_final_method,
                 &_declares_nonstatic_concrete_methods,
@@ -6842,7 +6890,7 @@
   // We check super class after class file is parsed and format is checked
   if (_super_class_index > 0 && NULL ==_super_klass) {
     Symbol* const super_class_name = cp->klass_name_at(_super_class_index);
-    if (_access_flags.is_interface()) {
+    if (is_interface()) {
       // Before attempting to resolve the superclass, check for class format
       // errors not checked yet.
       guarantee_property(super_class_name == vmSymbols::java_lang_Object(),
@@ -6863,6 +6911,9 @@
     if (_super_klass->has_nonstatic_concrete_methods()) {
       _has_nonstatic_concrete_methods = true;
     }
+    if (_super_klass->is_declared_atomic()) {
+      _is_declared_atomic = true;
+    }
 
     if (_super_klass->is_interface()) {
       ResourceMark rm(THREAD);
@@ -6889,6 +6940,18 @@
     }
   }
 
+  if (_class_name == vmSymbols::java_lang_NonTearable() && _loader_data->class_loader() == NULL) {
+    // This is the original source of this condition.
+    // It propagates by inheritance, as if testing "instanceof NonTearable".
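+    // Editor's note: only java.lang.NonTearable itself takes this branch;
+    // every other class picks the bit up from the interface loop in
+    // parse_interfaces() or from the superclass check above.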
+    _is_declared_atomic = true;
+  } else if (*ForceNonTearable != '\0') {
+    // Allow a command line switch to force the same atomicity property:
+    const char* class_name_str = _class_name->as_C_string();
+    if (StringUtils::class_list_match(ForceNonTearable, class_name_str)) {
+      _is_declared_atomic = true;
+    }
+  }
+
   // Compute the transitive list of all unique interfaces implemented by this class
   _transitive_interfaces =
     compute_transitive_interfaces(_super_klass,
@@ -6917,7 +6980,7 @@
                                   CHECK);
 
   // Size of Java itable (in words)
-  _itable_size = _access_flags.is_interface() ? 0 :
+  _itable_size = is_interface() ? 0 :
     klassItable::compute_itable_size(_transitive_interfaces);
 
   assert(_fac != NULL, "invariant");
--- old/src/hotspot/share/classfile/classFileParser.hpp	2020-02-21 02:14:24.054533772 -0800
+++ new/src/hotspot/share/classfile/classFileParser.hpp	2020-02-21 02:14:23.680350668 -0800
@@ -73,6 +73,7 @@
     int _nonstatic_field_size;
     int _static_field_size;
     bool _has_nonstatic_fields;
+    bool _is_naturally_atomic;
   };
 
   // Parser for for .class files
@@ -199,6 +200,8 @@
   bool _has_flattenable_fields;
   bool _is_empty_value;
+  bool _is_naturally_atomic;
+  bool _is_declared_atomic;
 
   // precomputed flags
   bool _has_finalizer;
@@ -246,6 +249,7 @@
                         const int itfs_len,
                         ConstantPool* const cp,
                         bool* has_nonstatic_concrete_methods,
+                        bool* is_declared_atomic,
                         TRAPS);
 
   const InstanceKlass* parse_super_class(ConstantPool* const cp,
--- old/src/hotspot/share/classfile/fieldLayoutBuilder.cpp	2020-02-21 02:14:25.624248115 -0800
+++ new/src/hotspot/share/classfile/fieldLayoutBuilder.cpp	2020-02-21 02:14:25.250301528 -0800
@@ -539,7 +539,10 @@
   _has_nonstatic_fields(false),
   _is_contended(is_contended),
   _is_value_type(is_value_type),
-  _has_flattening_information(is_value_type) {}
+  _has_flattening_information(is_value_type),
+  _has_nonatomic_values(false),
+  _atomic_field_count(0)
+ {}
 
 FieldGroup* FieldLayoutBuilder::get_or_create_contended_group(int g) {
   assert(g > 0, "must only be called for named contended groups");
@@ -579,6 +582,7 @@
     group = _static_fields;
   } else {
     _has_nonstatic_fields = true;
+    _atomic_field_count++;  // we might decrement this
     if (fs.is_contended()) {
       int g = fs.contended_group();
       if (g == 0) {
@@ -626,14 +630,23 @@
                                                 _protection_domain, true, THREAD);
       assert(klass != NULL, "Sanity check");
       ValueKlass* vk = ValueKlass::cast(klass);
-      bool has_flattenable_size = (ValueFieldMaxFlatSize < 0)
-              || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize;
-      // volatile fields are currently never flattened, this could change in the future
-      bool flattened = !fs.access_flags().is_volatile() && has_flattenable_size;
-      if (flattened) {
+      bool too_big_to_flatten = (ValueFieldMaxFlatSize >= 0 &&
+                                 (vk->size_helper() * HeapWordSize) > ValueFieldMaxFlatSize);
+      bool too_atomic_to_flatten = vk->is_declared_atomic();
+      bool too_volatile_to_flatten = fs.access_flags().is_volatile();
+      if (vk->is_naturally_atomic()) {
+        too_atomic_to_flatten = false;
+        //too_volatile_to_flatten = false; //FIXME
+        // volatile fields are currently never flattened, this could change in the future
+      }
+      if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten)) {
         group->add_flattened_field(fs, vk);
         _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
         fs.set_flattened(true);
+        if (!vk->is_atomic()) {  // flat and non-atomic: take note
+          _has_nonatomic_values = true;
+          _atomic_field_count--;  // every other field is atomic but this one
+        }
       } else {
         _nonstatic_oopmap_count++;
         group->add_oop_field(fs);
@@ -674,6 +687,7 @@
     group = _static_fields;
   } else {
     _has_nonstatic_fields = true;
+    _atomic_field_count++;  // we might decrement this
     group = _root_group;
   }
   assert(group != NULL, "invariant");
@@ -716,13 +730,24 @@
                                                 _protection_domain, true, CHECK);
       assert(klass != NULL, "Sanity check");
       ValueKlass* vk = ValueKlass::cast(klass);
-      bool flattened = (ValueFieldMaxFlatSize < 0)
-              || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize;
-      if (flattened) {
+      bool too_big_to_flatten = (ValueFieldMaxFlatSize >= 0 &&
+                                 (vk->size_helper() * HeapWordSize) > ValueFieldMaxFlatSize);
+      bool too_atomic_to_flatten = vk->is_declared_atomic();
+      bool too_volatile_to_flatten = fs.access_flags().is_volatile();
+      if (vk->is_naturally_atomic()) {
+        too_atomic_to_flatten = false;
+        //too_volatile_to_flatten = false; //FIXME
+        // volatile fields are currently never flattened, this could change in the future
+      }
+      if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten)) {
         group->add_flattened_field(fs, vk);
         _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
         field_alignment = vk->get_alignment();
         fs.set_flattened(true);
+        if (!vk->is_atomic()) {  // flat and non-atomic: take note
+          _has_nonatomic_values = true;
+          _atomic_field_count--;  // every other field is atomic but this one
+        }
       } else {
         _nonstatic_oopmap_count++;
         field_alignment = type2aelembytes(T_OBJECT);
@@ -983,6 +1008,19 @@
   _info->_nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
   _info->_has_nonstatic_fields = _has_nonstatic_fields;
 
+  // A value type is naturally atomic if it has just one field, and
+  // that field is simple enough.
+  _info->_is_naturally_atomic = (_is_value_type &&
+                                 (_atomic_field_count <= 1) &&
+                                 !_has_nonatomic_values &&
+                                 _contended_groups.is_empty());
+  // This may be too restrictive, since if all the fields fit in 64
+  // bits we could make the decision to align instances of this class
+  // to 64-bit boundaries, and load and store them as single words.
+  // And on machines which supported larger atomics we could similarly
+  // allow larger values to be atomic, if properly aligned.
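+  // Editor's illustration (hypothetical cases, not from this change):
+  // a value class with two 'long' fields keeps _atomic_field_count == 2,
+  // so it is not naturally atomic; one whose only field is a flattened
+  // non-atomic value drops back to a count of 0 but is still rejected
+  // via _has_nonatomic_values.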
+
 
   if (PrintFieldLayout) {
     ResourceMark rm;
     tty->print_cr("Layout of class %s", _classname->as_C_string());
--- old/src/hotspot/share/classfile/fieldLayoutBuilder.hpp	2020-02-21 02:14:27.195899114 -0800
+++ new/src/hotspot/share/classfile/fieldLayoutBuilder.hpp	2020-02-21 02:14:26.826773923 -0800
@@ -256,6 +256,8 @@
   bool _is_contended;
   bool _is_value_type;
   bool _has_flattening_information;
+  bool _has_nonatomic_values;
+  int _atomic_field_count;
 
   FieldGroup* get_or_create_contended_group(int g);
 
--- old/src/hotspot/share/classfile/vmSymbols.hpp	2020-02-21 02:14:28.762974763 -0800
+++ new/src/hotspot/share/classfile/vmSymbols.hpp	2020-02-21 02:14:28.367763243 -0800
@@ -64,6 +64,7 @@
   template(java_lang_Thread,                          "java/lang/Thread")                         \
   template(java_lang_ThreadGroup,                     "java/lang/ThreadGroup")                    \
   template(java_lang_Cloneable,                       "java/lang/Cloneable")                      \
+  template(java_lang_NonTearable,                     "java/lang/NonTearable")                    \
   template(java_lang_Throwable,                       "java/lang/Throwable")                      \
   template(java_lang_ClassLoader,                     "java/lang/ClassLoader")                    \
   template(java_lang_ClassLoader_NativeLibrary,       "java/lang/ClassLoader\x024NativeLibrary")  \
--- old/src/hotspot/share/oops/arrayKlass.hpp	2020-02-21 02:14:30.387623416 -0800
+++ new/src/hotspot/share/oops/arrayKlass.hpp	2020-02-21 02:14:30.021831654 -0800
@@ -69,6 +69,10 @@
   // Presented with an ArrayKlass, which storage_properties should be encoded into arrayOop
   virtual ArrayStorageProperties storage_properties() { return ArrayStorageProperties::empty; }
 
+  // Are loads and stores to this concrete array type atomic?
+  // Note that Object[] is naturally atomic, but its subtypes may not be.
+  virtual bool element_access_is_atomic() { return true; }
+
   // Testing operation
   DEBUG_ONLY(bool is_array_klass_slow() const { return true; })
 
--- old/src/hotspot/share/oops/instanceKlass.hpp	2020-02-21 02:14:31.898409208 -0800
+++ new/src/hotspot/share/oops/instanceKlass.hpp	2020-02-21 02:14:31.525984207 -0800
@@ -291,7 +291,9 @@
     _misc_is_being_redefined          = 1 << 17, // used for locking redefinition
     _misc_has_contended_annotations   = 1 << 18, // has @Contended annotation
     _misc_has_value_fields            = 1 << 19, // has value fields and related embedded section is not empty
-    _misc_is_empty_value              = 1 << 20  // empty value type
+    _misc_is_empty_value              = 1 << 20, // empty value type
+    _misc_is_naturally_atomic         = 1 << 21, // loaded/stored in one instruction
+    _misc_is_declared_atomic          = 1 << 22  // implements jl.NonTearable
   };
   u2 loader_type_bits() {
     return _misc_is_shared_boot_class|_misc_is_shared_platform_class|_misc_is_shared_app_class;
@@ -432,6 +434,32 @@
     _misc_flags |= _misc_is_empty_value;
   }
 
+  // Note: The naturally_atomic property only applies to
+  // inline classes; it is never true on identity classes.
+  // The bit is placed on instanceKlass for convenience.
+
+  // Query if h/w provides atomic load/store for instances.
+  bool is_naturally_atomic() const {
+    return (_misc_flags & _misc_is_naturally_atomic) != 0;
+  }
+  // Initialized in the class file parser, not changed later.
+  void set_is_naturally_atomic() {
+    _misc_flags |= _misc_is_naturally_atomic;
+  }
+
+  // Query if this class implements jl.NonTearable or was
+  // mentioned in the JVM option ForceNonTearable.
+  // This bit can occur anywhere, but is only significant
+  // for inline classes *and* their super types.
+  // It inherits from supers along with NonTearable.
+  bool is_declared_atomic() const {
+    return (_misc_flags & _misc_is_declared_atomic) != 0;
+  }
+  // Initialized in the class file parser, not changed later.
+  void set_is_declared_atomic() {
+    _misc_flags |= _misc_is_declared_atomic;
+  }
+
   // field sizes
   int nonstatic_field_size() const         { return _nonstatic_field_size; }
   void set_nonstatic_field_size(int size)  { _nonstatic_field_size = size; }
--- old/src/hotspot/share/oops/valueArrayKlass.cpp	2020-02-21 02:14:33.494737760 -0800
+++ new/src/hotspot/share/oops/valueArrayKlass.cpp	2020-02-21 02:14:33.126386052 -0800
@@ -83,7 +83,7 @@
 
 ValueArrayKlass* ValueArrayKlass::allocate_klass(Klass* element_klass, TRAPS) {
   assert(ValueArrayFlatten, "Flatten array required");
-  assert(ValueKlass::cast(element_klass)->is_atomic() || (!ValueArrayAtomicAccess), "Atomic by-default");
+  assert(ValueKlass::cast(element_klass)->is_naturally_atomic() || (!ValueArrayAtomicAccess), "Atomic by-default");
 
   /*
    *  MVT->LWorld, now need to allocate secondaries array types, just like objArrayKlass...
--- old/src/hotspot/share/oops/valueArrayKlass.hpp	2020-02-21 02:14:35.254058136 -0800
+++ new/src/hotspot/share/oops/valueArrayKlass.hpp	2020-02-21 02:14:34.877481131 -0800
@@ -87,7 +87,8 @@
     return element_klass()->contains_oops();
   }
 
-  bool is_atomic() {
+  // Override.
+  bool element_access_is_atomic() {
     return element_klass()->is_atomic();
   }
 
--- old/src/hotspot/share/oops/valueKlass.cpp	2020-02-21 02:14:38.706946961 -0800
+++ new/src/hotspot/share/oops/valueKlass.cpp	2020-02-21 02:14:37.998783292 -0800
@@ -139,10 +139,6 @@
   return oop;
 }
 
-bool ValueKlass::is_atomic() {
-  return (nonstatic_field_size() * heapOopSize) <= longSize;
-}
-
 int ValueKlass::nonstatic_oop_count() {
   int oops = 0;
   int map_count = nonstatic_oop_map_count();
@@ -195,6 +191,11 @@
     return false;
   }
 
+  // Declared atomic but not naturally atomic.
+  if (is_declared_atomic() && !is_naturally_atomic()) {
+    return false;
+  }
+
   return true;
 }
 
@@ -253,7 +254,7 @@
 }
 
 Klass* ValueKlass::allocate_value_array_klass(TRAPS) {
-  if (flatten_array() && (is_atomic() || (!ValueArrayAtomicAccess))) {
+  if (flatten_array() && (is_naturally_atomic() || (!ValueArrayAtomicAccess))) {
     return ValueArrayKlass::allocate_klass(ArrayStorageProperties::flattened_and_null_free, this, THREAD);
   }
   return ObjArrayKlass::allocate_objArray_klass(ArrayStorageProperties::null_free, 1, this, THREAD);
--- old/src/hotspot/share/oops/valueKlass.hpp	2020-02-21 02:14:41.270970735 -0800
+++ new/src/hotspot/share/oops/valueKlass.hpp	2020-02-21 02:14:40.855389424 -0800
@@ -214,8 +214,8 @@
   address data_for_oop(oop o) const;
   oop oop_for_data(address data) const;
 
-  // Query if h/w provides atomic load/store
-  bool is_atomic();
+  // Query if this class promises atomicity one way or another
+  bool is_atomic() { return is_naturally_atomic() || is_declared_atomic(); }
 
   bool flatten_array();
 
--- old/src/hotspot/share/opto/valuetypenode.cpp	2020-02-21 02:14:43.127730558 -0800
+++ new/src/hotspot/share/opto/valuetypenode.cpp	2020-02-21 02:14:42.685265655 -0800
@@ -399,6 +399,7 @@
 
   // Do not let stores that initialize this buffer be reordered with a subsequent
   // store that would make this buffer accessible by other threads.
+  // FIXME: coordinate with ready_to_publish(kit, alloc_oop)
   AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn());
   assert(alloc != NULL, "must have an allocation node");
   kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
@@ -627,6 +628,7 @@
   Node* mark = kit->make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
   mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_mask_in_place)));
   kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), kit->gvn().type(mark_addr)->is_ptr(), MemNode::unordered);
+  ready_to_publish(kit, obj);
 
   // Do not let stores that initialize this buffer be reordered with a subsequent
   // store that would make this buffer accessible by other threads.
@@ -641,6 +643,17 @@
   return res;
 }
 
+void ValueTypeBaseNode::ready_to_publish(GraphKit* kit, Node* base) const {
+  // Do not let stores that initialize this buffer be reordered with
+  // a subsequent store that would make it accessible by other threads.
+  // Required for correct non-flat array element publication.
+  // (See jtreg test ValueTearing.java.)
+  Node* raw_address_proj = NULL;  //FIXME
+  kit->insert_mem_bar(Op_MemBarStoreStore, raw_address_proj);
+  // Fails to prevent array element tearing:
+  //kit->insert_mem_bar_volatile(Op_MemBarStoreStore, Compile::AliasIdxRaw, raw_address_proj);
+}
+
 Node* ValueTypeNode::is_loaded(PhaseGVN* phase, ciValueKlass* vk, Node* base, int holder_offset) {
   if (vk == NULL) {
     vk = value_klass();
--- old/src/hotspot/share/opto/valuetypenode.hpp	2020-02-21 02:14:44.917445911 -0800
+++ new/src/hotspot/share/opto/valuetypenode.hpp	2020-02-21 02:14:44.387626274 -0800
@@ -89,6 +89,9 @@
   ValueTypeBaseNode* allocate(GraphKit* kit, bool safe_for_replace = true);
   bool is_allocated(PhaseGVN* phase) const;
 
+  // Ensure that writes to base are committed before a subsequent store.
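+  // (Editor's note: implemented in valuetypenode.cpp by emitting a
+  // MemBarStoreStore; see the FIXME there about the raw-address projection.)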
+  void ready_to_publish(GraphKit* kit, Node* base) const;
+
   void replace_call_results(GraphKit* kit, Node* call, Compile* C);
 
   // Allocate all non-flattened value type fields
--- old/src/hotspot/share/prims/jvm.cpp	2020-02-21 02:14:46.774973935 -0800
+++ new/src/hotspot/share/prims/jvm.cpp	2020-02-21 02:14:46.378397768 -0800
@@ -2332,10 +2332,7 @@
   if ((o == NULL) || (!k->is_array_klass())) {
     THROW_0(vmSymbols::java_lang_IllegalArgumentException());
   }
-  if (k->is_valueArray_klass()) {
-    return ValueArrayKlass::cast(k)->is_atomic();
-  }
-  return true;
+  return ArrayKlass::cast(k)->element_access_is_atomic();
 JVM_END
 
 JVM_ENTRY(jobject, JVM_ArrayEnsureAccessAtomic(JNIEnv *env, jclass unused, jobject array))
@@ -2347,7 +2344,7 @@
   }
   if (k->is_valueArray_klass()) {
     ValueArrayKlass* vk = ValueArrayKlass::cast(k);
-    if (!vk->is_atomic()) {
+    if (!vk->element_access_is_atomic()) {
       /**
        * Need to decide how to implement:
        *
--- old/src/hotspot/share/runtime/globals.hpp	2020-02-21 02:14:48.482743617 -0800
+++ new/src/hotspot/share/runtime/globals.hpp	2020-02-21 02:14:48.096377676 -0800
@@ -2520,6 +2520,11 @@
   develop(bool, ScalarizeValueTypes, true,                                  \
           "Scalarize value types in compiled code")                         \
                                                                             \
+  diagnostic(ccstrlist, ForceNonTearable, "",                               \
+          "List of inline classes which are forced to be atomic "           \
+          "(whitespace and commas separate names, "                         \
+          "and leading and trailing stars '*' are wildcards)")              \
+                                                                            \
   product(bool, PrintNewLayout, false,                                      \
           "Print layout compute by new algorithm")                          \
                                                                             \
--- old/src/hotspot/share/utilities/stringUtils.cpp	2020-02-21 02:14:50.193917067 -0800
+++ new/src/hotspot/share/utilities/stringUtils.cpp	2020-02-21 02:14:49.804739820 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/ostream.hpp"
 #include "utilities/stringUtils.hpp"
 
 int StringUtils::replace_no_expand(char* string, const char* from, const char* to) {
@@ -65,3 +66,281 @@
 
   return 2.0 * (double) hit / (double) total;
 }
+
+class StringMatcher {
+ public:
+  typedef int getc_function_t(const char* &source, const char* limit);
+  const getc_function_t* _pattern_getc;
+  const getc_function_t* _string_getc;
+
+  StringMatcher(getc_function_t pattern_getc,
+                getc_function_t string_getc)
+    : _pattern_getc(pattern_getc),
+      _string_getc(string_getc)
+  { }
+
+  enum {  // special results from _pattern_getc
+    string_match_comma = -0x100 + ',',
+    string_match_star  = -0x100 + '*',
+    string_match_eos   = -0x100 + '\0'
+  };
+
+ private:
+  const char*
+  skip_anchor_word(const char* match,
+                   const char* match_end,
+                   int anchor_length,
+                   const char* pattern,
+                   const char* pattern_end) {
+    assert(pattern < pattern_end && anchor_length > 0, "");
+    const char* begp = pattern;
+    int ch1 = _pattern_getc(begp, pattern_end);
+    // note that begp is now advanced over ch1
+    assert(ch1 > 0, "regular char only");
+    const char* matchp = match;
+    const char* limitp = match_end - anchor_length;
+    while (matchp <= limitp) {
+      int mch = _string_getc(matchp, match_end);
+      if (mch == ch1) {
+        const char* patp = begp;
+        const char* anchorp = matchp;
+        while (patp < pattern_end) {
+          char ch = _pattern_getc(patp, pattern_end);
+          char mch = _string_getc(anchorp, match_end);
+          if (mch != ch) {
+            anchorp = NULL;
+            break;
+          }
+        }
+        if (anchorp != NULL) {
+          return anchorp;  // Found a full copy of the anchor.
+        }
+        // That did not work, so restart the search for ch1.
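+        // (Editor's note: matchp has already been advanced past mch,
+        // so the outer loop resumes one character further along.)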
+      }
+    }
+    return NULL;
+  }
+
+ public:
+  bool string_match(const char* pattern,
+                    const char* string) {
+    return string_match(pattern, pattern + strlen(pattern),
+                        string, string + strlen(string));
+  }
+  bool string_match(const char* pattern, const char* pattern_end,
+                    const char* string, const char* string_end) {
+    const char* patp = pattern;
+    switch (_pattern_getc(patp, pattern_end)) {
+    case string_match_eos:
+      return false;  // Empty pattern is always false.
+    case string_match_star:
+      if (patp == pattern_end) {
+        return true;  // Lone star pattern is always true.
+      }
+      break;
+    }
+    patp = pattern;  // Reset after lookahead.
+    const char* matchp = string;  // NULL if failing
+    for (;;) {
+      int ch = _pattern_getc(patp, pattern_end);
+      switch (ch) {
+      case string_match_eos:
+      case string_match_comma:
+        // End of a list item; see if it's a match.
+        if (matchp == string_end) {
+          return true;
+        }
+        if (ch == string_match_comma) {
+          // Get ready to match the next item.
+          matchp = string;
+          continue;
+        }
+        return false;  // End of all items.
+
+      case string_match_star:
+        if (matchp != NULL) {
+          // Wildcard: Parse out following anchor word and look for it.
+          const char* begp = patp;
+          const char* endp = patp;
+          int anchor_len = 0;
+          for (;;) {
+            // get as many following regular characters as possible
+            endp = patp;
+            ch = _pattern_getc(patp, pattern_end);
+            if (ch <= 0) {
+              break;
+            }
+            anchor_len += 1;
+          }
+          // Anchor word [begp..endp) does not contain ch, so back up.
+          // Now do an eager match to the anchor word, and commit to it.
+          patp = endp;
+          if (ch == string_match_eos ||
+              ch == string_match_comma) {
+            // Anchor word is at end of pattern, so treat it as a fixed pattern.
+            const char* limitp = (matchp + strlen(matchp)) - anchor_len;
+            matchp = limitp;
+            patp = begp;
+            // Resume normal scanning at the only possible match position.
+            continue;
+          }
+          // Find a floating occurrence of the anchor and continue matching.
+          // Note: This is greedy; there is no backtrack here.  Good enough.
+          matchp = skip_anchor_word(matchp, string_end, anchor_len, begp, endp);
+        }
+        continue;
+      }
+      // Normal character.
+      if (matchp != NULL) {
+        int mch = _string_getc(matchp, string_end);
+        if (mch != ch) {
+          matchp = NULL;
+        }
+      }
+    }
+  }
+};
+
+// Match a wildcarded class list to a proposed class name (in internal form).
+// Commas or newlines separate multiple possible matches; stars are shell-style wildcards.
+class ClassListMatcher : public StringMatcher {
+ public:
+  ClassListMatcher()
+    : StringMatcher(pattern_list_getc, class_name_getc)
+  { }
+
+ private:
+  static int pattern_list_getc(const char* &pattern_ptr,
+                               const char* pattern_end) {
+    if (pattern_ptr == pattern_end) {
+      return string_match_eos;
+    }
+    int ch = (unsigned char) *pattern_ptr++;
+    switch (ch) {
+    case ' ': case '\t': case '\n': case '\r':
+    case ',':
+      // End of list item.
+      for (;;) {
+        switch (*pattern_ptr) {
+        case ' ': case '\t': case '\n': case '\r':
+        case ',':
+          pattern_ptr += 1;  // Collapse multiple commas or spaces.
+          continue;
+        }
+        break;
+      }
+      return string_match_comma;
+
+    case '*':
+      // Wildcard, matching any number of chars.
+      while (*pattern_ptr == '*') {
+        pattern_ptr += 1;  // Collapse multiple stars.
+      }
+      return string_match_star;
+
+    case '.':
+      ch = '/';  // Look for internal form of package separator
+      break;
+
+    case '\\':
+      // Superquote in pattern escapes * , whitespace, and itself.
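+      // (Editor's example: the pattern "\*foo" matches the literal
+      // class name "*foo"; see class_list_match_sane() below.)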
+      if (pattern_ptr < pattern_end) {
+        ch = (unsigned char) *pattern_ptr++;
+      }
+      break;
+    }
+
+    assert(ch > 0, "regular char only");
+    return ch;
+  }
+
+  static int class_name_getc(const char* &name_ptr,
+                             const char* name_end) {
+    if (name_ptr == name_end) {
+      return string_match_eos;
+    }
+    int ch = (unsigned char) *name_ptr++;
+    if (ch == '.') {
+      ch = '/';  // Normalize to internal form of package separator
+    }
+    return ch;  // plain character
+  }
+};
+
+static bool class_list_match_sane();
+
+bool StringUtils::class_list_match(const char* class_pattern_list,
+                                   const char* class_name) {
+  assert(class_list_match_sane(), "");
+  if (class_pattern_list == NULL || class_name == NULL || class_name[0] == '\0')
+    return false;
+  ClassListMatcher clm;
+  return clm.string_match(class_pattern_list, class_name);
+}
+
+#ifdef ASSERT
+static void
+class_list_match_sane(const char* pat, const char* str, bool result = true) {
+  if (result) {
+    assert(StringUtils::class_list_match(pat, str), "%s ~ %s", pat, str);
+  } else {
+    assert(!StringUtils::class_list_match(pat, str), "%s !~ %s", pat, str);
+  }
+}
+
+static bool
+class_list_match_sane() {
+  static bool done = false;
+  if (done)  return true;
+  done = true;
+  class_list_match_sane("foo", "foo");
+  class_list_match_sane("foo,", "foo");
+  class_list_match_sane(",foo,", "foo");
+  class_list_match_sane("bar,foo", "foo");
+  class_list_match_sane("bar,foo,", "foo");
+  class_list_match_sane("*", "foo");
+  class_list_match_sane("foo.bar", "foo/bar");
+  class_list_match_sane("foo/bar", "foo.bar");
+  class_list_match_sane("\\foo", "foo");
+  class_list_match_sane("\\*foo", "*foo");
+  const char* foo = "foo!";
+  char buf[100], buf2[100];
+  const int m = strlen(foo);
+  for (int n = 0; n <= 1; n++) {
+    for (int a = -1; a <= 1; a++) {
+      for (int i = 0; i <= m; i++) {
+        for (int j = i; j <= m; j++) {
+          if (j == i && j > 0)  continue;
+          for (int k = j; k <= m; k++) {
+            if (k == j && k > i)  continue;
+            for (int l = k; l <= m; l++) {
+              if (l == k && l > j)  continue;
+              char* bp = &buf[0];
+              strncpy(bp, foo + 0, i - 0); bp += i - 0;
+              *bp++ = '*';
+              strncpy(bp, foo + j, k - j); bp += k - j;
+              *bp++ = '*';
+              strncpy(bp, foo + l, m - l); bp += m - l;
+              if (n) {
+                *bp++ = 'N';  // make it fail
+              }
+              *bp++ = '\0';
+              if (a != 0) {
+                if (a < 0) {
+                  strcpy(buf2, buf);
+                  strcat(buf, "X*, ");
+                  strcat(buf, buf2);
+                } else {
+                  strcat(buf, ", Y");
+                }
+              }
+              class_list_match_sane(buf, foo, !n);
+            }
+          }
+        }
+      }
+    }
+  }
+  return true;
+}
+#endif //ASSERT
--- old/src/hotspot/share/utilities/stringUtils.hpp	2020-02-21 02:14:51.912768814 -0800
+++ new/src/hotspot/share/utilities/stringUtils.hpp	2020-02-21 02:14:51.528700066 -0800
@@ -40,6 +40,10 @@
   // Compute string similarity based on Dice's coefficient
   static double similarity(const char* str1, size_t len1,
                            const char* str2, size_t len2);
+
+  // Match a wildcarded class list to a proposed class name (in internal form).
+  // Commas separate multiple possible matches; stars are shell-style wildcards.
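+  // Editor's examples, mirroring the debug self-test in stringUtils.cpp:
+  //   class_list_match("bar,foo", "foo")     -> true (second item matches)
+  //   class_list_match("foo.bar", "foo/bar") -> true ('.' matches '/')
+  //   class_list_match("\\*foo", "*foo")     -> true (escaped literal star)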
+  static bool class_list_match(const char* class_list, const char* class_name);
 };
 
 #endif // SHARE_UTILITIES_STRINGUTILS_HPP
--- old/test/hotspot/jtreg/runtime/valhalla/valuetypes/FlattenableSemanticTest.java	2020-02-21 02:14:53.644595999 -0800
+++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/FlattenableSemanticTest.java	2020-02-21 02:14:53.261203649 -0800
@@ -37,7 +37,9 @@
  * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator Point.java JumboValue.java
  * @compile -XDemitQtypes -XDenableValueTypes -XDallowWithFieldOperator FlattenableSemanticTest.java
  * @run main/othervm -Xint -XX:ValueFieldMaxFlatSize=64 runtime.valhalla.valuetypes.FlattenableSemanticTest
+ * @run main/othervm -Xint -XX:ForceNonTearable=* runtime.valhalla.valuetypes.FlattenableSemanticTest
  * @run main/othervm -Xcomp -XX:ValueFieldMaxFlatSize=64 runtime.valhalla.valuetypes.FlattenableSemanticTest
+ * @run main/othervm -Xcomp -XX:ForceNonTearable=* runtime.valhalla.valuetypes.FlattenableSemanticTest
  * // debug: -XX:+PrintValueLayout -XX:-ShowMessageBoxOnError
  */
 public class FlattenableSemanticTest {
--- old/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypeArray.java	2020-02-21 02:14:55.157342239 -0800
+++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypeArray.java	2020-02-21 02:14:54.795608072 -0800
@@ -39,6 +39,7 @@
  * @run main/othervm -Xint -XX:ValueArrayElemMaxFlatSize=0 runtime.valhalla.valuetypes.ValueTypeArray
  * @run main/othervm -Xcomp -XX:ValueArrayElemMaxFlatSize=-1 runtime.valhalla.valuetypes.ValueTypeArray
  * @run main/othervm -Xcomp -XX:ValueArrayElemMaxFlatSize=0 runtime.valhalla.valuetypes.ValueTypeArray
+ * @run main/othervm -Xbatch -XX:ForceNonTearable=* runtime.valhalla.valuetypes.ValueTypeArray
 */
 public class ValueTypeArray {
   public static void main(String[] args) {
--- old/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypeDensity.java	2020-02-21 02:14:56.862907984 -0800
+++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypeDensity.java	2020-02-21 02:14:56.335523439 -0800
@@ -39,6 +39,9 @@
  * @run main/othervm -Xcomp -XX:ValueArrayElemMaxFlatSize=-1
  *                   -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -XX:+WhiteBoxAPI ValueTypeDensity
+ * @run main/othervm -Xbatch -XX:ForceNonTearable=*
+ *                   -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI ValueTypeDensity
 */
 
 public class ValueTypeDensity {
--- old/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypesTest.java	2020-02-21 02:14:58.455430469 -0800
+++ new/test/hotspot/jtreg/runtime/valhalla/valuetypes/ValueTypesTest.java	2020-02-21 02:14:58.048541159 -0800
@@ -62,6 +62,12 @@
  *                   -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
  *                   -Djava.lang.invoke.MethodHandle.DUMP_CLASS_FILES=false
  *                   runtime.valhalla.valuetypes.ValueTypesTest
+ * @run main/othervm -Xbatch -Xmx128m -XX:-ShowMessageBoxOnError
+ *                   -XX:+ExplicitGCInvokesConcurrent
+ *                   -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -Djava.lang.invoke.MethodHandle.DUMP_CLASS_FILES=false
+ *                   -XX:ForceNonTearable=*
+ *                   runtime.valhalla.valuetypes.ValueTypesTest
 */
 public class ValueTypesTest {
--- /dev/null	2020-02-21 02:15:00.278539000 -0800
+++ new/src/java.base/share/classes/java/lang/NonTearable.java	2020-02-21 02:14:59.674881938 -0800
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package java.lang;
+
+/**
+ * An inline class implements the {@code NonTearable} interface to
+ * request that the JVM take extra care to avoid structure tearing
+ * when loading or storing any value of the class to a field or array
+ * element.  Normally, only fields declared {@code volatile} are
+ * protected against structure tearing, but a class that implements
+ * this marker interface will never have its values torn, even when
+ * they are stored in array elements or in non-{@code volatile}
+ * fields, and even when multiple threads perform racing writes.
+ *
+ * <p> An inline instance of multiple components is said to be "torn"
+ * when two racing threads compete to update those components, and one
+ * thread updates some components while another thread updates other
+ * components.  The resulting inline value stored in the heap might be
+ * a hybrid composed of field values from both racing writes.  In
+ * extreme cases, this hybrid might be a value which is impossible
+ * to construct by normal means, and if data integrity or security
+ * depends on proper construction, the class should be declared as
+ * implementing {@code NonTearable}.
+ *
+ * <p> Non-inline classes can implement {@code NonTearable}, and
+ * interfaces can extend it, in the usual manner.  The special effect
+ * on tearing applies to inline classes which implement this type,
+ * either directly, or indirectly via a supertype.  Thus, it is not
+ * correct to assume that an object {@code x} for which {@code x
+ * instanceof NonTearable} is in fact an inline class instance.
+ * It is also not correct to assume that tearing is possible for
+ * classes which do not implement this marker interface, because
+ * the JVM may elect to make some inline values non-tearable if
+ * the cost of doing so is acceptable.  The effect of declaring
+ * an inline type {@code NonTearable} is thus to override any
+ * heuristic the JVM may employ to control tearing, in favor
+ * of reliability, and possibly at the expense of performance.
+ *
+ * @author John Rose
+ * @since (valhalla)
+ */
+public interface NonTearable {
+}
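
Editor's addendum, not part of the webrev: a minimal sketch of the intended usage, written against the Valhalla prototype's inline-class syntax. The class name Range and its shape are hypothetical, invented for illustration.

    // Hypothetical example (assumes the Valhalla prototype compiler):
    // a 128-bit inline value whose invariant (lo <= hi) must never be
    // observed torn, even under racing non-volatile writes.
    public inline class Range implements NonTearable {
        public final long lo;
        public final long hi;
        private Range(long lo, long hi) {
            this.lo = lo;
            this.hi = hi;
        }
        public static Range of(long lo, long hi) {
            if (lo > hi) {
                throw new IllegalArgumentException("lo > hi");
            }
            return new Range(lo, hi);
        }
    }

Because Range spans two 64-bit fields it is not naturally atomic, so implementing NonTearable makes is_declared_atomic() true, which under this patch blocks flattening of Range fields and flat Range arrays. The same effect can be had without touching the source by running with -XX:ForceNonTearable=Range, or with a wildcard such as -XX:ForceNonTearable=mypkg/*.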