Commit 4dc1fb4e authored by Seth Brenith, committed by Commit Bot

Revert "[torque] Support bitfield structs stored within Smis"

This reverts commit e5e4ea96.

Reason for revert: mysterious performance regression chromium:1052756

Original change's description:
> [torque] Support bitfield structs stored within Smis
>
> This change moves the definition of the bits stored in DebugInfo::flags
> to Torque, and updates the only Torque usage of that field to use more
> natural syntax. This is intended as an example of common patterns found
> in various other classes. Several supporting changes are required:
>
> 1. Add a new type representing a bitfield struct stored within a Smi. It
>    is currently called SmiTagged, but I'm open to suggestions.
> 2. Add an enum-style output for Torque bitfield structs whose bitfields
>    occupy only one bit each.
> 3. Add a new case to MachineOperatorReducer that makes the generated
>    code for IncBlockCounter match with what was generated before this
>    change.
> 4. Add support for reporting these bitfields in the postmortem debugging
>    API. The format matches existing bitfields but with an offset value
>    that includes the SMI shift size.
>
> Bug: v8:7793
> Change-Id: Icaecbe4a162da55d2d9a3a35a8ea85b285b2f1b7
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2028832
> Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
> Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
> Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#66182}

Bug: chromium:1052756, v8:7793
Change-Id: I9e2897efbb6321124bf4952cf09de2f179f7310d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2062569
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66349}
parent 03d5a7ba
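
For readers skimming the diff below: the reverted feature let a Torque bitfield struct live inside a Smi, whose integer payload sits above the tag bits. A minimal C++ sketch of that encoding follows (illustrative only; the constant values assume a 64-bit build with 32-bit Smi payloads, and the helper names are mine, not V8's):

#include <cstdint>

// Assumed Smi layout for a 64-bit build with 32-bit payloads: one tag bit,
// then 31 bits of shift, so the payload occupies the upper 32 bits.
constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;

// Tagging places the bitfield payload above the tag/shift bits...
constexpr uint64_t SmiTag(uint32_t payload) {
  return static_cast<uint64_t>(payload) << (kSmiTagSize + kSmiShiftSize);
}

// ...and untagging recovers it, which is what the reverted Torque macro
// SmiUntag<T>(value: SmiTagged<T>) did for bitfield structs.
constexpr uint32_t SmiUntag(uint64_t raw_smi) {
  return static_cast<uint32_t>(raw_smi >> (kSmiTagSize + kSmiShiftSize));
}

// Consequence for postmortem debugging (point 4 above): payload bit k of
// the struct shows up at bit k + kSmiTagSize + kSmiShiftSize of the word.
static_assert(SmiUntag(SmiTag(1u << 2)) == 1u << 2, "payload round-trips");
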
@@ -85,7 +85,7 @@ type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
 type int31 extends int32
     generates 'TNode<Int32T>' constexpr 'int31_t';
 type uint31 extends uint32
-    generates 'TNode<Uint32T>' constexpr 'uint32_t';
+    generates 'TNode<Uint32T>' constexpr 'uint31_t';
 type int16 extends int31
     generates 'TNode<Int16T>' constexpr 'int16_t';
 type uint16 extends uint31
@@ -104,9 +104,6 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
 type bint generates 'TNode<BInt>' constexpr 'BInt';
 type string constexpr 'const char*';
 
-// A Smi value containing a bitfield struct as its integer data.
-type SmiTagged<T : type extends uint31> extends Smi;
-
 // WARNING: The memory representation (i.e., in class fields and arrays) of
 // float64_or_hole is just a float64 that may be the hole-representing
 // signalling NaN bit-pattern. So it's memory size is that of float64 and
@@ -836,9 +833,6 @@ extern macro SmiTag(intptr): Smi;
 extern macro SmiFromInt32(int32): Smi;
 extern macro SmiFromUint32(uint32): Smi;
 extern macro SmiUntag(Smi): intptr;
-macro SmiUntag<T: type>(value: SmiTagged<T>): T {
-  return %RawDownCast<T>(Unsigned(SmiToInt32(Convert<Smi>(value))));
-}
 extern macro SmiToInt32(Smi): int32;
 extern macro RoundIntPtrToFloat64(intptr): float64;
 extern macro ChangeFloat32ToFloat64(float32): float64;
...
@@ -6,13 +6,16 @@
 namespace internal_coverage {
 
+  const kHasCoverageInfo:
+      constexpr int31 generates 'DebugInfo::kHasCoverageInfo';
+
   macro GetCoverageInfo(implicit context: Context)(function: JSFunction):
       CoverageInfo labels IfNoCoverageInfo {
     const shared: SharedFunctionInfo = function.shared_function_info;
     const debugInfo = Cast<DebugInfo>(shared.script_or_debug_info)
         otherwise goto IfNoCoverageInfo;
-    if (!SmiUntag(debugInfo.flags).has_coverage_info) goto IfNoCoverageInfo;
+    if ((debugInfo.flags & kHasCoverageInfo) == 0) goto IfNoCoverageInfo;
     return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
   }
...
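
Both versions of the GetCoverageInfo check above test the same bit; only the spelling changes. A schematic C++ rendering, operating on the untagged flags payload for simplicity (function names are mine):

#include <cstdint>

// Before the revert: read the named one-bit field of the bitfield struct.
bool HasCoverageInfoViaBitfield(uint32_t flags_payload) {
  return (flags_payload >> 2) & 1;  // has_coverage_info is the third field
}

// After the revert: mask with the enum constant DebugInfo::kHasCoverageInfo,
// defined as 1 << 2 in debug-objects.h below.
bool HasCoverageInfoViaMask(uint32_t flags_payload) {
  return (flags_payload & (1u << 2)) != 0;
}
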
@@ -297,27 +297,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       }
       // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x == x => true
-      if (m.left().IsWord32And() && m.right().HasValue()) {
-        Uint32BinopMatcher mand(m.left().node());
-        if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
-            mand.right().HasValue()) {
-          Uint32BinopMatcher mshift(mand.left().node());
-          // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
-          if (mshift.right().HasValue()) {
-            auto shift_bits = mshift.right().Value();
-            auto mask = mand.right().Value();
-            auto rhs = static_cast<uint32_t>(m.right().Value());
-            // Make sure that we won't shift data off the end.
-            if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
-                shift_bits <= base::bits::CountLeadingZeros(rhs)) {
-              node->ReplaceInput(
-                  0, Word32And(mshift.left().node(), mask << shift_bits));
-              node->ReplaceInput(1, Int32Constant(rhs << shift_bits));
-              return Changed(node);
-            }
-          }
-        }
-      }
       break;
     }
     case IrOpcode::kWord64Equal: {
...
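
The deleted reducer case rewrites a compare of a shifted-and-masked value into a compare on the unshifted value, valid only when the guard keeps bits from being shifted off the top. A self-contained check of the identity (a sketch with constants chosen by me, not V8 test code):

#include <cassert>
#include <cstdint>

int main() {
  // ((x >> K1) & K2) == K3  =>  (x & (K2 << K1)) == (K3 << K1),
  // provided K2 << K1 and K3 << K1 do not lose bits (the reducer's guard).
  const uint32_t k1 = 3;    // shift_bits
  const uint32_t k2 = 0x7;  // mask
  const uint32_t k3 = 0x5;  // comparison constant
  for (uint64_t x = 0; x <= 0xFFFFFFFF; x += 997) {  // sample the range
    const uint32_t v = static_cast<uint32_t>(x);
    const bool original = ((v >> k1) & k2) == k3;
    const bool reduced = (v & (k2 << k1)) == (k3 << k1);
    assert(original == reduced);
  }
  return 0;
}
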
@@ -11,7 +11,6 @@
 #include "src/objects/fixed-array.h"
 #include "src/objects/objects.h"
 #include "src/objects/struct.h"
-#include "torque-generated/bit-fields-tq.h"
 
 // Has to be the last include (doesn't have include guards):
 #include "src/objects/object-macros.h"
@@ -27,7 +26,16 @@ class BytecodeArray;
 class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
  public:
  NEVER_READ_ONLY_SPACE
-  DEFINE_TORQUE_GENERATED_DEBUG_INFO_FLAGS()
+  enum Flag {
+    kNone = 0,
+    kHasBreakInfo = 1 << 0,
+    kPreparedForDebugExecution = 1 << 1,
+    kHasCoverageInfo = 1 << 2,
+    kBreakAtEntry = 1 << 3,
+    kCanBreakAtEntry = 1 << 4,
+    kDebugExecutionMode = 1 << 5
+  };
+
   using Flags = base::Flags<Flag>;
 
   // A bitfield that lists uses of the current instance.
...
@@ -18,15 +18,6 @@ extern class BreakPointInfo extends Struct {
   break_points: FixedArray|BreakPoint|Undefined;
 }
 
-bitfield struct DebugInfoFlags extends uint31 {
-  has_break_info: bool: 1 bit;
-  prepared_for_debug_execution: bool: 1 bit;
-  has_coverage_info: bool: 1 bit;
-  break_at_entry: bool: 1 bit;
-  can_break_at_entry: bool: 1 bit;
-  debug_execution_mode: bool: 1 bit;
-}
-
 @generateCppClass
 extern class DebugInfo extends Struct {
   shared: SharedFunctionInfo;
@@ -41,7 +32,7 @@ extern class DebugInfo extends Struct {
   debug_bytecode_array: Undefined|BytecodeArray;
   // Fixed array holding status information for each active break point.
   break_points: FixedArray;
-  flags: SmiTagged<DebugInfoFlags>;
+  flags: Smi;
   coverage_info: CoverageInfo|Undefined;
 }
...
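
The revert is behavior-preserving here because the removed DebugInfoFlags struct allocated its six one-bit fields from bit 0 upward, exactly the positions the hand-written enum in debug-objects.h hard-codes. Spelled out as constants (an illustrative mapping, not code from either version):

#include <cstdint>

// Field order in the removed bitfield struct implies these bit positions,
// which coincide with DebugInfo's kHasBreakInfo ... kDebugExecutionMode.
constexpr uint32_t kHasBreakInfo = 1u << 0;               // has_break_info
constexpr uint32_t kPreparedForDebugExecution = 1u << 1;  // prepared_for_debug_execution
constexpr uint32_t kHasCoverageInfo = 1u << 2;            // has_coverage_info
constexpr uint32_t kBreakAtEntry = 1u << 3;               // break_at_entry
constexpr uint32_t kCanBreakAtEntry = 1u << 4;            // can_break_at_entry
constexpr uint32_t kDebugExecutionMode = 1u << 5;         // debug_execution_mode
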
@@ -48,18 +48,10 @@ class ValueTypeFieldIterator {
       const auto& field = struct_type->fields()[index_];
       return {field.name_and_type, field.pos, *field.offset, 0, 0};
     }
-    const Type* type = type_;
-    int bitfield_start_offset = 0;
-    if (const auto type_wrapped_in_smi =
-            Type::MatchUnaryGeneric(type_, TypeOracle::GetSmiTaggedGeneric())) {
-      type = *type_wrapped_in_smi;
-      bitfield_start_offset = kSmiTagSize + kSmiShiftSize;
-    }
     if (const BitFieldStructType* bit_field_struct_type =
-            BitFieldStructType::DynamicCast(type)) {
+            BitFieldStructType::DynamicCast(type_)) {
       const auto& field = bit_field_struct_type->fields()[index_];
-      return {field.name_and_type, field.pos, 0, field.num_bits,
-              field.offset + bitfield_start_offset};
+      return {field.name_and_type, field.pos, 0, field.num_bits, field.offset};
     }
     UNREACHABLE();
   }
@@ -91,13 +83,8 @@ class ValueTypeFieldsRange {
     if (struct_type && struct_type != TypeOracle::GetFloat64OrHoleType()) {
       index = struct_type->fields().size();
     }
-    const Type* type = type_;
-    if (const auto type_wrapped_in_smi =
-            Type::MatchUnaryGeneric(type_, TypeOracle::GetSmiTaggedGeneric())) {
-      type = *type_wrapped_in_smi;
-    }
     if (const BitFieldStructType* bit_field_struct_type =
-            BitFieldStructType::DynamicCast(type)) {
+            BitFieldStructType::DynamicCast(type_)) {
       index = bit_field_struct_type->fields().size();
     }
     return {type_, index};
...
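
The lines removed above implemented the offset adjustment described in point 4 of the original change: when a bitfield struct is wrapped in SmiTagged<T>, every reported bit offset is pushed past the tag bits. As a sketch (a hypothetical helper mirroring the removed logic, under the same layout assumptions as the earlier block; not V8's actual API):

constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;  // assumption: 64-bit, 32-bit Smi payload

int ReportedBitOffset(int field_offset, bool wrapped_in_smi) {
  const int bitfield_start_offset =
      wrapped_in_smi ? kSmiTagSize + kSmiShiftSize : 0;
  return field_offset + bitfield_start_offset;
}
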
@@ -66,7 +66,6 @@ static const char* const TORQUE_INTERNAL_NAMESPACE_STRING = "torque_internal";
 static const char* const REFERENCE_TYPE_STRING = "Reference";
 static const char* const SLICE_TYPE_STRING = "Slice";
 static const char* const WEAK_TYPE_STRING = "Weak";
-static const char* const SMI_TAGGED_TYPE_STRING = "SmiTagged";
 static const char* const UNINITIALIZED_ITERATOR_TYPE_STRING =
     "UninitializedIterator";
 static const char* const GENERIC_TYPE_INSTANTIATION_NAMESPACE_STRING =
...
@@ -3267,14 +3267,11 @@ void ImplementationVisitor::GenerateBitFields(
   NamespaceScope namespaces(header, {"v8", "internal"});
 
   for (const auto& type : TypeOracle::GetBitFieldStructTypes()) {
-    bool all_single_bits = true;  // Track whether every field is one bit.
     header << "#define DEFINE_TORQUE_GENERATED_"
            << CapifyStringWithUnderscores(type->name()) << "() \\\n";
     std::string type_name = type->GetConstexprGeneratedTypeName();
     for (const auto& field : type->fields()) {
       const char* suffix = field.num_bits == 1 ? "Bit" : "Bits";
-      all_single_bits = all_single_bits && field.num_bits == 1;
       std::string field_type_name =
           field.name_and_type.type->GetConstexprGeneratedTypeName();
       header << "  using " << CamelifyString(field.name_and_type.name)
@@ -3283,17 +3280,6 @@ void ImplementationVisitor::GenerateBitFields(
              << ">; \\\n";
     }
 
-    // If every field is one bit, we can also generate a convenient enum.
-    if (all_single_bits) {
-      header << "  enum Flag { \\\n";
-      header << "    kNone = 0, \\\n";
-      for (const auto& field : type->fields()) {
-        header << "    k" << CamelifyString(field.name_and_type.name)
-               << " = 1 << " << field.offset << ", \\\n";
-      }
-      header << "  }; \\\n";
-    }
     header << "\n";
   }
 }
...
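
Before the revert, the generator above expanded DEFINE_TORQUE_GENERATED_DEBUG_INFO_FLAGS() to BitField aliases plus, because every field is a single bit, the convenience enum. Roughly like this (reconstructed from the generator code above, not copied from real generated output, and not standalone since it references v8::base::BitField):

#define DEFINE_TORQUE_GENERATED_DEBUG_INFO_FLAGS()                 \
  using HasBreakInfoBit = base::BitField<bool, 0, 1>;              \
  using PreparedForDebugExecutionBit = base::BitField<bool, 1, 1>; \
  using HasCoverageInfoBit = base::BitField<bool, 2, 1>;           \
  using BreakAtEntryBit = base::BitField<bool, 3, 1>;              \
  using CanBreakAtEntryBit = base::BitField<bool, 4, 1>;           \
  using DebugExecutionModeBit = base::BitField<bool, 5, 1>;        \
  enum Flag {                                                      \
    kNone = 0,                                                     \
    kHasBreakInfo = 1 << 0,                                        \
    kPreparedForDebugExecution = 1 << 1,                           \
    kHasCoverageInfo = 1 << 2,                                     \
    kBreakAtEntry = 1 << 3,                                        \
    kCanBreakAtEntry = 1 << 4,                                     \
    kDebugExecutionMode = 1 << 5,                                  \
  };

This is why reverting to the hand-written enum in debug-objects.h changes nothing at the bit level: the generated enum carried the same names and values.
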
@@ -95,10 +95,6 @@ class TypeOracle : public ContextualClass<TypeOracle> {
     return Declarations::LookupGlobalUniqueGenericType(WEAK_TYPE_STRING);
   }
 
-  static GenericType* GetSmiTaggedGeneric() {
-    return Declarations::LookupGlobalUniqueGenericType(SMI_TAGGED_TYPE_STRING);
-  }
-
   static const Type* GetReferenceType(const Type* referenced_type) {
     return GetGenericTypeInstance(GetReferenceGeneric(), {referenced_type});
   }
@@ -233,10 +229,6 @@ class TypeOracle : public ContextualClass<TypeOracle> {
     return Get().GetBuiltinType(UINT32_TYPE_STRING);
   }
 
-  static const Type* GetUint31Type() {
-    return Get().GetBuiltinType(UINT31_TYPE_STRING);
-  }
-
   static const Type* GetInt16Type() {
     return Get().GetBuiltinType(INT16_TYPE_STRING);
   }
...
@@ -869,7 +869,6 @@ base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type) {
 
 bool IsAnyUnsignedInteger(const Type* type) {
   return type == TypeOracle::GetUint32Type() ||
-         type == TypeOracle::GetUint31Type() ||
         type == TypeOracle::GetUint16Type() ||
         type == TypeOracle::GetUint8Type() ||
         type == TypeOracle::GetUIntPtrType();
...
@@ -122,7 +122,6 @@ class StringResource : public v8::String::ExternalStringResource {
 TEST(GetObjectProperties) {
   CcTest::InitializeVM();
   v8::Isolate* isolate = CcTest::isolate();
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   v8::HandleScope scope(isolate);
   LocalContext context;
   // Claim we don't know anything about the heap layout.
@@ -181,8 +180,11 @@
                  : Contains(props->brief, "maybe EmptyFixedArray"));
 
   // Provide a heap first page so the API can be more sure.
-  heap_addresses.read_only_space_first_page = reinterpret_cast<uintptr_t>(
-      i_isolate->heap()->read_only_space()->first_page());
+  heap_addresses.read_only_space_first_page =
+      reinterpret_cast<uintptr_t>(reinterpret_cast<i::Isolate*>(isolate)
+                                      ->heap()
+                                      ->read_only_space()
+                                      ->first_page());
   props =
       d::GetObjectProperties(properties_or_hash, &ReadMemory, heap_addresses);
   CHECK(props->type_check_result ==
@@ -371,25 +373,10 @@
       ReadProp<i::Tagged_t>(*props, "shared_function_info"), &ReadMemory,
       heap_addresses);
   const d::ObjectProperty& flags = FindProp(*props, "flags");
-  CHECK_GE(flags.num_struct_fields, 3);
   CheckStructProp(*flags.struct_fields[0], "v8::internal::FunctionKind",
                   "function_kind", 0, 5, 0);
   CheckStructProp(*flags.struct_fields[1], "bool", "is_native", 0, 1, 5);
   CheckStructProp(*flags.struct_fields[2], "bool", "is_strict", 0, 1, 6);
-
-  // Get data about a different bitfield struct which is contained within a smi.
-  Handle<i::JSFunction> function = Handle<i::JSFunction>::cast(o);
-  Handle<i::SharedFunctionInfo> shared(function->shared(), i_isolate);
-  Handle<i::DebugInfo> debug_info =
-      i_isolate->debug()->GetOrCreateDebugInfo(shared);
-  props =
-      d::GetObjectProperties(debug_info->ptr(), &ReadMemory, heap_addresses);
-  const d::ObjectProperty& debug_flags = FindProp(*props, "flags");
-  CHECK_GE(debug_flags.num_struct_fields, 5);
-  CheckStructProp(*debug_flags.struct_fields[0], "bool", "has_break_info", 0, 1,
-                  i::kSmiTagSize + i::kSmiShiftSize);
-  CheckStructProp(*debug_flags.struct_fields[4], "bool", "can_break_at_entry",
-                  0, 1, i::kSmiTagSize + i::kSmiShiftSize + 4);
 }
 
 TEST(ListObjectClasses) {
...
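
The deleted assertions above encode the Smi-shift arithmetic directly: the postmortem API reported each bitfield at its payload offset plus the tag/shift width. Worked out under the same layout assumption as the earlier sketches (kSmiShiftSize varies by build configuration):

constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;  // assumption; 0 on builds with 31-bit Smis

// has_break_info is payload bit 0; can_break_at_entry is payload bit 4.
constexpr int kHasBreakInfoShift = kSmiTagSize + kSmiShiftSize;         // 32
constexpr int kCanBreakAtEntryShift = kSmiTagSize + kSmiShiftSize + 4;  // 36
static_assert(kHasBreakInfoShift == 32, "bit offset within the raw word");
static_assert(kCanBreakAtEntryShift == 36, "four one-bit fields higher");
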
@@ -1058,43 +1058,6 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
   }
 }
 
-// -----------------------------------------------------------------------------
-// Word32Equal
-
-TEST_F(MachineOperatorReducerTest,
-       Word32EqualWithShiftedMaskedValueAndConstant) {
-  // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
-  Node* const p0 = Parameter(0);
-  TRACED_FOREACH(uint32_t, mask, kUint32Values) {
-    TRACED_FOREACH(uint32_t, rhs, kUint32Values) {
-      TRACED_FORRANGE(uint32_t, shift_bits, 1, 31) {
-        Node* node = graph()->NewNode(
-            machine()->Word32Equal(),
-            graph()->NewNode(machine()->Word32And(),
-                             graph()->NewNode(machine()->Word32Shr(), p0,
-                                              Uint32Constant(shift_bits)),
-                             Uint32Constant(mask)),
-            Uint32Constant(rhs));
-        Reduction r = Reduce(node);
-        uint32_t new_mask = mask << shift_bits;
-        uint32_t new_rhs = rhs << shift_bits;
-        if (new_mask >> shift_bits == mask && new_rhs >> shift_bits == rhs) {
-          ASSERT_TRUE(r.Changed());
-          // The left-hand side of the equality is now a Word32And operation,
-          // unless the mask is zero in which case the newly-created Word32And
-          // is immediately reduced away.
-          Matcher<Node*> lhs = mask == 0
-                                   ? IsInt32Constant(0)
-                                   : IsWord32And(p0, IsInt32Constant(new_mask));
-          EXPECT_THAT(r.replacement(),
-                      IsWord32Equal(lhs, IsInt32Constant(new_rhs)));
-        } else {
-          ASSERT_FALSE(r.Changed());
-        }
-      }
-    }
-  }
-}
-
 // -----------------------------------------------------------------------------
 // Int32Sub
...
@@ -70,7 +70,6 @@ type Code extends HeapObject generates 'TNode<Code>';
 type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
 type Context extends HeapObject generates 'TNode<Context>';
 type NativeContext extends Context;
-type SmiTagged<T : type extends uint31> extends Smi;
 
 struct float64_or_hole {
   is_hole: bool;
...