Commit 527f9de1 authored by Seth Brenith, committed by Commit Bot

Reland "[torque] Support bitfield structs stored within Smis"

This reverts commit 4dc1fb4e.

Reason for revert: the regression from the original change was likely due to unlucky factors like code alignment.

Original change's description:
> Revert "[torque] Support bitfield structs stored within Smis"
>
> This reverts commit e5e4ea96.
>
> Reason for revert: mysterious performance regression chromium:1052756
>
> Original change's description:
> > [torque] Support bitfield structs stored within Smis
> >
> > This change moves the definition of the bits stored in DebugInfo::flags
> > to Torque, and updates the only Torque usage of that field to use more
> > natural syntax. This is intended as an example of common patterns found
> > in various other classes. Several supporting changes are required:
> >
> > 1. Add a new type representing a bitfield struct stored within a Smi. It
> >    is currently called SmiTagged, but I'm open to suggestions.
> > 2. Add an enum-style output for Torque bitfield structs whose bitfields
> >    occupy only one bit each.
> > 3. Add a new case to MachineOperatorReducer that makes the generated
> >    code for IncBlockCounter match with what was generated before this
> >    change.
> > 4. Add support for reporting these bitfields in the postmortem debugging
> >    API. The format matches existing bitfields but with an offset value
> >    that includes the SMI shift size.
> >
> > Bug: v8:7793
> > Change-Id: Icaecbe4a162da55d2d9a3a35a8ea85b285b2f1b7
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2028832
> > Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
> > Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
> > Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#66182}
>
> Bug: chromium:1052756, v8:7793
> Change-Id: I9e2897efbb6321124bf4952cf09de2f179f7310d
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2062569
> Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
> Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#66349}

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: chromium:1052756, v8:7793
Change-Id: I6087928aa14c8551ebd294513bd8d6ffa402a0d4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2070635
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Cr-Commit-Position: refs/heads/master@{#66465}
parent a6cea204
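A minimal standalone C++ sketch of the layout behind points 1 and 4 of the description above: a Smi keeps its integer payload in the upper bits of the tagged word, so a bitfield struct stored in a Smi sits at bit offset kSmiTagSize + kSmiShiftSize. kSmiTagSize and kSmiShiftSize are real V8 constants; the function names here are illustrative, not V8 code.

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;     // Low bit is the tag; 0 marks a Smi.
constexpr int kSmiShiftSize = 31;  // 64-bit V8 without pointer compression.

// Pack a 31-bit bitfield payload into a Smi-shaped word.
constexpr uint64_t SmiTagPayload(uint32_t bits) {
  return static_cast<uint64_t>(bits) << (kSmiTagSize + kSmiShiftSize);
}

// Unpack it again; this mirrors what the new SmiUntag<T> macro computes.
constexpr uint32_t SmiUntagPayload(uint64_t smi) {
  return static_cast<uint32_t>(smi >> (kSmiTagSize + kSmiShiftSize));
}

int main() {
  constexpr uint32_t kHasCoverageInfo = 1 << 2;  // Third single-bit field.
  uint64_t flags = SmiTagPayload(kHasCoverageInfo);
  assert(SmiUntagPayload(flags) & kHasCoverageInfo);
  // The postmortem API reports this field at bit offset
  // kSmiTagSize + kSmiShiftSize + 2 within the raw word (point 4 above).
  assert((flags >> (kSmiTagSize + kSmiShiftSize + 2)) & 1);
}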
@@ -85,7 +85,7 @@ type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
type int31 extends int32
generates 'TNode<Int32T>' constexpr 'int31_t';
type uint31 extends uint32
-    generates 'TNode<Uint32T>' constexpr 'uint31_t';
+    generates 'TNode<Uint32T>' constexpr 'uint32_t';
type int16 extends int31
generates 'TNode<Int16T>' constexpr 'int16_t';
type uint16 extends uint31
@@ -104,6 +104,9 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
// A Smi value containing a bitfield struct as its integer data.
type SmiTagged<T : type extends uint31> extends Smi;
// WARNING: The memory representation (i.e., in class fields and arrays) of
// float64_or_hole is just a float64 that may be the hole-representing
// signalling NaN bit-pattern. So its memory size is that of float64 and
@@ -897,6 +900,9 @@ extern macro SmiTag(intptr): Smi;
extern macro SmiFromInt32(int32): Smi;
extern macro SmiFromUint32(uint32): Smi;
extern macro SmiUntag(Smi): intptr;
macro SmiUntag<T: type>(value: SmiTagged<T>): T {
return %RawDownCast<T>(Unsigned(SmiToInt32(Convert<Smi>(value))));
}
extern macro SmiToInt32(Smi): int32;
extern macro RoundIntPtrToFloat64(intptr): float64;
extern macro ChangeFloat32ToFloat64(float32): float64;
......
@@ -6,16 +6,13 @@
namespace internal_coverage {
-  const kHasCoverageInfo:
-      constexpr int31 generates 'DebugInfo::kHasCoverageInfo';
macro GetCoverageInfo(implicit context: Context)(function: JSFunction):
CoverageInfo labels IfNoCoverageInfo {
const shared: SharedFunctionInfo = function.shared_function_info;
const debugInfo = Cast<DebugInfo>(shared.script_or_debug_info)
otherwise goto IfNoCoverageInfo;
-    if ((debugInfo.flags & kHasCoverageInfo) == 0) goto IfNoCoverageInfo;
+    if (!SmiUntag(debugInfo.flags).has_coverage_info) goto IfNoCoverageInfo;
return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
}
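The two checks above are equivalent: the removed form masks the still-tagged flags word against a constant that is itself Smi-tagged, while the new form untags first and then tests the named bit. A hedged sketch of that equivalence (illustrative constants; kHasCoverageInfo's value comes from the old DebugInfo::Flag enum):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;  // 64-bit, no pointer compression.
constexpr uint32_t kHasCoverageInfo = 1 << 2;  // From the old Flag enum.

int main() {
  // A flags word with only has_coverage_info set, as stored in the heap.
  uint64_t flags_word = static_cast<uint64_t>(kHasCoverageInfo)
                        << (kSmiTagSize + kSmiShiftSize);
  // Removed form: mask the tagged word against the tagged constant.
  bool old_check =
      (flags_word & (static_cast<uint64_t>(kHasCoverageInfo)
                     << (kSmiTagSize + kSmiShiftSize))) == 0;
  // New form: untag, then test the bit. The MachineOperatorReducer case
  // added in this CL folds this shift-then-mask back into the old shape.
  bool new_check =
      !((flags_word >> (kSmiTagSize + kSmiShiftSize)) & kHasCoverageInfo);
  assert(old_check == new_check);
}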
......
@@ -297,6 +297,27 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
// TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
if (m.left().IsWord32And() && m.right().HasValue()) {
Uint32BinopMatcher mand(m.left().node());
if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
mand.right().HasValue()) {
Uint32BinopMatcher mshift(mand.left().node());
// ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
if (mshift.right().HasValue()) {
auto shift_bits = mshift.right().Value();
auto mask = mand.right().Value();
auto rhs = static_cast<uint32_t>(m.right().Value());
// Make sure that we won't shift data off the end.
if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
shift_bits <= base::bits::CountLeadingZeros(rhs)) {
node->ReplaceInput(
0, Word32And(mshift.left().node(), mask << shift_bits));
node->ReplaceInput(1, Int32Constant(rhs << shift_bits));
return Changed(node);
}
}
}
}
break;
}
case IrOpcode::kWord64Equal: {
......
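The guard in the new reducer case matters: shifting the mask or the right-hand constant left by K1 must not drop any set bits, which is exactly what the CountLeadingZeros comparisons ensure. A self-contained sketch verifying the rewrite under that guard (assumed sample constants, not V8 code):

#include <cassert>
#include <cstdint>

// The rewrite ((x >> k1) & k2) == k3  =>  (x & (k2 << k1)) == (k3 << k1)
// is sound iff neither constant loses bits when shifted left by k1, i.e.
// k1 <= CountLeadingZeros(k2) and k1 <= CountLeadingZeros(k3).
bool RewriteIsSound(uint32_t k1, uint32_t k2, uint32_t k3) {
  return ((k2 << k1) >> k1) == k2 && ((k3 << k1) >> k1) == k3;
}

int main() {
  const uint32_t k1 = 3, k2 = 0xF, k3 = 0x5;
  assert(RewriteIsSound(k1, k2, k3));
  for (uint32_t x = 0; x < (1u << 16); ++x) {
    bool before = ((x >> k1) & k2) == k3;
    bool after = (x & (k2 << k1)) == (k3 << k1);
    assert(before == after);  // Same result; shifts moved onto constants.
  }
}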
@@ -11,6 +11,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -26,16 +27,7 @@ class BytecodeArray;
class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
public:
NEVER_READ_ONLY_SPACE
-  enum Flag {
-    kNone = 0,
-    kHasBreakInfo = 1 << 0,
-    kPreparedForDebugExecution = 1 << 1,
-    kHasCoverageInfo = 1 << 2,
-    kBreakAtEntry = 1 << 3,
-    kCanBreakAtEntry = 1 << 4,
-    kDebugExecutionMode = 1 << 5
-  };
+  DEFINE_TORQUE_GENERATED_DEBUG_INFO_FLAGS()
using Flags = base::Flags<Flag>;
// A bitfield that lists uses of the current instance.
......
@@ -18,6 +18,15 @@ extern class BreakPointInfo extends Struct {
break_points: FixedArray|BreakPoint|Undefined;
}
bitfield struct DebugInfoFlags extends uint31 {
has_break_info: bool: 1 bit;
prepared_for_debug_execution: bool: 1 bit;
has_coverage_info: bool: 1 bit;
break_at_entry: bool: 1 bit;
can_break_at_entry: bool: 1 bit;
debug_execution_mode: bool: 1 bit;
}
@generateCppClass
extern class DebugInfo extends Struct {
shared: SharedFunctionInfo;
@@ -32,7 +41,7 @@ extern class DebugInfo extends Struct {
debug_bytecode_array: Undefined|BytecodeArray;
// Fixed array holding status information for each active break point.
break_points: FixedArray;
-  flags: Smi;
+  flags: SmiTagged<DebugInfoFlags>;
coverage_info: CoverageInfo|Undefined;
}
......
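For readers new to Torque bitfield structs: the DebugInfoFlags declaration above corresponds roughly to a set of C++ BitField aliases like the sketch below. The BitField template here is a simplified stand-in for v8::base::BitField; the real aliases are emitted by DEFINE_TORQUE_GENERATED_DEBUG_INFO_FLAGS().

#include <cstdint>

// Simplified stand-in for v8::base::BitField.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

// One alias per field, in declaration order, one bit each.
using HasBreakInfoBit = BitField<bool, 0, 1>;
using PreparedForDebugExecutionBit = BitField<bool, 1, 1>;
using HasCoverageInfoBit = BitField<bool, 2, 1>;
using BreakAtEntryBit = BitField<bool, 3, 1>;
using CanBreakAtEntryBit = BitField<bool, 4, 1>;
using DebugExecutionModeBit = BitField<bool, 5, 1>;

static_assert(HasCoverageInfoBit::kMask == 1 << 2, "third single-bit field");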
@@ -48,10 +48,18 @@ class ValueTypeFieldIterator {
const auto& field = struct_type->fields()[index_];
return {field.name_and_type, field.pos, *field.offset, 0, 0};
}
const Type* type = type_;
int bitfield_start_offset = 0;
if (const auto type_wrapped_in_smi =
Type::MatchUnaryGeneric(type_, TypeOracle::GetSmiTaggedGeneric())) {
type = *type_wrapped_in_smi;
bitfield_start_offset = kSmiTagSize + kSmiShiftSize;
}
if (const BitFieldStructType* bit_field_struct_type =
-            BitFieldStructType::DynamicCast(type_)) {
+            BitFieldStructType::DynamicCast(type)) {
const auto& field = bit_field_struct_type->fields()[index_];
-      return {field.name_and_type, field.pos, 0, field.num_bits, field.offset};
+      return {field.name_and_type, field.pos, 0, field.num_bits,
+              field.offset + bitfield_start_offset};
}
UNREACHABLE();
}
@@ -83,8 +91,13 @@ class ValueTypeFieldsRange {
if (struct_type && struct_type != TypeOracle::GetFloat64OrHoleType()) {
index = struct_type->fields().size();
}
const Type* type = type_;
if (const auto type_wrapped_in_smi =
Type::MatchUnaryGeneric(type_, TypeOracle::GetSmiTaggedGeneric())) {
type = *type_wrapped_in_smi;
}
if (const BitFieldStructType* bit_field_struct_type =
-            BitFieldStructType::DynamicCast(type_)) {
+            BitFieldStructType::DynamicCast(type)) {
index = bit_field_struct_type->fields().size();
}
return {type_, index};
......
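To make the new offset handling concrete: for a bitfield struct wrapped in SmiTagged, every reported bit offset is the field's offset within the payload plus the Smi tag and shift. A hedged sketch of that arithmetic (struct and function names are illustrative; only kSmiTagSize and kSmiShiftSize are real V8 constants):

#include <cassert>

constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;  // 64-bit, no pointer compression.

struct BitFieldInfo {
  int num_bits;
  int offset_in_payload;  // Bit offset inside the untagged payload.
};

// Bit offset within the raw in-memory word, as the postmortem API reports it.
int ReportedBitOffset(const BitFieldInfo& field) {
  return field.offset_in_payload + kSmiTagSize + kSmiShiftSize;
}

int main() {
  // DebugInfoFlags::has_break_info is the first field; the test later in
  // this CL expects it at kSmiTagSize + kSmiShiftSize + 0, i.e. 32 here.
  BitFieldInfo has_break_info{1, 0};
  assert(ReportedBitOffset(has_break_info) == 32);
}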
@@ -66,6 +66,7 @@ static const char* const TORQUE_INTERNAL_NAMESPACE_STRING = "torque_internal";
static const char* const REFERENCE_TYPE_STRING = "Reference";
static const char* const SLICE_TYPE_STRING = "Slice";
static const char* const WEAK_TYPE_STRING = "Weak";
static const char* const SMI_TAGGED_TYPE_STRING = "SmiTagged";
static const char* const UNINITIALIZED_ITERATOR_TYPE_STRING =
"UninitializedIterator";
static const char* const GENERIC_TYPE_INSTANTIATION_NAMESPACE_STRING =
......
@@ -3266,11 +3266,14 @@ void ImplementationVisitor::GenerateBitFields(
NamespaceScope namespaces(header, {"v8", "internal"});
for (const auto& type : TypeOracle::GetBitFieldStructTypes()) {
bool all_single_bits = true; // Track whether every field is one bit.
header << "#define DEFINE_TORQUE_GENERATED_"
<< CapifyStringWithUnderscores(type->name()) << "() \\\n";
std::string type_name = type->GetConstexprGeneratedTypeName();
for (const auto& field : type->fields()) {
const char* suffix = field.num_bits == 1 ? "Bit" : "Bits";
all_single_bits = all_single_bits && field.num_bits == 1;
std::string field_type_name =
field.name_and_type.type->GetConstexprGeneratedTypeName();
header << " using " << CamelifyString(field.name_and_type.name)
@@ -3279,6 +3282,17 @@ void ImplementationVisitor::GenerateBitFields(
<< ">; \\\n";
}
// If every field is one bit, we can also generate a convenient enum.
if (all_single_bits) {
header << " enum Flag { \\\n";
header << " kNone = 0, \\\n";
for (const auto& field : type->fields()) {
header << " k" << CamelifyString(field.name_and_type.name)
<< " = 1 << " << field.offset << ", \\\n";
}
header << " }; \\\n";
}
header << "\n";
}
}
......
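When the all_single_bits path above is taken, the macro body additionally gains an enum. For DebugInfoFlags the expected expansion looks like this (a sketch reconstructed from the generator loop; it matches the hand-written Flag enum this CL deletes from debug-objects.h):

enum Flag {
  kNone = 0,
  kHasBreakInfo = 1 << 0,
  kPreparedForDebugExecution = 1 << 1,
  kHasCoverageInfo = 1 << 2,
  kBreakAtEntry = 1 << 3,
  kCanBreakAtEntry = 1 << 4,
  kDebugExecutionMode = 1 << 5,
};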
@@ -95,6 +95,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Declarations::LookupGlobalUniqueGenericType(WEAK_TYPE_STRING);
}
static GenericType* GetSmiTaggedGeneric() {
return Declarations::LookupGlobalUniqueGenericType(SMI_TAGGED_TYPE_STRING);
}
static const Type* GetReferenceType(const Type* referenced_type) {
return GetGenericTypeInstance(GetReferenceGeneric(), {referenced_type});
}
@@ -229,6 +233,10 @@
return Get().GetBuiltinType(UINT32_TYPE_STRING);
}
static const Type* GetUint31Type() {
return Get().GetBuiltinType(UINT31_TYPE_STRING);
}
static const Type* GetInt16Type() {
return Get().GetBuiltinType(INT16_TYPE_STRING);
}
......
@@ -869,6 +869,7 @@ base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type) {
bool IsAnyUnsignedInteger(const Type* type) {
return type == TypeOracle::GetUint32Type() ||
type == TypeOracle::GetUint31Type() ||
type == TypeOracle::GetUint16Type() ||
type == TypeOracle::GetUint8Type() ||
type == TypeOracle::GetUIntPtrType();
......
@@ -122,6 +122,7 @@ class StringResource : public v8::String::ExternalStringResource {
TEST(GetObjectProperties) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
v8::HandleScope scope(isolate);
LocalContext context;
// Claim we don't know anything about the heap layout.
@@ -180,11 +181,8 @@
: Contains(props->brief, "maybe EmptyFixedArray"));
// Provide a heap first page so the API can be more sure.
-  heap_addresses.read_only_space_first_page =
-      reinterpret_cast<uintptr_t>(reinterpret_cast<i::Isolate*>(isolate)
-                                      ->heap()
-                                      ->read_only_space()
-                                      ->first_page());
+  heap_addresses.read_only_space_first_page = reinterpret_cast<uintptr_t>(
+      i_isolate->heap()->read_only_space()->first_page());
props =
d::GetObjectProperties(properties_or_hash, &ReadMemory, heap_addresses);
CHECK(props->type_check_result ==
@@ -373,10 +371,25 @@ TEST(GetObjectProperties) {
ReadProp<i::Tagged_t>(*props, "shared_function_info"), &ReadMemory,
heap_addresses);
const d::ObjectProperty& flags = FindProp(*props, "flags");
CHECK_GE(flags.num_struct_fields, 3);
CheckStructProp(*flags.struct_fields[0], "v8::internal::FunctionKind",
"function_kind", 0, 5, 0);
CheckStructProp(*flags.struct_fields[1], "bool", "is_native", 0, 1, 5);
CheckStructProp(*flags.struct_fields[2], "bool", "is_strict", 0, 1, 6);
// Get data about a different bitfield struct which is contained within a smi.
Handle<i::JSFunction> function = Handle<i::JSFunction>::cast(o);
Handle<i::SharedFunctionInfo> shared(function->shared(), i_isolate);
Handle<i::DebugInfo> debug_info =
i_isolate->debug()->GetOrCreateDebugInfo(shared);
props =
d::GetObjectProperties(debug_info->ptr(), &ReadMemory, heap_addresses);
const d::ObjectProperty& debug_flags = FindProp(*props, "flags");
CHECK_GE(debug_flags.num_struct_fields, 5);
CheckStructProp(*debug_flags.struct_fields[0], "bool", "has_break_info", 0, 1,
i::kSmiTagSize + i::kSmiShiftSize);
CheckStructProp(*debug_flags.struct_fields[4], "bool", "can_break_at_entry",
0, 1, i::kSmiTagSize + i::kSmiShiftSize + 4);
}
TEST(ListObjectClasses) {
......
@@ -1058,6 +1058,43 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
}
}
// -----------------------------------------------------------------------------
// Word32Equal
TEST_F(MachineOperatorReducerTest,
Word32EqualWithShiftedMaskedValueAndConstant) {
// ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
Node* const p0 = Parameter(0);
TRACED_FOREACH(uint32_t, mask, kUint32Values) {
TRACED_FOREACH(uint32_t, rhs, kUint32Values) {
TRACED_FORRANGE(uint32_t, shift_bits, 1, 31) {
Node* node = graph()->NewNode(
machine()->Word32Equal(),
graph()->NewNode(machine()->Word32And(),
graph()->NewNode(machine()->Word32Shr(), p0,
Uint32Constant(shift_bits)),
Uint32Constant(mask)),
Uint32Constant(rhs));
Reduction r = Reduce(node);
uint32_t new_mask = mask << shift_bits;
uint32_t new_rhs = rhs << shift_bits;
if (new_mask >> shift_bits == mask && new_rhs >> shift_bits == rhs) {
ASSERT_TRUE(r.Changed());
// The left-hand side of the equality is now a Word32And operation,
// unless the mask is zero in which case the newly-created Word32And
// is immediately reduced away.
Matcher<Node*> lhs = mask == 0
? IsInt32Constant(0)
: IsWord32And(p0, IsInt32Constant(new_mask));
EXPECT_THAT(r.replacement(),
IsWord32Equal(lhs, IsInt32Constant(new_rhs)));
} else {
ASSERT_FALSE(r.Changed());
}
}
}
}
}
// -----------------------------------------------------------------------------
// Int32Sub
......
@@ -70,6 +70,7 @@ type Code extends HeapObject generates 'TNode<Code>';
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
type Context extends HeapObject generates 'TNode<Context>';
type NativeContext extends Context;
type SmiTagged<T : type extends uint31> extends Smi;
struct float64_or_hole {
is_hole: bool;
......