Commit da900ffe authored by Seth Brenith, committed by Commit Bot

Complete Torque definition of JSArrayBuffer

Previously, our Torque definition of JSArrayBuffer included only the
first two fields. This allowed access to those two fields, but was
somewhat confusing and obviously didn't let Torque code access the
other fields. This change:

- Completes the JSArrayBuffer layout definition;
- Moves the associated bitfield struct definition to Torque;
- Moves a couple of JSArrayBuffer macros to Torque;
- Adds a reducer case so that the code generated using these new macros
  is not worse than what was generated previously (a standalone sketch of
  this reduction follows the commit metadata below).

Change-Id: Ib19c3ba789a33801fa9d0d064cd21d62a1e03e30
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2053769
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66503}
parent fb792f31
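The reducer case mentioned above rewrites a conditional whose input has the
shape (x >> K1) & K2 into x & (K2 << K1), which lets the bitfield tests emitted
by the new Torque macros fold as well as the hand-written CSA code did. Below
is a minimal standalone sketch of that identity and of the guard that protects
it. This is plain C++, not V8 code; CountLeadingZeros32 and CanRewrite are
illustrative helpers standing in for base::bits::CountLeadingZeros and the
reducer's check, not V8 APIs.

// Standalone sketch of the reduction applied by ReduceConditional:
//   (x >> K1) & K2  =>  x & (K2 << K1)
// It is sound whenever K2 has at least K1 leading zero bits, so no mask
// bits are shifted out of the 32-bit word. Helper names are illustrative.
#include <cassert>
#include <cstdint>
#include <initializer_list>

namespace {

// Portable leading-zero count for 32-bit values (stand-in for
// base::bits::CountLeadingZeros in V8).
uint32_t CountLeadingZeros32(uint32_t value) {
  uint32_t count = 0;
  for (uint32_t bit = 0x80000000u; bit != 0 && (value & bit) == 0; bit >>= 1) {
    ++count;
  }
  return count;
}

// The reducer's guard: the rewrite is applied only when the shifted mask
// keeps all of its bits.
bool CanRewrite(uint32_t mask, uint32_t shift_bits) {
  return shift_bits <= CountLeadingZeros32(mask);
}

}  // namespace

int main() {
  const uint32_t x = 0xDEADBEEFu;
  for (uint32_t shift_bits = 1; shift_bits < 32; ++shift_bits) {
    for (uint32_t mask : {0x1u, 0x7u, 0xFFu, 0xFFFFu}) {
      if (!CanRewrite(mask, shift_bits)) continue;
      // A conditional treats any non-zero input as true, so only the
      // zero/non-zero outcome of the two forms has to agree.
      const bool original = ((x >> shift_bits) & mask) != 0;
      const bool rewritten = (x & (mask << shift_bits)) != 0;
      assert(original == rewritten);
    }
  }
  return 0;
}

The guard mirrors the condition shift_bits <= base::bits::CountLeadingZeros(mask)
in the reducer: it rejects masks whose significant bits would be shifted out of
the 32-bit word, which is the only case where the rewritten condition could
disagree with the original one.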
@@ -378,13 +378,6 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
   BIND(&next);
 }
 
-TNode<BoolT> TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(
-    TNode<JSArrayBuffer> buffer) {
-  TNode<Uint32T> bitfield =
-      LoadObjectField<Uint32T>(buffer, JSArrayBuffer::kBitFieldOffset);
-  return IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield);
-}
-
 void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
     TNode<JSTypedArray> holder, TNode<ByteArray> base, TNode<UintPtrT> offset) {
   offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag),
@@ -85,8 +85,6 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
   void DispatchTypedArrayByElementsKind(
       TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
 
-  TNode<BoolT> IsSharedArrayBuffer(TNode<JSArrayBuffer> buffer);
-
   void SetJSTypedArrayOnHeapDataPtr(TNode<JSTypedArray> holder,
                                     TNode<ByteArray> base,
                                     TNode<UintPtrT> offset);
@@ -20,7 +20,6 @@ extern macro IsContext(HeapObject): bool;
 extern macro IsNativeContext(HeapObject): bool;
 extern macro IsJSReceiver(HeapObject): bool;
 extern macro TaggedIsCallable(Object): bool;
-extern macro IsDetachedBuffer(JSArrayBuffer): bool;
 extern macro IsHeapNumber(HeapObject): bool;
 extern macro IsBigInt(HeapObject): bool;
 extern macro IsFixedArray(HeapObject): bool;
@@ -13,8 +13,6 @@ namespace typed_array {
   extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray;
   extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor(
      implicit context: Context)(JSTypedArray): JSFunction;
-  extern macro TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(JSArrayBuffer):
-      bool;
   extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
      JSTypedArray): void;
@@ -12384,11 +12384,6 @@ TNode<JSReceiver> CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
   return Construct(context, constructor, len);
 }
 
-TNode<BoolT> CodeStubAssembler::IsDetachedBuffer(TNode<JSArrayBuffer> buffer) {
-  TNode<Uint32T> buffer_bit_field = LoadJSArrayBufferBitField(buffer);
-  return IsSetWord32<JSArrayBuffer::WasDetachedBit>(buffer_bit_field);
-}
-
 void CodeStubAssembler::ThrowIfArrayBufferIsDetached(
     SloppyTNode<Context> context, TNode<JSArrayBuffer> array_buffer,
     const char* method_name) {
@@ -3555,7 +3555,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<Uint32T> LoadJSArrayBufferBitField(TNode<JSArrayBuffer> array_buffer);
   TNode<RawPtrT> LoadJSArrayBufferBackingStore(
       TNode<JSArrayBuffer> array_buffer);
-  TNode<BoolT> IsDetachedBuffer(TNode<JSArrayBuffer> buffer);
   void ThrowIfArrayBufferIsDetached(SloppyTNode<Context> context,
                                     TNode<JSArrayBuffer> array_buffer,
                                     const char* method_name);
@@ -853,6 +853,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       }
       break;
     }
+    case IrOpcode::kBranch:
+    case IrOpcode::kDeoptimizeIf:
+    case IrOpcode::kDeoptimizeUnless:
+    case IrOpcode::kTrapIf:
+    case IrOpcode::kTrapUnless:
+      return ReduceConditional(node);
     default:
       break;
   }
@@ -1680,6 +1686,37 @@ Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
   return NoChange();
 }
 
+Reduction MachineOperatorReducer::ReduceConditional(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kBranch ||
+         node->opcode() == IrOpcode::kDeoptimizeIf ||
+         node->opcode() == IrOpcode::kDeoptimizeUnless ||
+         node->opcode() == IrOpcode::kTrapIf ||
+         node->opcode() == IrOpcode::kTrapUnless);
+  // This reducer only applies operator reductions to the branch condition.
+  // Reductions involving control flow happen elsewhere. Non-zero inputs are
+  // considered true in all conditional ops.
+  NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
+  if (condition.IsWord32And()) {
+    Uint32BinopMatcher mand(condition.node());
+    if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
+        mand.right().HasValue()) {
+      Uint32BinopMatcher mshift(mand.left().node());
+      // Branch condition (x >> K1) & K2 => x & (K2 << K1)
+      if (mshift.right().HasValue()) {
+        auto shift_bits = mshift.right().Value();
+        auto mask = mand.right().Value();
+        // Make sure that we won't shift data off the end.
+        if (shift_bits <= base::bits::CountLeadingZeros(mask)) {
+          NodeProperties::ReplaceValueInput(
+              node, Word32And(mshift.left().node(), mask << shift_bits), 0);
+          return Changed(node);
+        }
+      }
+    }
+  }
+  return NoChange();
+}
+
 CommonOperatorBuilder* MachineOperatorReducer::common() const {
   return mcgraph()->common();
 }
@@ -109,6 +109,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
   Reduction ReduceFloat64InsertHighWord32(Node* node);
   Reduction ReduceFloat64Compare(Node* node);
   Reduction ReduceFloat64RoundDown(Node* node);
+  Reduction ReduceConditional(Node* node);
 
   Graph* graph() const;
   MachineGraph* mcgraph() const { return mcgraph_; }
@@ -5,9 +5,9 @@
 #ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_
 #define V8_OBJECTS_JS_ARRAY_BUFFER_H_
 
-#include "src/base/bit-field.h"
 #include "src/objects/backing-store.h"
 #include "src/objects/js-objects.h"
+#include "torque-generated/bit-fields-tq.h"
 
 // Has to be the last include (doesn't have include guards):
 #include "src/objects/object-macros.h"
@@ -51,14 +51,7 @@ class JSArrayBuffer : public JSObject {
   V8_INLINE void clear_padding();
 
   // Bit positions for [bit_field].
-#define JS_ARRAY_BUFFER_BIT_FIELD_FIELDS(V, _) \
-  V(IsExternalBit, bool, 1, _)                 \
-  V(IsDetachableBit, bool, 1, _)               \
-  V(WasDetachedBit, bool, 1, _)                \
-  V(IsAsmJsMemoryBit, bool, 1, _)              \
-  V(IsSharedBit, bool, 1, _)
-  DEFINE_BIT_FIELDS(JS_ARRAY_BUFFER_BIT_FIELD_FIELDS)
-#undef JS_ARRAY_BUFFER_BIT_FIELD_FIELDS
+  DEFINE_TORQUE_GENERATED_JS_ARRAY_BUFFER_FLAGS()
 
   // [is_external]: true indicates that the embedder is in charge of freeing the
   // backing_store, while is_external == false means that v8 will free the
@@ -123,22 +116,10 @@ class JSArrayBuffer : public JSObject {
   DECL_PRINTER(JSArrayBuffer)
   DECL_VERIFIER(JSArrayBuffer)
 
   // Layout description.
-#define JS_ARRAY_BUFFER_FIELDS(V)                                           \
-  V(kEndOfTaggedFieldsOffset, 0)                                            \
-  /* Raw data fields. */                                                    \
-  V(kByteLengthOffset, kUIntptrSize)                                        \
-  V(kBackingStoreOffset, kSystemPointerSize)                                \
-  V(kExtensionOffset,                                                       \
-    (V8_ARRAY_BUFFER_EXTENSION_BOOL ? kSystemPointerSize : 0))              \
-  V(kBitFieldOffset, kInt32Size)                                            \
-  /* Pads header size to be a multiple of kTaggedSize. */                   \
-  V(kOptionalPaddingOffset, OBJECT_POINTER_PADDING(kOptionalPaddingOffset)) \
-  /* Header size. */                                                        \
-  V(kHeaderSize, 0)
-
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_ARRAY_BUFFER_FIELDS)
-#undef JS_ARRAY_BUFFER_FIELDS
+  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+                                TORQUE_GENERATED_JS_ARRAY_BUFFER_FIELDS)
+  static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize;
 
   static const int kSizeWithEmbedderFields =
       kHeaderSize +
@@ -2,9 +2,32 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+bitfield struct JSArrayBufferFlags extends uint32 {
+  is_external: bool: 1 bit;
+  is_detachable: bool: 1 bit;
+  was_detached: bool: 1 bit;
+  is_asm_js_memory: bool: 1 bit;
+  is_shared: bool: 1 bit;
+}
+
 extern class JSArrayBuffer extends JSObject {
   byte_length: uintptr;
   backing_store: RawPtr;
+  @if(V8_ARRAY_BUFFER_EXTENSION_BOOL) extension: RawPtr;
+  @ifnot(V8_ARRAY_BUFFER_EXTENSION_BOOL) extension: void;
+  bit_field: JSArrayBufferFlags;
+  // Pads header size to be a multiple of kTaggedSize.
+  @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
+  @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
+}
+
+@export
+macro IsDetachedBuffer(buffer: JSArrayBuffer): bool {
+  return buffer.bit_field.was_detached;
+}
+
+macro IsSharedArrayBuffer(buffer: JSArrayBuffer): bool {
+  return buffer.bit_field.is_shared;
 }
 
 @abstract
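For reference, the Torque bitfield struct above is meant to describe the same
packing as the removed JS_ARRAY_BUFFER_BIT_FIELD_FIELDS macro. The sketch below
is a standalone C++ model of that layout, assuming Torque assigns the one-bit
fields in declaration order starting at bit 0 (the same order the old macro
used). It is not V8 code; the enum and helper are illustrative only.

// Standalone model of the JSArrayBufferFlags layout declared above, assuming
// Torque assigns one-bit fields in declaration order starting at bit 0.
// Not V8 code; the enum and helper below are illustrative only.
#include <cassert>
#include <cstdint>

enum JSArrayBufferFlagBit : uint32_t {
  kIsExternalBit = 0,
  kIsDetachableBit = 1,
  kWasDetachedBit = 2,  // read by IsDetachedBuffer
  kIsAsmJsMemoryBit = 3,
  kIsSharedBit = 4,     // read by IsSharedArrayBuffer
};

bool IsSet(uint32_t bit_field, JSArrayBufferFlagBit bit) {
  return ((bit_field >> bit) & 1u) != 0;
}

int main() {
  // A bit_field value with was_detached and is_shared set.
  const uint32_t bit_field = (1u << kWasDetachedBit) | (1u << kIsSharedBit);
  assert(IsSet(bit_field, kWasDetachedBit));
  assert(IsSet(bit_field, kIsSharedBit));
  assert(!IsSet(bit_field, kIsExternalBit));
  assert(!IsSet(bit_field, kIsDetachableBit));
  return 0;
}

Under that assumption, the Torque macros IsDetachedBuffer and
IsSharedArrayBuffer read bits 2 and 4 of bit_field, the same bits the removed
CSA helpers tested via JSArrayBuffer::WasDetachedBit and
JSArrayBuffer::IsSharedBit.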
@@ -8,7 +8,7 @@
 #include <stdexcept>
 #include <unordered_map>
 
-#include "src/common/globals.h"
+#include "src/flags/flags.h"
 #include "src/torque/constants.h"
 #include "src/torque/declarations.h"
 #include "src/torque/earley-parser.h"
@@ -41,6 +41,8 @@ class BuildFlags : public ContextualClass<BuildFlags> {
     build_flags_["V8_SFI_HAS_UNIQUE_ID"] = V8_SFI_HAS_UNIQUE_ID;
     build_flags_["TAGGED_SIZE_8_BYTES"] = TAGGED_SIZE_8_BYTES;
     build_flags_["V8_DOUBLE_FIELDS_UNBOXING"] = V8_DOUBLE_FIELDS_UNBOXING;
+    build_flags_["V8_ARRAY_BUFFER_EXTENSION_BOOL"] =
+        V8_ARRAY_BUFFER_EXTENSION_BOOL;
     build_flags_["TRUE_FOR_TESTING"] = true;
     build_flags_["FALSE_FOR_TESTING"] = false;
   }
@@ -1096,6 +1096,39 @@ TEST_F(MachineOperatorReducerTest,
   }
 }
 
+// -----------------------------------------------------------------------------
+// Branch
+
+TEST_F(MachineOperatorReducerTest, BranchWithShiftedMaskedValue) {
+  // Branch condition (x >> K1) & K2 => x & (K2 << K1)
+  Node* const p0 = Parameter(0);
+  TRACED_FOREACH(uint32_t, mask, kUint32Values) {
+    TRACED_FORRANGE(uint32_t, shift_bits, 1, 31) {
+      Node* node = graph()->NewNode(
+          common()->Branch(),
+          graph()->NewNode(machine()->Word32And(),
+                           graph()->NewNode(machine()->Word32Shr(), p0,
+                                            Uint32Constant(shift_bits)),
+                           Uint32Constant(mask)),
+          graph()->start());
+      Reduction r = Reduce(node);
+      uint32_t new_mask = mask << shift_bits;
+      if (new_mask >> shift_bits == mask) {
+        ASSERT_TRUE(r.Changed());
+        // The branch condition is now a Word32And operation, unless the mask is
+        // zero in which case the newly-created Word32And is immediately reduced
+        // away.
+        Matcher<Node*> lhs = mask == 0
+                                 ? IsInt32Constant(0)
+                                 : IsWord32And(p0, IsInt32Constant(new_mask));
+        EXPECT_THAT(r.replacement(), IsBranch(lhs, graph()->start()));
+      } else {
+        ASSERT_FALSE(r.Changed());
+      }
+    }
+  }
+}
+
 // -----------------------------------------------------------------------------
 // Int32Sub