Commit 84da489d authored by Jakob Kummerow, committed by V8 LUCI CQ

[wasm-gc] Avoid emitting IR loops for array.new_default

Instead, make the array-allocating builtin initialize the object.
This speeds up later stages of Turbofan graph processing, in particular
live range computation.

Bug: v8:7748
Change-Id: Iba0d682922b444b1d6151eeaee8d939821ebc980
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2983457
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Reviewed-by: Manos Koukoutos <manoskouk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75367}
parent 9f747b5f
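
To make the change concrete, here is a minimal C++ sketch (not V8 code; the mode enum and fill routine are assumptions modeled on the CodeStubAssembler change below) of the initialization work that now happens inside the allocating builtin instead of as a compiler-emitted per-element loop:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Assumed mirror of the InitializationMode enum added in this change.
enum class InitializationMode { kUninitialized, kInitializeToZero, kInitializeToNull };

// Fills the array payload (everything after the header) with a pointer-sized
// pattern: zero for numeric arrays, the null reference for reference arrays.
void InitializeWasmArrayPayload(uint8_t* object_start, size_t object_size,
                                size_t header_size, InitializationMode mode,
                                uintptr_t null_pattern) {
  if (mode == InitializationMode::kUninitialized) return;
  const uintptr_t pattern =
      mode == InitializationMode::kInitializeToZero ? 0u : null_pattern;
  for (size_t offset = header_size; offset < object_size;
       offset += sizeof(uintptr_t)) {
    std::memcpy(object_start + offset, &pattern, sizeof(uintptr_t));
  }
}
```

With this in the builtin, the IR for array.new_default shrinks to a single call node, which is what relieves later Turbofan phases such as live range computation.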
@@ -332,6 +332,13 @@ constexpr 'CodeStubAssembler::ExtractFixedArrayFlag' {
...
}
extern enum InitializationMode
constexpr 'CodeStubAssembler::InitializationMode' {
kUninitialized,
kInitializeToZero,
kInitializeToNull
}
const kBigIntMaxLength: constexpr intptr generates 'BigInt::kMaxLength';
extern enum MessageTemplate {
......
@@ -42,6 +42,8 @@ extern runtime WasmArrayCopy(
namespace unsafe {
extern macro TimesTaggedSize(intptr): intptr;
extern macro Allocate(intptr, constexpr AllocationFlag): HeapObject;
extern macro AllocateWasmArray(
intptr, constexpr InitializationMode): HeapObject;
}
namespace wasm {
@@ -311,16 +313,17 @@ builtin WasmAllocateStructWithRtt(rtt: Map): HeapObject {
return result;
}
builtin WasmAllocateArrayWithRtt(
rtt: Map, length: uint32, elementSize: uint32): HeapObject {
macro WasmAllocateArray(
rtt: Map, length: uint32, elementSize: uint32,
initializationMode: constexpr InitializationMode): HeapObject {
// instanceSize = RoundUp(elementSize * length, kObjectAlignment)
// + WasmArray::kHeaderSize
const instanceSize: intptr =
torque_internal::AlignTagged(
Convert<intptr>(length) * Convert<intptr>(elementSize)) +
Convert<intptr>(kWasmArrayHeaderSize);
const result: HeapObject = unsafe::Allocate(
instanceSize, AllocationFlag::kAllowLargeObjectAllocation);
const result: HeapObject =
unsafe::AllocateWasmArray(instanceSize, initializationMode);
*UnsafeConstCast(&result.map) = rtt;
// TODO(ishell): consider removing properties_or_hash field from WasmObjects.
%RawDownCast<WasmArray>(result).properties_or_hash = kEmptyFixedArray;
@@ -328,6 +331,24 @@ builtin WasmAllocateArrayWithRtt(
return result;
}
builtin WasmAllocateArray_Uninitialized(
rtt: Map, length: uint32, elementSize: uint32): HeapObject {
return WasmAllocateArray(
rtt, length, elementSize, InitializationMode::kUninitialized);
}
builtin WasmAllocateArray_InitZero(
rtt: Map, length: uint32, elementSize: uint32): HeapObject {
return WasmAllocateArray(
rtt, length, elementSize, InitializationMode::kInitializeToZero);
}
builtin WasmAllocateArray_InitNull(
rtt: Map, length: uint32, elementSize: uint32): HeapObject {
return WasmAllocateArray(
rtt, length, elementSize, InitializationMode::kInitializeToNull);
}
// We put all uint32 parameters at the beginning so that they are assigned to
// registers.
builtin WasmArrayCopyWithChecks(
......
@@ -1449,6 +1449,34 @@ TNode<BoolT> CodeStubAssembler::IsRegularHeapObjectSize(TNode<IntPtrT> size) {
IntPtrConstant(kMaxRegularHeapObjectSize));
}
#if V8_ENABLE_WEBASSEMBLY
TNode<HeapObject> CodeStubAssembler::AllocateWasmArray(
TNode<IntPtrT> size_in_bytes, int initialization) {
TNode<HeapObject> array =
Allocate(size_in_bytes, AllocationFlag::kAllowLargeObjectAllocation);
if (initialization == kUninitialized) return array;
TNode<IntPtrT> array_address = BitcastTaggedToWord(array);
TNode<IntPtrT> start = IntPtrAdd(
array_address, IntPtrConstant(WasmArray::kHeaderSize - kHeapObjectTag));
TNode<IntPtrT> limit = IntPtrAdd(
array_address, IntPtrSub(size_in_bytes, IntPtrConstant(kHeapObjectTag)));
TNode<Object> value;
if (initialization == kInitializeToZero) {
// A pointer-sized zero pattern is just what we need for numeric Wasm
// arrays (their object size is rounded up to a multiple of kPointerSize).
value = SmiConstant(0);
} else if (initialization == kInitializeToNull) {
value = NullConstant();
} else {
UNREACHABLE();
}
StoreFieldsNoWriteBarrier(start, limit, value);
return array;
}
#endif // V8_ENABLE_WEBASSEMBLY
void CodeStubAssembler::BranchIfToBooleanIsTrue(TNode<Object> value,
Label* if_true,
Label* if_false) {
......
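
A hedged, standalone illustration of the "pointer-sized zero pattern" comment above (assumed 64-bit tagged size and an i32 element type; not V8 code): because the payload is rounded up to a multiple of the tagged size, tagged-word-sized zero stores cover every numeric element exactly.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

int main() {
  constexpr size_t kTaggedSize = 8;             // assumption: 64-bit tagged size
  constexpr size_t kNumElements = 5;
  constexpr size_t kElementSize = sizeof(int32_t);
  // Round the payload up to a multiple of kTaggedSize, as WasmAllocateArray
  // does via AlignTagged.
  constexpr size_t kPayload =
      (kNumElements * kElementSize + kTaggedSize - 1) / kTaggedSize * kTaggedSize;
  std::vector<uint8_t> storage(kPayload, 0xAB);  // garbage left by the allocator
  // The kInitializeToZero path: one pointer-sized zero store per tagged word.
  for (size_t offset = 0; offset < kPayload; offset += kTaggedSize) {
    const uint64_t zero = 0;
    std::memcpy(storage.data() + offset, &zero, kTaggedSize);
  }
  // Every i32 element now reads as 0, without a per-element loop.
  for (size_t i = 0; i < kNumElements; ++i) {
    int32_t element;
    std::memcpy(&element, storage.data() + i * kElementSize, kElementSize);
    assert(element == 0);
  }
  return 0;
}
```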
@@ -336,6 +336,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Signed(value);
}
enum InitializationMode {
kUninitialized,
kInitializeToZero,
kInitializeToNull
};
TNode<Smi> ParameterToTagged(TNode<Smi> value) { return value; }
TNode<Smi> ParameterToTagged(TNode<IntPtrT> value) { return SmiTag(value); }
@@ -1985,6 +1991,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<PropertyArray> AllocatePropertyArray(TNode<IntPtrT> capacity);
TNode<HeapObject> AllocateWasmArray(TNode<IntPtrT> size_in_bytes,
int initialization);
// TODO(v8:9722): Return type should be JSIteratorResult
TNode<JSObject> AllocateJSIteratorResult(TNode<Context> context,
TNode<Object> value,
......
@@ -601,7 +601,9 @@ bool NodeProperties::IsFreshObject(Node* node) {
// Note: Make sure to only add builtins which are guaranteed to return a
// fresh object. E.g. kWasmAllocateFixedArray may return the canonical
// empty array, and kWasmAllocateRtt may return a cached rtt.
return callee == Builtin::kWasmAllocateArrayWithRtt ||
return callee == Builtin::kWasmAllocateArray_Uninitialized ||
callee == Builtin::kWasmAllocateArray_InitNull ||
callee == Builtin::kWasmAllocateArray_InitZero ||
callee == Builtin::kWasmAllocateStructWithRtt ||
callee == Builtin::kWasmAllocateObjectWrapper ||
callee == Builtin::kWasmAllocatePair;
......
@@ -5575,6 +5575,18 @@ Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
return s;
}
Builtin ChooseArrayAllocationBuiltin(wasm::ValueType element_type,
Node* initial_value) {
if (initial_value != nullptr) {
// {initial_value} will be used for initialization after allocation.
return Builtin::kWasmAllocateArray_Uninitialized;
}
if (element_type.is_reference()) {
return Builtin::kWasmAllocateArray_InitNull;
}
return Builtin::kWasmAllocateArray_InitZero;
}
Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
const wasm::ArrayType* type,
Node* length, Node* initial_value,
@@ -5585,30 +5597,33 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
length, gasm_->Uint32Constant(wasm::kV8MaxWasmArrayLength)),
position);
wasm::ValueType element_type = type->element_type();
Node* a = gasm_->CallBuiltin(
Builtin::kWasmAllocateArrayWithRtt, Operator::kEliminatable, rtt, length,
Int32Constant(element_type.element_size_bytes()));
auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
auto done = gasm_->MakeLabel();
Node* start_offset =
Int32Constant(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize));
Node* element_size = Int32Constant(element_type.element_size_bytes());
Node* end_offset =
gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
// Loops need the graph's end to have been set up.
gasm_->EnsureEnd();
gasm_->Goto(&loop, start_offset);
gasm_->Bind(&loop);
{
Node* offset = loop.PhiAt(0);
Node* check = gasm_->Uint32LessThan(offset, end_offset);
gasm_->GotoIfNot(check, &done);
gasm_->StoreToObject(ObjectAccessForGCStores(type->element_type()), a,
offset, initial_value);
offset = gasm_->Int32Add(offset, element_size);
gasm_->Goto(&loop, offset);
Builtin stub = ChooseArrayAllocationBuiltin(element_type, initial_value);
Node* a =
gasm_->CallBuiltin(stub, Operator::kEliminatable, rtt, length,
Int32Constant(element_type.element_size_bytes()));
if (initial_value != nullptr) {
auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
auto done = gasm_->MakeLabel();
Node* start_offset =
Int32Constant(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize));
Node* element_size = Int32Constant(element_type.element_size_bytes());
Node* end_offset =
gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
// Loops need the graph's end to have been set up.
gasm_->EnsureEnd();
gasm_->Goto(&loop, start_offset);
gasm_->Bind(&loop);
{
Node* offset = loop.PhiAt(0);
Node* check = gasm_->Uint32LessThan(offset, end_offset);
gasm_->GotoIfNot(check, &done);
gasm_->StoreToObject(ObjectAccessForGCStores(type->element_type()), a,
offset, initial_value);
offset = gasm_->Int32Add(offset, element_size);
gasm_->Goto(&loop, offset);
}
gasm_->Bind(&done);
}
gasm_->Bind(&done);
return a;
}
......
@@ -5026,46 +5026,49 @@ class LiftoffCompiler {
__ LoadConstant(elem_size_reg, WasmValue(elem_size));
LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
CallRuntimeStub(WasmCode::kWasmAllocateArrayWithRtt,
MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32),
{rtt_var, length_var, elem_size_var},
decoder->position());
WasmCode::RuntimeStubId stub_id =
initial_value_on_stack
? WasmCode::kWasmAllocateArray_Uninitialized
: is_reference(elem_kind) ? WasmCode::kWasmAllocateArray_InitNull
: WasmCode::kWasmAllocateArray_InitZero;
CallRuntimeStub(
stub_id, MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32),
{rtt_var, length_var, elem_size_var}, decoder->position());
// Drop the RTT.
__ cache_state()->stack_state.pop_back(1);
}
LiftoffRegister obj(kReturnRegister0);
LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned));
LiftoffRegister value = initial_value_on_stack
? pinned.set(__ PopToRegister(pinned))
: pinned.set(__ GetUnusedRegister(
reg_class_for(elem_kind), pinned));
if (!initial_value_on_stack) {
if (initial_value_on_stack) {
LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned));
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
// Initialize the array's elements.
LiftoffRegister offset = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(
offset,
WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
LiftoffRegister end_offset = length;
if (element_size_log2(elem_kind) != 0) {
__ emit_i32_shli(end_offset.gp(), length.gp(),
element_size_log2(elem_kind));
}
__ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
Label loop, done;
__ bind(&loop);
__ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
end_offset.gp());
StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
__ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
__ emit_jump(&loop);
__ bind(&done);
} else {
if (!CheckSupportedType(decoder, elem_kind, "default value")) return;
SetDefaultValue(value, elem_kind, pinned);
// Drop the length.
__ cache_state()->stack_state.pop_back(1);
}
// Initialize the array's elements.
LiftoffRegister offset = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(
offset,
WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
LiftoffRegister end_offset = length;
if (element_size_log2(elem_kind) != 0) {
__ emit_i32_shli(end_offset.gp(), length.gp(),
element_size_log2(elem_kind));
}
__ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
Label loop, done;
__ bind(&loop);
__ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
end_offset.gp());
StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
__ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
__ emit_jump(&loop);
__ bind(&done);
__ PushRegister(kRef, obj);
}
......
@@ -945,7 +945,9 @@ class WasmGraphBuildingInterface {
void ArrayNewDefault(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const Value& length, const Value& rtt, Value* result) {
TFNode* initial_value = DefaultValue(imm.array_type->element_type());
// This will cause the default value to be chosen automatically based
// on the element type.
TFNode* initial_value = nullptr;
result->node =
builder_->ArrayNewWithRtt(imm.index, imm.array_type, length.node,
initial_value, rtt.node, decoder->position());
......
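
The "chosen automatically" comment above reduces to the same rule implemented by ChooseArrayAllocationBuiltin and by the Liftoff ternary: a minimal sketch of that rule (enum and function names are made up for illustration, not V8 code):

```cpp
#include <cassert>

// Hypothetical stand-in for the three allocation builtin variants.
enum class Stub { kUninitialized, kInitNull, kInitZero };

Stub ChooseStub(bool has_initial_value, bool element_is_reference) {
  if (has_initial_value) return Stub::kUninitialized;  // caller fills elements
  return element_is_reference ? Stub::kInitNull : Stub::kInitZero;
}

int main() {
  assert(ChooseStub(true, true) == Stub::kUninitialized);   // array.new with explicit value
  assert(ChooseStub(false, true) == Stub::kInitNull);       // array.new_default, reference elements
  assert(ChooseStub(false, false) == Stub::kInitZero);      // array.new_default, numeric elements
  return 0;
}
```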
@@ -106,7 +106,9 @@ struct WasmModule;
IF_TSAN(V, TSANRelaxedLoad32SaveFP) \
IF_TSAN(V, TSANRelaxedLoad64IgnoreFP) \
IF_TSAN(V, TSANRelaxedLoad64SaveFP) \
V(WasmAllocateArrayWithRtt) \
V(WasmAllocateArray_Uninitialized) \
V(WasmAllocateArray_InitNull) \
V(WasmAllocateArray_InitZero) \
V(WasmArrayCopy) \
V(WasmArrayCopyWithChecks) \
V(WasmAllocateRtt) \
......