Commit 7c4b9302 authored by Andreas Haas, committed by Commit Bot

Reland "[wasm][liftoff] Emit safepoints for externref values on the stack"

The emitted safepoint entries had the wrong size, because the size did not
include StandardFrameConstants::kFixedFrameSizeAboveFp. The code still
worked because the indices encoded in the entries were too low by
StandardFrameConstants::kFixedFrameSizeAboveFp as well, and thereby
compensated for the invalid size. It worked as follows:

First, stack_slots_size was calculated from the safepoint entry.
Then the position of a stack slot was computed as
"frame_header_base + stack_slots_size - index * pointer_size", where
"index" is the value encoded in the safepoint map. Because of the incorrect
encoding, both stack_slots_size and index were too low by
StandardFrameConstants::kFixedFrameSizeAboveFp. Therefore the errors in
the two values cancelled each other out, making the end result correct.
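As an illustration, here is a minimal sketch (not V8 code; the constants and
slot values are made up for the example) of how the two errors cancel out in
that formula:

// Illustrative only: pick an arbitrary frame layout.
const kPointerSize = 8;
const kFixedFrameSizeAboveFp = 2 * kPointerSize;
const frame_header_base = 0x1000;

// Hypothetical correct values for one spilled reference slot.
const correct_stack_slots_size = 5 * kPointerSize;
const correct_index = 3;

// The buggy encoding left out kFixedFrameSizeAboveFp in both values.
const buggy_stack_slots_size = correct_stack_slots_size - kFixedFrameSizeAboveFp;
const buggy_index = correct_index - kFixedFrameSizeAboveFp / kPointerSize;

// frame_header_base + stack_slots_size - index * pointer_size
const correct_pos =
    frame_header_base + correct_stack_slots_size - correct_index * kPointerSize;
const buggy_pos =
    frame_header_base + buggy_stack_slots_size - buggy_index * kPointerSize;

// Both errors amount to kFixedFrameSizeAboveFp bytes and cancel out.
console.assert(correct_pos === buggy_pos);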

With --print-code, however, the safepoint entry size was also read directly,
and printing crashed because the encoded value was too low.

The reland fixes the indices.

Original message:

With this CL we emit safepoint maps for externref values on the Liftoff
value stack. With that there is support for externref parameters and
locals in Liftoff, as well as for intermediate values of type
externref.
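For illustration, a hypothetical mjsunit-style test (not part of this CL)
exercising an externref parameter in a Liftoff-compiled function could look
like the following; it assumes the kWasmExternRef constant and the usual
wasm-module-builder.js helpers are available:

// Flags: --expose-wasm --experimental-wasm-reftypes --liftoff
// Flags: --no-wasm-tier-up --liftoff-extern-ref
load("test/mjsunit/wasm/wasm-module-builder.js");

(function testExternRefPassthrough() {
  let builder = new WasmModuleBuilder();
  builder
      .addFunction('passthrough', makeSig([kWasmExternRef], [kWasmExternRef]))
      .addBody([kExprLocalGet, 0])  // return the externref parameter unchanged
      .exportFunc();
  let instance = builder.instantiate();
  let obj = {hello: 'world'};
  assertSame(obj, instance.exports.passthrough(obj));
})();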

R=thibaudm@chromium.org

Bug: v8:7581
Change-Id: I88444e57745d7b9fe8f1630e904d49736fa9d720
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2398531
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69786}
parent 81231c23
......@@ -765,6 +765,8 @@ DEFINE_INT(trace_wasm_ast_start, 0,
DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
DEFINE_BOOL(liftoff, true,
"enable Liftoff, the baseline compiler for WebAssembly")
DEFINE_BOOL(liftoff_extern_ref, false,
"enable support for externref in Liftoff")
// We can't tier up (from Liftoff to TurboFan) in single-threaded mode, hence
// disable Liftoff in that configuration for now. The alternative is disabling
// TurboFan, which would reduce peak performance considerably.
......
......@@ -272,6 +272,8 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
#endif
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
assm->str(src.gp(), dst);
break;
case ValueType::kI64:
......@@ -303,6 +305,8 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
ValueType type) {
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
assm->ldr(dst.gp(), src);
break;
case ValueType::kI64:
......@@ -497,13 +501,7 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
switch (type.kind()) {
case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
return false;
}
return (type.kind() == ValueType::kS128 || type.is_reference_type());
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -1283,7 +1281,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
DCHECK_NE(dst, src);
DCHECK_EQ(type, kWasmI32);
DCHECK(type == kWasmI32 || type.is_reference_type());
TurboAssembler::Move(dst, src);
}
......
......@@ -50,6 +50,8 @@ inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
case ValueType::kI32:
return reg.gp().W();
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
return reg.gp().X();
case ValueType::kF32:
return reg.fp().S();
......@@ -276,13 +278,7 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
switch (type.kind()) {
case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
return false;
}
return type.kind() == ValueType::kS128 || type.is_reference_type();
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -735,7 +731,7 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
if (type == kWasmI32) {
Mov(dst.W(), src.W());
} else {
DCHECK_EQ(kWasmI64, type);
DCHECK(kWasmI64 == type || type.is_reference_type());
Mov(dst.X(), src.X());
}
}
......
......@@ -42,6 +42,8 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
Operand src(base, offset);
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
assm->mov(dst.gp(), src);
break;
case ValueType::kI64:
......@@ -229,7 +231,9 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
return type.element_size_bytes();
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) { return false; }
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return type.is_reference_type();
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
......@@ -1028,7 +1032,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
DCHECK_NE(dst, src);
DCHECK_EQ(kWasmI32, type);
DCHECK(kWasmI32 == type || type.is_reference_type());
mov(dst, src);
}
......@@ -1050,6 +1054,8 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
Operand dst = liftoff::GetStackSlot(offset);
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
mov(dst, reg.gp());
break;
case ValueType::kI64:
......
......@@ -488,6 +488,35 @@ void LiftoffAssembler::CacheState::Split(const CacheState& source) {
*this = source;
}
void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) {
for (auto slot : stack_state) {
DCHECK(!slot.is_reg());
if (slot.type().is_reference_type()) {
// index = 0 is for the stack slot at 'fp + kFixedFrameSizeAboveFp -
// kSystemPointerSize', the location of the current stack slot is 'fp -
// slot.offset()'. The index we need is therefore '(fp +
// kFixedFrameSizeAboveFp - kSystemPointerSize) - (fp - slot.offset())' =
// 'slot.offset() + kFixedFrameSizeAboveFp - kSystemPointerSize'.
auto index =
(slot.offset() + StandardFrameConstants::kFixedFrameSizeAboveFp -
kSystemPointerSize) /
kSystemPointerSize;
safepoint.DefinePointerSlot(index);
}
}
}
int LiftoffAssembler::GetTotalFrameSlotCountForGC() const {
// The GC does not care about the actual number of spill slots, just about
// the number of references that could be there in the spilling area. Note
// that the offset of the first spill slot is kSystemPointerSize and not
// '0'. Therefore we don't have to add '+1' here.
return (max_used_spill_offset_ +
StandardFrameConstants::kFixedFrameSizeAboveFp) /
kSystemPointerSize;
}
namespace {
constexpr AssemblerOptions DefaultLiftoffOptions() {
......
......@@ -135,6 +135,8 @@ class LiftoffAssembler : public TurboAssembler {
// Disallow copy construction.
CacheState(const CacheState&) = delete;
void DefineSafepoint(Safepoint& safepoint);
base::SmallVector<VarState, 8> stack_state;
LiftoffRegList used_registers;
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
......@@ -1119,10 +1121,7 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t num_locals() const { return num_locals_; }
void set_num_locals(uint32_t num_locals);
int GetTotalFrameSlotCount() const {
// TODO(zhin): Temporary for migration from index to offset.
return ((max_used_spill_offset_ + kStackSlotSize - 1) / kStackSlotSize);
}
int GetTotalFrameSlotCountForGC() const;
int GetTotalFrameSize() const { return max_used_spill_offset_; }
......
......@@ -61,6 +61,8 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
MemOperand src(base, offset);
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kRef:
case ValueType::kOptRef:
assm->lw(dst.gp(), src);
break;
case ValueType::kI64:
......@@ -330,13 +332,7 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
switch (type.kind()) {
case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
return false;
}
return type.kind() == ValueType::kS128 || type.is_reference_type();
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -651,6 +647,8 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kRef:
case ValueType::kOptRef:
sw(reg.gp(), dst);
break;
case ValueType::kI64:
......@@ -701,6 +699,8 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset);
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kRef:
case ValueType::kOptRef:
lw(reg.gp(), src);
break;
case ValueType::kI64:
......
......@@ -53,6 +53,8 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
assm->lw(dst.gp(), src);
break;
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
assm->ld(dst.gp(), src);
break;
case ValueType::kF32:
......@@ -295,13 +297,7 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
switch (type.kind()) {
case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
return false;
}
return type.kind() == ValueType::kS128 || type.is_reference_type();
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -588,6 +584,8 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
Sw(reg.gp(), dst);
break;
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
Sd(reg.gp(), dst);
break;
case ValueType::kF32:
......@@ -614,7 +612,9 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), value.to_i64());
sd(tmp.gp(), dst);
......@@ -634,6 +634,8 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
Lw(reg.gp(), src);
break;
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
Ld(reg.gp(), src);
break;
case ValueType::kF32:
......
......@@ -62,6 +62,8 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
assm->movl(dst.gp(), src);
break;
case ValueType::kI64:
case ValueType::kOptRef:
case ValueType::kRef:
assm->movq(dst.gp(), src);
break;
case ValueType::kF32:
......@@ -201,7 +203,9 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
return type.element_size_bytes();
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) { return false; }
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return type.is_reference_type();
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
......@@ -746,7 +750,7 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
if (type == kWasmI32) {
movl(dst, src);
} else {
DCHECK_EQ(kWasmI64, type);
DCHECK(kWasmI64 == type || type.is_reference_type());
movq(dst, src);
}
}
......@@ -772,6 +776,8 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
movl(dst, reg.gp());
break;
case ValueType::kI64:
case ValueType::kOptRef:
case ValueType::kRef:
movq(dst, reg.gp());
break;
case ValueType::kF32:
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --expose-wasm --liftoff --no-wasm-tier-up --print-code --wasm-staging
// Flags: --experimental-wasm-threads
load("test/mjsunit/wasm/wasm-module-builder.js");
(function testPrintCode() {
let builder = new WasmModuleBuilder();
builder.addMemory(1, undefined, false);
builder
.addFunction('main', makeSig([kWasmI32, kWasmI32, kWasmF64], [kWasmI32]))
.addBody([
kExprLocalGet, 0, kExprLocalGet, 1, kExprI64UConvertI32, kExprLocalGet,
2, kExprI64SConvertF64, kAtomicPrefix, kExprI64AtomicWait, 0, 0
]);
builder.instantiate();
})();
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --expose-wasm --experimental-wasm-reftypes --expose-gc --liftoff
// Flags: --no-wasm-tier-up --liftoff-extern-ref
load("test/mjsunit/wasm/externref.js");