Commit 1554c7ee authored by Andreas Haas, committed by Commit Bot

Revert "[wasm][liftoff] Emit safepoints for externref values on the stack"

This reverts commit 10348e8e.

Reason for revert: https://crbug.com/1125951

Original change's description:
> [wasm][liftoff] Emit safepoints for externref values on the stack
> 
> With this CL we emit safepoint maps for externref values on the Liftoff
> value stack. With that there is support for externref parameters and
> locals in Liftoff, as well as for intermediate values of type
> externref.
> 
> R=thibaudm@chromium.org
> 
> Bug: v8:7581
> Change-Id: I2df0a8d00b2da33fe06ff474b039cca4c7be726d
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2387571
> Commit-Queue: Andreas Haas <ahaas@chromium.org>
> Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69725}

TBR=ahaas@chromium.org,thibaudm@chromium.org

Change-Id: I4cdf7fedfc91cd99302d5cb05e242dbb032c5803
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:7581
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2398529
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69754}
parent 6b3e8e69
......@@ -765,8 +765,6 @@ DEFINE_INT(trace_wasm_ast_start, 0,
DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
DEFINE_BOOL(liftoff, true,
"enable Liftoff, the baseline compiler for WebAssembly")
DEFINE_BOOL(liftoff_extern_ref, false,
"enable support for externref in Liftoff")
// We can't tier up (from Liftoff to TurboFan) in single-threaded mode, hence
// disable Liftoff in that configuration for now. The alternative is disabling
// TurboFan, which would reduce peak performance considerably.
......
......@@ -272,8 +272,6 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
#endif
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
assm->str(src.gp(), dst);
break;
case ValueType::kI64:
......@@ -305,8 +303,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
ValueType type) {
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
assm->ldr(dst.gp(), src);
break;
case ValueType::kI64:
......@@ -501,7 +497,13 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return (type.kind() == ValueType::kS128 || type.is_reference_type());
switch (type.kind()) {
case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
return false;
}
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -1281,7 +1283,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
DCHECK_NE(dst, src);
DCHECK(type == kWasmI32 || type.is_reference_type());
DCHECK_EQ(type, kWasmI32);
TurboAssembler::Move(dst, src);
}
......
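As context for the NeedsAlignment() hunks in this diff: Liftoff spill slots are normally kStackSlotSize bytes, so only 16-byte s128 values (and, in the reverted change, tagged reference slots as well) ever report that they need alignment, as the restored comment "No alignment because all other types are kStackSlotSize" states. A minimal standalone sketch of the padding arithmetic that such an alignment requirement implies, assuming an 8-byte kStackSlotSize; the helper name and offsets are illustrative and this is not V8 code:

#include <cstdio>

constexpr int kStackSlotSize = 8;  // assumed, matching V8's 8-byte stack slots

// Pads 'offset' up so that a value of 'value_size' bytes starts at an
// offset that is a multiple of its own size; returns the padding needed.
int PaddingForAlignedSlot(int offset, int value_size, bool needs_alignment) {
  if (!needs_alignment) return 0;
  int misalignment = offset % value_size;
  return misalignment == 0 ? 0 : value_size - misalignment;
}

int main() {
  // An 8-byte f64 at offset 24 already fits one stack slot: no padding.
  std::printf("f64  at 24: pad %d\n", PaddingForAlignedSlot(24, 8, false));
  // A 16-byte s128 at offset 24 needs 8 bytes of padding to start at 32.
  std::printf("s128 at 24: pad %d\n", PaddingForAlignedSlot(24, 16, true));
  return 0;
}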
......@@ -50,8 +50,6 @@ inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
case ValueType::kI32:
return reg.gp().W();
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
return reg.gp().X();
case ValueType::kF32:
return reg.fp().S();
......@@ -278,7 +276,13 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return type.kind() == ValueType::kS128 || type.is_reference_type();
switch (type.kind()) {
case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
return false;
}
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -731,7 +735,7 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
if (type == kWasmI32) {
Mov(dst.W(), src.W());
} else {
DCHECK(kWasmI64 == type || type.is_reference_type());
DCHECK_EQ(kWasmI64, type);
Mov(dst.X(), src.X());
}
}
......
......@@ -42,8 +42,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
Operand src(base, offset);
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
assm->mov(dst.gp(), src);
break;
case ValueType::kI64:
......@@ -231,9 +229,7 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
return type.element_size_bytes();
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return type.is_reference_type();
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) { return false; }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
......@@ -1032,7 +1028,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
DCHECK_NE(dst, src);
DCHECK(kWasmI32 == type || type.is_reference_type());
DCHECK_EQ(kWasmI32, type);
mov(dst, src);
}
......@@ -1054,8 +1050,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
Operand dst = liftoff::GetStackSlot(offset);
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
mov(dst, reg.gp());
break;
case ValueType::kI64:
......
......@@ -488,21 +488,6 @@ void LiftoffAssembler::CacheState::Split(const CacheState& source) {
*this = source;
}
void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) {
for (auto slot : stack_state) {
DCHECK(!slot.is_reg());
if (slot.type().is_reference_type()) {
// index = 0 is for the stack slot at 'fp - kSystemPointerSize', the
// location of the current stack slot is 'fp - slot.offset()'.
// The index we need is therefore '(fp - kSystemPointerSize) - (fp -
// slot.offset())' = 'slot.offset() - kSystemPointerSize'.
auto index = (slot.offset() - kSystemPointerSize) / kSystemPointerSize;
safepoint.DefinePointerSlot(index);
}
}
}
namespace {
constexpr AssemblerOptions DefaultLiftoffOptions() {
......
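The removed comment in CacheState::DefineSafepoint() above derives the safepoint index of a reference slot as (slot.offset() - kSystemPointerSize) / kSystemPointerSize, because index 0 denotes the slot at 'fp - kSystemPointerSize' while the slot itself lives at 'fp - slot.offset()'. A minimal standalone sketch of that arithmetic, assuming a 64-bit target with kSystemPointerSize == 8; offsets are illustrative and this is not V8 code:

#include <cstdio>

constexpr int kSystemPointerSize = 8;  // assumed: 64-bit target

// Maps a Liftoff stack-slot offset (measured down from fp) to the
// safepoint index used to mark that slot as holding a reference.
int SafepointIndexForSlot(int offset_from_fp) {
  return (offset_from_fp - kSystemPointerSize) / kSystemPointerSize;
}

int main() {
  // The first spill slot (fp - 8) maps to index 0, the next (fp - 16) to 1.
  for (int offset : {8, 16, 24, 40}) {
    std::printf("fp - %2d -> index %d\n", offset,
                SafepointIndexForSlot(offset));
  }
  return 0;
}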
......@@ -135,8 +135,6 @@ class LiftoffAssembler : public TurboAssembler {
// Disallow copy construction.
CacheState(const CacheState&) = delete;
void DefineSafepoint(Safepoint& safepoint);
base::SmallVector<VarState, 8> stack_state;
LiftoffRegList used_registers;
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
......@@ -1121,12 +1119,9 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t num_locals() const { return num_locals_; }
void set_num_locals(uint32_t num_locals);
int GetTotalFrameSlotCountForGC() const {
// The GC does not care about the actual number of spill slots, just about
// the number of references that could be there in the spilling area. Note
// that the offset of the first spill slot is kSystemPointerSize and not
// '0'. Therefore we don't have to add '+1' here.
return max_used_spill_offset_ / kSystemPointerSize;
int GetTotalFrameSlotCount() const {
// TODO(zhin): Temporary for migration from index to offset.
return ((max_used_spill_offset_ + kStackSlotSize - 1) / kStackSlotSize);
}
int GetTotalFrameSize() const { return max_used_spill_offset_; }
......
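The header hunk above swaps GetTotalFrameSlotCountForGC() back to the pre-CL GetTotalFrameSlotCount(). The removed comment reasons that, because the first spill slot sits at offset kSystemPointerSize rather than 0, max_used_spill_offset_ / kSystemPointerSize already equals the number of slots and no '+1' is needed; the restored version instead rounds the byte offset up to whole kStackSlotSize slots. A small standalone sketch of both formulas, assuming kSystemPointerSize == kStackSlotSize == 8; not V8 code:

#include <cstdio>

constexpr int kSystemPointerSize = 8;  // assumed: 64-bit target
constexpr int kStackSlotSize = 8;

// Removed (GC-oriented) variant: offsets start at kSystemPointerSize, so the
// highest used offset divided by the slot size is already the slot count.
int FrameSlotCountForGC(int max_used_spill_offset) {
  return max_used_spill_offset / kSystemPointerSize;
}

// Restored variant: round the byte offset up to whole stack slots.
int FrameSlotCountRoundedUp(int max_used_spill_offset) {
  return (max_used_spill_offset + kStackSlotSize - 1) / kStackSlotSize;
}

int main() {
  // Three 8-byte spill slots -> highest used offset 24 -> both formulas: 3.
  std::printf("offset 24: %d vs %d\n", FrameSlotCountForGC(24),
              FrameSlotCountRoundedUp(24));
  // A non-slot-aligned offset (e.g. 20) shows where the rounding matters.
  std::printf("offset 20: %d vs %d\n", FrameSlotCountForGC(20),
              FrameSlotCountRoundedUp(20));
  return 0;
}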
(This file's diff is collapsed and not shown.)
......@@ -61,8 +61,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
MemOperand src(base, offset);
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kRef:
case ValueType::kOptRef:
assm->lw(dst.gp(), src);
break;
case ValueType::kI64:
......@@ -332,7 +330,13 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return type.kind() == ValueType::kS128 || type.is_reference_type();
switch (type.kind()) {
case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
return false;
}
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -647,8 +651,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kRef:
case ValueType::kOptRef:
sw(reg.gp(), dst);
break;
case ValueType::kI64:
......@@ -699,8 +701,6 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset);
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kRef:
case ValueType::kOptRef:
lw(reg.gp(), src);
break;
case ValueType::kI64:
......
......@@ -53,8 +53,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
assm->lw(dst.gp(), src);
break;
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
assm->ld(dst.gp(), src);
break;
case ValueType::kF32:
......@@ -297,7 +295,13 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return type.kind() == ValueType::kS128 || type.is_reference_type();
switch (type.kind()) {
case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
return false;
}
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
......@@ -584,8 +588,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
Sw(reg.gp(), dst);
break;
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
Sd(reg.gp(), dst);
break;
case ValueType::kF32:
......@@ -612,9 +614,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef: {
case ValueType::kI64: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), value.to_i64());
sd(tmp.gp(), dst);
......@@ -634,8 +634,6 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
Lw(reg.gp(), src);
break;
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
Ld(reg.gp(), src);
break;
case ValueType::kF32:
......
......@@ -62,8 +62,6 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
assm->movl(dst.gp(), src);
break;
case ValueType::kI64:
case ValueType::kOptRef:
case ValueType::kRef:
assm->movq(dst.gp(), src);
break;
case ValueType::kF32:
......@@ -203,9 +201,7 @@ int LiftoffAssembler::SlotSizeForType(ValueType type) {
return type.element_size_bytes();
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
return type.is_reference_type();
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) { return false; }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
......@@ -750,7 +746,7 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
if (type == kWasmI32) {
movl(dst, src);
} else {
DCHECK(kWasmI64 == type || type.is_reference_type());
DCHECK_EQ(kWasmI64, type);
movq(dst, src);
}
}
......@@ -776,8 +772,6 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
movl(dst, reg.gp());
break;
case ValueType::kI64:
case ValueType::kOptRef:
case ValueType::kRef:
movq(dst, reg.gp());
break;
case ValueType::kF32:
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --expose-wasm --experimental-wasm-reftypes --expose-gc --liftoff
// Flags: --no-wasm-tier-up --liftoff-extern-ref
load("test/mjsunit/wasm/externref.js");