Commit 0c9805d0 authored by Ng Zhi An, committed by Commit Bot

[liftoff] Allocate stack slot sizes based on type

Liftoff supports unfixed stack slot sizes now, so we can have
SlotSizeForType return different values based on the value type it is
spilling. We make the change for architectures that support unaligned
access, x64, ia32, arm64.

Note for the ppc/s390/mips/mips64 ports: SlotSizeForType still returns
8 bytes (the old behavior), but can be changed later.

This patch also makes adjustments to PatchPrepareStackFrame to align sp
to appropriate values (pointer size).

Bug: v8:9909
Change-Id: Iddd2dcd652b162a04a02ed704c5b06f6af8a186d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1956165
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65467}
parent 2886c2eb
......@@ -288,6 +288,10 @@ void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
  // This port spills every value type into a slot of the same fixed size.
  const uint32_t slot_bytes = kStackSlotSize;
  return slot_bytes;
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type()) {
......
......@@ -176,6 +176,13 @@ void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
  // TODO(zhin): Unaligned accesses typically cost extra cycles; measure the
  // performance impact before moving to per-type slot sizes. Once SIMD types
  // are added, alignment logic will be needed here as well.
  const uint32_t fixed_slot_bytes = kStackSlotSize;
  return fixed_slot_bytes;
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type()) {
......
......@@ -156,6 +156,7 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) {
uint32_t bytes = liftoff::kConstantStackSpace + spill_size;
DCHECK_LE(bytes, kMaxInt);
DCHECK_EQ(bytes % kSystemPointerSize, 0);
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 64;
......@@ -193,6 +194,10 @@ void LiftoffAssembler::FinishCode() {}
void LiftoffAssembler::AbortCompilation() {}
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
  // Size each spill slot to the value it holds instead of a fixed slot size
  // (this port tolerates unaligned stack accesses, per the commit message).
  const uint32_t element_bytes = ValueTypes::ElementSizeInBytes(type);
  return element_bytes;
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type()) {
......
......@@ -38,11 +38,6 @@ class LiftoffAssembler : public TurboAssembler {
static constexpr ValueType kWasmIntPtr =
kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
// TODO(zhin): Temporary while migrating away from fixed slot sizes.
// Returns the number of bytes a spilled value of |type| occupies on the
// stack. Currently a single fixed size for every type; the per-architecture
// liftoff-assembler implementations replace this during the migration.
inline static constexpr uint32_t SlotSizeForType(ValueType type) {
return kStackSlotSize;
}
class VarState {
public:
enum Location : uint8_t { kStack, kRegister, kIntConst };
......@@ -400,6 +395,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void PatchPrepareStackFrame(int offset, uint32_t spill_size);
inline void FinishCode();
inline void AbortCompilation();
inline static uint32_t SlotSizeForType(ValueType type);
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
......
......@@ -301,6 +301,10 @@ void LiftoffAssembler::FinishCode() {}
void LiftoffAssembler::AbortCompilation() {}
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
  // Fixed-size spill slots: |type| does not influence the slot size here.
  const uint32_t bytes_per_slot = kStackSlotSize;
  return bytes_per_slot;
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type()) {
......
......@@ -259,6 +259,10 @@ void LiftoffAssembler::FinishCode() {}
void LiftoffAssembler::AbortCompilation() {}
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
  // All spill slots share one fixed size on this architecture.
  const uint32_t uniform_slot_size = kStackSlotSize;
  return uniform_slot_size;
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type()) {
......
......@@ -64,6 +64,10 @@ void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
void LiftoffAssembler::AbortCompilation() { FinishCode(); }
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
  // Keep the historical fixed slot size on this port; per-type slot sizing
  // (as done on the unaligned-access ports) can be adopted later.
  const uint32_t slot_bytes = kStackSlotSize;
  return slot_bytes;
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
bailout(kUnsupportedArchitecture, "LoadConstant");
......
......@@ -63,6 +63,10 @@ void LiftoffAssembler::FinishCode() {}
void LiftoffAssembler::AbortCompilation() {}
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
  // This port still uses one fixed slot size for every value type.
  const uint32_t fixed_size = kStackSlotSize;
  return fixed_size;
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
bailout(kUnsupportedArchitecture, "LoadConstant");
......
......@@ -139,6 +139,8 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) {
uint32_t bytes = liftoff::kConstantStackSpace + spill_size;
DCHECK_LE(bytes, kMaxInt);
// Need to align sp to system pointer size.
bytes = RoundUp(bytes, kSystemPointerSize);
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 64;
......@@ -175,6 +177,10 @@ void LiftoffAssembler::FinishCode() {}
void LiftoffAssembler::AbortCompilation() {}
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
  // Spill slots are sized per value type on this port rather than padded to
  // a fixed slot size.
  const uint32_t type_bytes = ValueTypes::ElementSizeInBytes(type);
  return type_bytes;
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type()) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment