Commit 50f6baf4 authored by Igor Sheludko, committed by Commit Bot

[cleanup] Fix kPointerSize usages in src/wasm/

Also added != 0 for readability in checks like:
  if (FIELD_SIZE(kFooOffset) != 0) {...}

Bug: v8:8477, v8:8562
Change-Id: Ibc305103475e6ec029e89e7ad095ec0a1fa30189
Reviewed-on: https://chromium-review.googlesource.com/c/1382743
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58360}
parent e95be459
......@@ -189,7 +189,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
// - Set the byte_length field to byte_length.
// - Set backing_store to null/Smi(0).
// - Set all embedder fields to Smi(0).
if (FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset)) {
if (FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(JSArrayBuffer::kOptionalPaddingOffset));
StoreObjectFieldNoWriteBarrier(
buffer, JSArrayBuffer::kOptionalPaddingOffset, Int32Constant(0),
......
......@@ -3055,7 +3055,7 @@ TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
Signed(WordShl(length, kSystemPointerSizeLog2)));
Node* raw_result = Allocate(size, kNone);
StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap);
if (FIELD_SIZE(BigInt::kOptionalPaddingOffset)) {
if (FIELD_SIZE(BigInt::kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(BigInt::kOptionalPaddingOffset));
StoreObjectFieldNoWriteBarrier(raw_result, BigInt::kOptionalPaddingOffset,
Int32Constant(0),
......
......@@ -696,7 +696,7 @@ class WasmInstanceObject::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, kPropertiesOrHashOffset, kFirstUntaggedOffset, v);
IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
IterateJSObjectBodyImpl(map, obj, kSize, object_size, v);
}
......
......@@ -1610,7 +1610,7 @@ void JSProxy::JSProxyVerify(Isolate* isolate) {
void JSArrayBuffer::JSArrayBufferVerify(Isolate* isolate) {
CHECK(IsJSArrayBuffer());
if (FIELD_SIZE(kOptionalPaddingOffset)) {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
CHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
CHECK_EQ(0,
*reinterpret_cast<uint32_t*>(address() + kOptionalPaddingOffset));
......@@ -1829,8 +1829,8 @@ void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
// Just generically check all tagged fields. Don't check the untagged fields,
// as some of them might still contain the "undefined" value if the
// WasmInstanceObject is not fully set up yet.
for (int offset = kHeaderSize; offset < kFirstUntaggedOffset;
offset += kPointerSize) {
for (int offset = kHeaderSize; offset < kEndOfTaggedFieldsOffset;
offset += kTaggedSize) {
VerifyObjectField(isolate, offset);
}
}
......
......@@ -119,7 +119,7 @@ class FreshlyAllocatedBigInt : public BigIntBase {
// Clear uninitialized padding space.
inline void clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset)) {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
FIELD_SIZE(kOptionalPaddingOffset));
......
......@@ -87,7 +87,7 @@ void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
}
void JSArrayBuffer::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset)) {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
FIELD_SIZE(kOptionalPaddingOffset));
......
......@@ -48,7 +48,7 @@ ObjectSlot PreParsedScopeData::child_data_start() const {
}
void PreParsedScopeData::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset)) {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
FIELD_SIZE(kOptionalPaddingOffset));
......@@ -65,7 +65,7 @@ INT32_ACCESSORS(UncompiledData, end_position, kEndPositionOffset)
INT32_ACCESSORS(UncompiledData, function_literal_id, kFunctionLiteralIdOffset)
void UncompiledData::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset)) {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
FIELD_SIZE(kOptionalPaddingOffset));
......
......@@ -37,11 +37,12 @@ namespace liftoff {
// | | v
// -----+--------------------+ <-- stack ptr (sp)
//
static_assert(2 * kPointerSize == LiftoffAssembler::kStackSlotSize,
static_assert(2 * kSystemPointerSize == LiftoffAssembler::kStackSlotSize,
"Slot size should be twice the size of the 32 bit pointer.");
constexpr int32_t kInstanceOffset = 2 * kPointerSize;
constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + 2 * kPointerSize;
constexpr int32_t kConstantStackSpace = kPointerSize;
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset =
kInstanceOffset + 2 * kSystemPointerSize;
constexpr int32_t kConstantStackSpace = kSystemPointerSize;
// kPatchInstructionsRequired sets a maximum limit of how many instructions that
// PatchPrepareStackFrame will use in order to increase the stack appropriately.
// Three instructions are required to sub a large constant, movw + movt + sub.
......@@ -1337,7 +1338,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
ExternalReference ext_ref) {
// Arguments are passed by pushing them all to the stack and then passing
// a pointer to them.
DCHECK_EQ(stack_bytes % kPointerSize, 0);
DCHECK(IsAligned(stack_bytes, kSystemPointerSize));
// Reserve space in the stack.
sub(sp, sp, Operand(stack_bytes));
......@@ -1392,7 +1393,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
break;
case kWasmI64:
ldr(result_reg->low_gp(), MemOperand(sp));
ldr(result_reg->high_gp(), MemOperand(sp, kPointerSize));
ldr(result_reg->high_gp(), MemOperand(sp, kSystemPointerSize));
break;
case kWasmF32:
vldr(liftoff::GetFloatRegister(result_reg->fp()), MemOperand(sp));
......
......@@ -41,8 +41,8 @@ namespace liftoff {
// -----+--------------------+ <-- stack ptr (sp)
//
constexpr int32_t kInstanceOffset = 2 * kPointerSize;
constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kPointerSize;
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize;
constexpr int32_t kConstantStackSpace = 0;
inline MemOperand GetStackSlot(uint32_t index) {
......
......@@ -384,7 +384,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
liftoff::Load(this, dst, ebp, kPointerSize * (caller_slot_idx + 1), type);
liftoff::Load(this, dst, ebp, kSystemPointerSize * (caller_slot_idx + 1),
type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
......@@ -1649,8 +1650,9 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
ret(static_cast<int>(num_stack_slots * kPointerSize));
DCHECK_LT(num_stack_slots,
(1 << 16) / kSystemPointerSize); // 16 bit immediate
ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
......
......@@ -36,7 +36,7 @@ class LiftoffAssembler : public TurboAssembler {
static constexpr uint32_t kStackSlotSize = 8;
static constexpr ValueType kWasmIntPtr =
kPointerSize == 8 ? kWasmI64 : kWasmI32;
kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
class VarState {
public:
......@@ -452,7 +452,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_to_intptr(Register dst, Register src);
inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
if (kPointerSize == 8) {
if (kSystemPointerSize == 8) {
emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
} else {
......@@ -460,7 +460,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
if (kPointerSize == 8) {
if (kSystemPointerSize == 8) {
emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
} else {
......@@ -468,7 +468,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) {
if (kPointerSize == 8) {
if (kSystemPointerSize == 8) {
emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
} else {
......@@ -476,7 +476,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_shr(Register dst, Register src, int amount) {
if (kPointerSize == 8) {
if (kSystemPointerSize == 8) {
emit_i64_shr(LiftoffRegister(dst), LiftoffRegister(src), amount);
} else {
emit_i32_shr(dst, src, amount);
......
......@@ -52,8 +52,7 @@ struct assert_field_size {
};
#define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \
(WasmInstanceObject::k##name##OffsetEnd - \
WasmInstanceObject::k##name##Offset + 1) // NOLINT(whitespace/indent)
FIELD_SIZE(WasmInstanceObject::k##name##Offset)
#define LOAD_INSTANCE_FIELD(dst, name, load_size) \
__ LoadFromInstance(dst, WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \
......@@ -76,7 +75,7 @@ struct assert_field_size {
#endif
constexpr LoadType::LoadTypeValue kPointerLoadType =
kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
......@@ -107,8 +106,9 @@ class MovableLabel {
compiler::CallDescriptor* GetLoweredCallDescriptor(
Zone* zone, compiler::CallDescriptor* call_desc) {
return kPointerSize == 4 ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
: call_desc;
return kSystemPointerSize == 4
? compiler::GetI32WasmCallDescriptor(zone, call_desc)
: call_desc;
}
constexpr ValueType kSupportedTypesArr[] = {kWasmI32, kWasmI64, kWasmF32,
......@@ -306,7 +306,7 @@ class LiftoffCompiler {
OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
OutOfLineCode& ool = out_of_line_code_.back();
Register limit_address = __ GetUnusedRegister(kGpReg).gp();
LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kPointerSize);
LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
}
......@@ -1167,12 +1167,12 @@ class LiftoffCompiler {
LiftoffRegList& pinned, uint32_t* offset) {
Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kPointerSize);
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
__ Load(LiftoffRegister(addr), addr, no_reg,
global->index * sizeof(Address), kPointerLoadType, pinned);
*offset = 0;
} else {
LOAD_INSTANCE_FIELD(addr, GlobalsStart, kPointerSize);
LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize);
*offset = global->offset;
}
return addr;
......@@ -1397,9 +1397,9 @@ class LiftoffCompiler {
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
Register mem_size = __ GetUnusedRegister(kGpReg, pinned).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerSize);
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
if (kPointerSize == 8) {
if (kSystemPointerSize == 8) {
__ LoadConstant(end_offset_reg, WasmValue(end_offset));
} else {
__ LoadConstant(end_offset_reg,
......@@ -1509,7 +1509,7 @@ class LiftoffCompiler {
Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
__ LoadConstant(LiftoffRegister(tmp), WasmValue(*offset));
__ emit_ptrsize_add(index, index, tmp);
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kPointerSize);
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_ptrsize_and(index, index, tmp);
*offset = 0;
return index;
......@@ -1530,7 +1530,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, pinned);
DEBUG_CODE_COMMENT("Load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerSize);
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
RegClass rc = reg_class_for(value_type);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
uint32_t protected_load_pc = 0;
......@@ -1564,7 +1564,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, pinned);
DEBUG_CODE_COMMENT("Store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerSize);
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
uint32_t protected_store_pc = 0;
LiftoffRegList outer_pinned;
if (FLAG_trace_wasm_memory) outer_pinned.set(index);
......@@ -1583,7 +1583,7 @@ class LiftoffCompiler {
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
Register mem_size = __ GetUnusedRegister(kGpReg).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerSize);
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
__ emit_ptrsize_shr(mem_size, mem_size, kWasmPageSizeLog2);
__ PushRegister(kWasmI32, LiftoffRegister(mem_size));
}
......@@ -1643,7 +1643,7 @@ class LiftoffCompiler {
Register imported_targets = tmp;
LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
kPointerSize);
kSystemPointerSize);
__ Load(LiftoffRegister(target), imported_targets, no_reg,
imm.index * sizeof(Address), kPointerLoadType, pinned);
......@@ -1752,7 +1752,7 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kPointerSize);
LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize);
__ LoadConstant(LiftoffRegister(tmp_const),
WasmValue(static_cast<uint32_t>(sizeof(uint32_t))));
// TODO(wasm): use a emit_i32_shli() instead of a multiply.
......@@ -1770,7 +1770,7 @@ class LiftoffCompiler {
LiftoffAssembler::kWasmIntPtr, scratch, tmp_const);
DEBUG_CODE_COMMENT("Execute indirect call");
if (kPointerSize == 8) {
if (kSystemPointerSize == 8) {
// {index} has already been multiplied by 4. Multiply by another 2.
__ LoadConstant(LiftoffRegister(tmp_const), WasmValue(2));
__ emit_i32_mul(index, index, tmp_const);
......@@ -1786,7 +1786,8 @@ class LiftoffCompiler {
Register* explicit_instance = &tmp_const;
// Load the target from {instance->ift_targets[key]}
LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kPointerSize);
LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
kSystemPointerSize);
__ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
pinned);
......
......@@ -16,7 +16,7 @@ namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kNeedI64RegPair = kPointerSize == 4;
static constexpr bool kNeedI64RegPair = kSystemPointerSize == 4;
enum RegClass : uint8_t {
kGpReg,
......
......@@ -481,7 +481,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
int32_t offset = kPointerSize * (caller_slot_idx + 1);
int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
liftoff::Load(this, dst, fp, offset, type);
}
......@@ -1320,11 +1320,11 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
unsigned num_gp_regs = gp_regs.GetNumRegsSet();
if (num_gp_regs) {
unsigned offset = num_gp_regs * kPointerSize;
unsigned offset = num_gp_regs * kSystemPointerSize;
addiu(sp, sp, -offset);
while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetFirstRegSet();
offset -= kPointerSize;
offset -= kSystemPointerSize;
sw(reg.gp(), MemOperand(sp, offset));
gp_regs.clear(reg);
}
......@@ -1361,13 +1361,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
LiftoffRegister reg = gp_regs.GetLastRegSet();
lw(reg.gp(), MemOperand(sp, gp_offset));
gp_regs.clear(reg);
gp_offset += kPointerSize;
gp_offset += kSystemPointerSize;
}
addiu(sp, sp, gp_offset);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
DCHECK_LT(num_stack_slots,
(1 << 16) / kSystemPointerSize); // 16 bit immediate
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
......
......@@ -72,18 +72,18 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
switch (type) {
case kWasmI32:
assm->daddiu(sp, sp, -kPointerSize);
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->sw(reg.gp(), MemOperand(sp, 0));
break;
case kWasmI64:
assm->push(reg.gp());
break;
case kWasmF32:
assm->daddiu(sp, sp, -kPointerSize);
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->swc1(reg.fp(), MemOperand(sp, 0));
break;
case kWasmF64:
assm->daddiu(sp, sp, -kPointerSize);
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
default:
......@@ -406,7 +406,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
MemOperand src(fp, kPointerSize * (caller_slot_idx + 1));
MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
liftoff::Load(this, dst, src, type);
}
......@@ -1172,11 +1172,11 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
unsigned num_gp_regs = gp_regs.GetNumRegsSet();
if (num_gp_regs) {
unsigned offset = num_gp_regs * kPointerSize;
unsigned offset = num_gp_regs * kSystemPointerSize;
daddiu(sp, sp, -offset);
while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetFirstRegSet();
offset -= kPointerSize;
offset -= kSystemPointerSize;
sd(reg.gp(), MemOperand(sp, offset));
gp_regs.clear(reg);
}
......@@ -1213,13 +1213,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
LiftoffRegister reg = gp_regs.GetLastRegSet();
ld(reg.gp(), MemOperand(sp, gp_offset));
gp_regs.clear(reg);
gp_offset += kPointerSize;
gp_offset += kSystemPointerSize;
}
daddiu(sp, sp, gp_offset);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
DCHECK_LT(num_stack_slots,
(1 << 16) / kSystemPointerSize); // 16 bit immediate
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
......
......@@ -112,11 +112,11 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->pushq(reg.gp());
break;
case kWasmF32:
assm->subp(rsp, Immediate(kPointerSize));
assm->subp(rsp, Immediate(kSystemPointerSize));
assm->Movss(Operand(rsp, 0), reg.fp());
break;
case kWasmF64:
assm->subp(rsp, Immediate(kPointerSize));
assm->subp(rsp, Immediate(kSystemPointerSize));
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
default:
......@@ -311,7 +311,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
Operand src(rbp, kSystemPointerSize * (caller_slot_idx + 1));
liftoff::Load(this, dst, src, type);
}
......@@ -1457,8 +1457,9 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
ret(static_cast<int>(num_stack_slots * kPointerSize));
DCHECK_LT(num_stack_slots,
(1 << 16) / kSystemPointerSize); // 16 bit immediate
ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
......
......@@ -989,7 +989,7 @@ size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
constexpr size_t kCodeSizeMultiplier = 4;
constexpr size_t kCodeOverhead = 32; // for prologue, stack check, ...
constexpr size_t kStaticCodeSize = 512; // runtime stubs, ...
constexpr size_t kImportSize = 64 * kPointerSize;
constexpr size_t kImportSize = 64 * kSystemPointerSize;
size_t estimate = kStaticCodeSize;
for (auto& function : module->functions) {
......
......@@ -205,7 +205,7 @@ class LinkageAllocator {
// Stackslots are counted upwards starting from 0 (or the offset set by
// {SetStackOffset}.
int NumStackSlots(MachineRepresentation type) {
return std::max(1, ElementSizeInBytes(type) / kPointerSize);
return std::max(1, ElementSizeInBytes(type) / kSystemPointerSize);
}
// Stackslots are counted upwards starting from 0 (or the offset set by
......
......@@ -231,6 +231,14 @@ inline bool WasmInstanceObject::has_indirect_function_table() {
return indirect_function_table_sig_ids() != nullptr;
}
// Zeroes the optional padding field of the instance object, if the current
// build configuration reserves one (FIELD_SIZE(...) evaluates to 0 when no
// padding exists, making this a no-op in that configuration). Clearing the
// padding keeps uninitialized bytes out of the heap so verifiers and
// snapshot/deterministic-heap checks see a well-defined value.
void WasmInstanceObject::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
// When padding is present it is expected to be exactly 4 bytes wide.
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
FIELD_SIZE(kOptionalPaddingOffset));
}
}
IndirectFunctionTableEntry::IndirectFunctionTableEntry(
Handle<WasmInstanceObject> instance, int index)
: instance_(instance), index_(index) {
......
......@@ -145,13 +145,14 @@ class WasmInstanceNativeAllocations {
};
size_t EstimateNativeAllocationsSize(const WasmModule* module) {
size_t estimate = sizeof(WasmInstanceNativeAllocations) +
(1 * kPointerSize * module->num_imported_mutable_globals) +
(2 * kPointerSize * module->num_imported_functions) +
((kPointerSize + sizeof(uint32_t) + sizeof(uint8_t)) *
module->num_declared_data_segments);
size_t estimate =
sizeof(WasmInstanceNativeAllocations) +
(1 * kSystemPointerSize * module->num_imported_mutable_globals) +
(2 * kSystemPointerSize * module->num_imported_functions) +
((kSystemPointerSize + sizeof(uint32_t) + sizeof(uint8_t)) *
module->num_declared_data_segments);
for (auto& table : module->tables) {
estimate += 3 * kPointerSize * table.initial_size;
estimate += 3 * kSystemPointerSize * table.initial_size;
}
return estimate;
}
......@@ -1283,6 +1284,7 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(*instance_object), isolate);
instance->clear_padding();
// Initialize the imported function arrays.
auto module = module_object->module();
......
This diff is collapsed.
......@@ -340,7 +340,7 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
auto call_descriptor =
compiler::Linkage::GetSimplifiedCDescriptor(zone(), signature_, true);
if (kPointerSize == 4) {
if (kSystemPointerSize == 4) {
size_t num_params = signature_->parameter_count();
// One additional parameter for the pointer of the return value.
Signature<MachineRepresentation>::Builder rep_builder(zone(), 1,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment