Commit 57bf0bfe authored by Ben L. Titzer, committed by Commit Bot

[wasm] Merge the WasmContext into WasmInstanceObject

This change makes lifetime management of WasmCode much simpler.
Using the WasmInstanceObject as the context for WASM code execution,
including the pointer to the memory base and the indirect function tables,
keeps the instance alive while WASM code is on the stack, since the
instance object is passed as a parameter and spilled onto the stack.
This is in preparation for sharing code between instances and
isolates.

Bug: v8:7424

Change-Id: Ic2e4b7bcc2feb20001d0553a615a8a9dff36317e
Reviewed-on: https://chromium-review.googlesource.com/958520
Commit-Queue: Ben Titzer <titzer@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52361}
parent f81f301f
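
The core of the change is that WASM calls now take the WasmInstanceObject as an implicit first parameter (a tagged pointer), replacing the raw WasmContext pointer. A minimal sketch of that convention, assuming simplified types and a hypothetical BuildWasmParameterLocations helper rather than V8's real GetWasmCallDescriptor:

#include <iostream>
#include <vector>

// Simplified machine representations; kTaggedPointer is what the instance
// parameter uses after this change.
enum class Rep { kTaggedPointer, kWord32, kWord64, kFloat32, kFloat64 };

constexpr int kWasmInstanceParameterIndex = 0;  // instance is always input 0

// Hypothetical stand-in for LocationSignature::Builder in wasm-linkage.cc.
std::vector<Rep> BuildWasmParameterLocations(const std::vector<Rep>& sig_params) {
  std::vector<Rep> locations;
  // The '+ 1' accommodates the instance object as the first parameter.
  locations.reserve(sig_params.size() + 1);
  locations.push_back(Rep::kTaggedPointer);  // the instance object
  for (Rep p : sig_params) locations.push_back(p);
  return locations;
}

int main() {
  std::vector<Rep> params =
      BuildWasmParameterLocations({Rep::kWord32, Rep::kFloat64});
  std::cout << "parameters including instance: " << params.size() << "\n"
            << "instance parameter index: " << kWasmInstanceParameterIndex << "\n";
}

Because the instance is an ordinary tagged parameter, it is spilled like any other value and stays visible to the GC while the frame is live, which is what keeps the instance alive.
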
......@@ -213,12 +213,6 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
#endif
}
void RelocInfo::set_wasm_context_reference(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmContextReference(rmode_));
set_embedded_address(address, icache_flush_mode);
}
void RelocInfo::set_global_handle(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
......@@ -242,11 +236,6 @@ Address RelocInfo::global_handle() const {
return embedded_address();
}
Address RelocInfo::wasm_context_reference() const {
DCHECK(IsWasmContextReference(rmode_));
return embedded_address();
}
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
......@@ -546,8 +535,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "constant pool";
case VENEER_POOL:
return "veneer pool";
case WASM_CONTEXT_REFERENCE:
return "wasm context reference";
case WASM_GLOBAL_HANDLE:
return "global handle";
case WASM_CALL:
......@@ -650,7 +637,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEOPT_ID:
case CONST_POOL:
case VENEER_POOL:
case WASM_CONTEXT_REFERENCE:
case WASM_GLOBAL_HANDLE:
case WASM_CALL:
case JS_TO_WASM_CALL:
......
......@@ -366,10 +366,6 @@ class RelocInfo {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET,
EMBEDDED_OBJECT,
// Wasm entries are to relocate pointers into the wasm memory embedded in
// wasm code. Everything after WASM_CONTEXT_REFERENCE (inclusive) is not
// GC'ed.
WASM_CONTEXT_REFERENCE,
WASM_GLOBAL_HANDLE,
WASM_CALL,
JS_TO_WASM_CALL,
......@@ -466,15 +462,12 @@ class RelocInfo {
return mode == OFF_HEAP_TARGET;
}
static inline bool IsNone(Mode mode) { return mode == NONE; }
static inline bool IsWasmContextReference(Mode mode) {
return mode == WASM_CONTEXT_REFERENCE;
}
static inline bool IsWasmReference(Mode mode) {
return IsWasmPtrReference(mode);
}
static inline bool IsWasmPtrReference(Mode mode) {
return mode == WASM_CONTEXT_REFERENCE || mode == WASM_GLOBAL_HANDLE ||
mode == WASM_CALL || mode == JS_TO_WASM_CALL;
return mode == WASM_GLOBAL_HANDLE || mode == WASM_CALL ||
mode == JS_TO_WASM_CALL;
}
static constexpr int ModeMask(Mode mode) { return 1 << mode; }
......@@ -509,14 +502,10 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
Address wasm_context_reference() const;
Address global_handle() const;
Address js_to_wasm_address() const;
Address wasm_call_address() const;
void set_wasm_context_reference(
Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_target_address(
Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
......
......@@ -281,15 +281,15 @@ void Int64Lowering::LowerNode(Node* node) {
static_cast<int>(signature()->parameter_count())) {
int old_index = ParameterIndexOf(node->op());
// TODO(wasm): Make this part not wasm specific.
// Prevent special lowering of the WasmContext parameter.
if (old_index == kWasmContextParameterIndex) {
// Prevent special lowering of the instance parameter.
if (old_index == kWasmInstanceParameterIndex) {
DefaultLowering(node);
break;
}
// Adjust old_index to be compliant with the signature.
--old_index;
int new_index = GetParameterIndexAfterLowering(signature(), old_index);
// Adjust new_index to consider the WasmContext parameter.
// Adjust new_index to consider the instance parameter.
++new_index;
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
......
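
The Int64Lowering hunk above keeps parameter 0 (the instance) out of the lowering and shifts the remaining indices around it. A small standalone sketch of that index arithmetic, with GetParameterIndexAfterLowering as a simplified stand-in for the real helper:

#include <cassert>
#include <vector>

constexpr int kWasmInstanceParameterIndex = 0;

// Count of 32-bit machine parameters occupied by the first `index` signature
// parameters after i64 -> (i32, i32) lowering on a 32-bit target.
int GetParameterIndexAfterLowering(const std::vector<bool>& param_is_i64,
                                   int index) {
  int result = 0;
  for (int i = 0; i < index; ++i) result += param_is_i64[i] ? 2 : 1;
  return result;
}

int LowerParameterIndex(const std::vector<bool>& param_is_i64, int old_index) {
  if (old_index == kWasmInstanceParameterIndex) return old_index;  // untouched
  --old_index;                                        // into signature space
  int new_index = GetParameterIndexAfterLowering(param_is_i64, old_index);
  return new_index + 1;                               // back past the instance
}

int main() {
  // Signature (i32, i64, i32): node parameter 3 (the trailing i32) lands at
  // machine index 1 (instance) + 1 (i32) + 2 (i64 halves) = 4.
  std::vector<bool> sig = {false, true, false};
  assert(LowerParameterIndex(sig, 3) == 4);
  assert(LowerParameterIndex(sig, 0) == 0);
  return 0;
}
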
......@@ -32,6 +32,8 @@ MachineType MachineTypeFor(ValueType type) {
return MachineType::Float32();
case wasm::kWasmS128:
return MachineType::Simd128();
case wasm::kWasmAnyRef:
return MachineType::TaggedPointer();
default:
UNREACHABLE();
}
......@@ -225,15 +227,15 @@ static constexpr Allocator parameter_registers(kGPParamRegisters,
// General code uses the above configuration data.
CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig,
bool use_retpoline) {
// The '+ 1' here is to accomodate the wasm_context as first parameter.
// The '+ 1' here is to accomodate the instance object as first parameter.
LocationSignature::Builder locations(zone, fsig->return_count(),
fsig->parameter_count() + 1);
// Add register and/or stack parameter(s).
Allocator params = parameter_registers;
// The wasm_context.
locations.AddParam(params.Next(MachineType::PointerRepresentation()));
// The instance object.
locations.AddParam(params.Next(MachineRepresentation::kTaggedPointer));
const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
......
......@@ -137,8 +137,8 @@ class ElementsAccessor {
virtual uint32_t Push(Handle<JSArray> receiver, Arguments* args,
uint32_t push_size) = 0;
virtual uint32_t Unshift(Handle<JSArray> receiver,
Arguments* args, uint32_t unshift_size) = 0;
virtual uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
uint32_t unshift_size) = 0;
virtual Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
uint32_t end) = 0;
......
......@@ -1140,8 +1140,6 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
WasmExportedFunction* function = WasmExportedFunction::cast(this);
os << "\n - WASM instance "
<< reinterpret_cast<void*>(function->instance());
os << "\n context "
<< reinterpret_cast<void*>(function->instance()->wasm_context()->get());
os << "\n - WASM function index " << function->function_index();
}
shared()->PrintSourceCode(os);
......
......@@ -14226,7 +14226,6 @@ bool Code::IsProcessIndependent() {
mode_mask ==
(RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_HANDLE) |
RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
......
......@@ -37,6 +37,7 @@ WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
return owning_instance;
}
// TODO(titzer): rename to GetNativeContextFromWasmInstanceOnStackTop()
Context* GetWasmContextOnStackTop(Isolate* isolate) {
return GetWasmInstanceOnStackTop(isolate)
->compiled_module()
......
......@@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
BAILOUT("LoadFromContext");
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
BAILOUT("LoadFromInstance");
}
void LiftoffAssembler::SpillContext(Register context) {
BAILOUT("SpillContext");
void LiftoffAssembler::SpillInstance(Register instance) {
BAILOUT("SpillInstance");
}
void LiftoffAssembler::FillContextInto(Register dst) {
BAILOUT("FillContextInto");
void LiftoffAssembler::FillInstanceInto(Register dst) {
BAILOUT("FillInstanceInto");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......
......@@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
BAILOUT("LoadFromContext");
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
BAILOUT("LoadFromInstance");
}
void LiftoffAssembler::SpillContext(Register context) {
BAILOUT("SpillContext");
void LiftoffAssembler::SpillInstance(Register instance) {
BAILOUT("SpillInstance");
}
void LiftoffAssembler::FillContextInto(Register dst) {
BAILOUT("FillContextInto");
void LiftoffAssembler::FillInstanceInto(Register dst) {
BAILOUT("FillInstanceInto");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......
......@@ -16,8 +16,8 @@ namespace wasm {
namespace liftoff {
// ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
// is located at ebp-24.
// ebp-8 holds the stack marker, ebp-16 is the instance parameter, first stack
// slot is located at ebp-24.
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
......@@ -33,7 +33,7 @@ inline Operand GetHalfStackSlot(uint32_t half_index) {
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
inline Operand GetContextOperand() { return Operand(ebp, -16); }
inline Operand GetInstanceOperand() { return Operand(ebp, -16); }
static constexpr LiftoffRegList kByteRegs =
LiftoffRegList::FromBits<Register::ListOf<eax, ecx, edx, ebx>()>();
......@@ -133,20 +133,20 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
DCHECK_LE(offset, kMaxInt);
mov(dst, liftoff::GetContextOperand());
mov(dst, liftoff::GetInstanceOperand());
DCHECK_EQ(4, size);
mov(dst, Operand(dst, offset));
}
void LiftoffAssembler::SpillContext(Register context) {
mov(liftoff::GetContextOperand(), context);
void LiftoffAssembler::SpillInstance(Register instance) {
mov(liftoff::GetInstanceOperand(), instance);
}
void LiftoffAssembler::FillContextInto(Register dst) {
mov(dst, liftoff::GetContextOperand());
void LiftoffAssembler::FillInstanceInto(Register dst) {
mov(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......@@ -1182,7 +1182,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
// Set context to zero.
// Set instance to zero.
xor_(esi, esi);
CallRuntimeDelayed(zone, fid);
}
......
......@@ -437,7 +437,7 @@ void LiftoffAssembler::SpillAllRegisters() {
void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register* target,
LiftoffRegister* explicit_context) {
LiftoffRegister* target_instance) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
// Input 0 is the call target.
constexpr size_t kInputShift = 1;
......@@ -455,14 +455,14 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
StackTransferRecipe stack_transfers(this);
LiftoffRegList param_regs;
// Move the explicit context (if any) into the correct context register.
compiler::LinkageLocation context_loc =
// Move the target instance (if supplied) into the correct instance register.
compiler::LinkageLocation instance_loc =
call_descriptor->GetInputLocation(kInputShift);
DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
LiftoffRegister context_reg(Register::from_code(context_loc.AsRegister()));
param_regs.set(context_reg);
if (explicit_context && *explicit_context != context_reg) {
stack_transfers.MoveRegister(context_reg, *explicit_context, kWasmIntPtr);
DCHECK(instance_loc.IsRegister() && !instance_loc.IsAnyRegister());
LiftoffRegister instance_reg(Register::from_code(instance_loc.AsRegister()));
param_regs.set(instance_reg);
if (target_instance && *target_instance != instance_reg) {
stack_transfers.MoveRegister(instance_reg, *target_instance, kWasmIntPtr);
}
// Now move all parameter values into the right slot for the call.
......@@ -504,7 +504,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
}
}
}
// {call_desc_input_idx} should point after the context parameter now.
// {call_desc_input_idx} should point after the instance parameter now.
DCHECK_EQ(call_desc_input_idx, kInputShift + 1);
// If the target register overlaps with a parameter register, then move the
......@@ -523,7 +523,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
}
}
// Execute the stack transfers before filling the context register.
// Execute the stack transfers before filling the instance register.
stack_transfers.Execute();
// Pop parameters from the value stack.
......@@ -533,9 +533,9 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
// Reset register use counters.
cache_state_.reset_used_registers();
// Reload the context from the stack.
if (!explicit_context) {
FillContextInto(context_reg.gp());
// Reload the instance from the stack.
if (!target_instance) {
FillInstanceInto(instance_reg.gp());
}
}
......
......@@ -322,7 +322,7 @@ class LiftoffAssembler : public TurboAssembler {
// register, or {no_reg} if target was spilled to the stack.
void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
Register* target = nullptr,
LiftoffRegister* explicit_context = nullptr);
LiftoffRegister* target_instance = nullptr);
// Process return values of the call.
void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
......@@ -352,9 +352,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
inline void LoadFromContext(Register dst, uint32_t offset, int size);
inline void SpillContext(Register context);
inline void FillContextInto(Register dst);
inline void LoadFromInstance(Register dst, uint32_t offset, int size);
inline void SpillInstance(Register instance);
inline void FillInstanceInto(Register dst);
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc = nullptr);
......
......@@ -15,8 +15,8 @@ namespace wasm {
namespace liftoff {
// fp-8 holds the stack marker, fp-16 is the wasm context, first stack slot
// is located at fp-24.
// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack
// slot is located at fp-24.
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
......@@ -31,7 +31,7 @@ inline MemOperand GetHalfStackSlot(uint32_t half_index) {
return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
inline MemOperand GetContextOperand() { return MemOperand(fp, -16); }
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
// Use this register to store the address of the last argument pushed on the
// stack for a call to C. This register must be callee saved according to the c
......@@ -129,20 +129,20 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
DCHECK_LE(offset, kMaxInt);
lw(dst, liftoff::GetContextOperand());
lw(dst, liftoff::GetInstanceOperand());
DCHECK_EQ(4, size);
lw(dst, MemOperand(dst, offset));
}
void LiftoffAssembler::SpillContext(Register context) {
sw(context, liftoff::GetContextOperand());
void LiftoffAssembler::SpillInstance(Register instance) {
sw(instance, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::FillContextInto(Register dst) {
lw(dst, liftoff::GetContextOperand());
void LiftoffAssembler::FillInstanceInto(Register dst) {
lw(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......@@ -880,7 +880,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
// Set context to zero.
// Set instance to zero.
TurboAssembler::Move(cp, zero_reg);
CallRuntimeDelayed(zone, fid);
}
......
......@@ -15,8 +15,8 @@ namespace wasm {
namespace liftoff {
// fp-8 holds the stack marker, fp-16 is the wasm context, first stack slot
// is located at fp-24.
// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack
// slot is located at fp-24.
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
......@@ -26,7 +26,7 @@ inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
inline MemOperand GetContextOperand() { return MemOperand(fp, -16); }
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
// Use this register to store the address of the last argument pushed on the
// stack for a call to C. This register must be callee saved according to the c
......@@ -120,10 +120,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
DCHECK_LE(offset, kMaxInt);
ld(dst, liftoff::GetContextOperand());
ld(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
lw(dst, MemOperand(dst, offset));
......@@ -132,12 +132,12 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
}
}
void LiftoffAssembler::SpillContext(Register context) {
sd(context, liftoff::GetContextOperand());
void LiftoffAssembler::SpillInstance(Register instance) {
sd(instance, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::FillContextInto(Register dst) {
ld(dst, liftoff::GetContextOperand());
void LiftoffAssembler::FillInstanceInto(Register dst) {
ld(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......@@ -707,7 +707,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
// Set context to zero.
// Set instance to zero.
TurboAssembler::Move(cp, zero_reg);
CallRuntimeDelayed(zone, fid);
}
......
......@@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
BAILOUT("LoadFromContext");
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
BAILOUT("LoadFromInstance");
}
void LiftoffAssembler::SpillContext(Register context) {
BAILOUT("SpillContext");
void LiftoffAssembler::SpillInstance(Register instance) {
BAILOUT("SpillInstance");
}
void LiftoffAssembler::FillContextInto(Register dst) {
BAILOUT("FillContextInto");
void LiftoffAssembler::FillInstanceInto(Register dst) {
BAILOUT("FillInstanceInto");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......
......@@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
BAILOUT("LoadFromContext");
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
BAILOUT("LoadFromInstance");
}
void LiftoffAssembler::SpillContext(Register context) {
BAILOUT("SpillContext");
void LiftoffAssembler::SpillInstance(Register instance) {
BAILOUT("SpillInstance");
}
void LiftoffAssembler::FillContextInto(Register dst) {
BAILOUT("FillContextInto");
void LiftoffAssembler::FillInstanceInto(Register dst) {
BAILOUT("FillInstanceInto");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......
......@@ -16,8 +16,8 @@ namespace wasm {
namespace liftoff {
// rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
// is located at rbp-24.
// rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack
// slot is located at rbp-24.
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
......@@ -28,7 +28,7 @@ inline Operand GetStackSlot(uint32_t index) {
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
inline Operand GetContextOperand() { return Operand(rbp, -16); }
inline Operand GetInstanceOperand() { return Operand(rbp, -16); }
// Use this register to store the address of the last argument pushed on the
// stack for a call to C. This register must be callee saved according to the c
......@@ -131,10 +131,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
DCHECK_LE(offset, kMaxInt);
movp(dst, liftoff::GetContextOperand());
movp(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
movl(dst, Operand(dst, offset));
......@@ -143,12 +143,12 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
}
}
void LiftoffAssembler::SpillContext(Register context) {
movp(liftoff::GetContextOperand(), context);
void LiftoffAssembler::SpillInstance(Register instance) {
movp(liftoff::GetInstanceOperand(), instance);
}
void LiftoffAssembler::FillContextInto(Register dst) {
movp(dst, liftoff::GetContextOperand());
void LiftoffAssembler::FillInstanceInto(Register dst) {
movp(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......@@ -972,7 +972,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
// Set context to zero.
// Set instance to zero.
xorp(rsi, rsi);
CallRuntimeDelayed(zone, fid);
}
......
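
Across the Liftoff ports above, the instance pointer lives in a fixed frame slot (ebp-16, fp-16, or rbp-16 depending on the architecture), so SpillInstance, FillInstanceInto, and LoadFromInstance are just stores and loads against that slot. A toy model, using a plain array in place of a machine frame (assumed layout, not V8's real one):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

struct ToyInstance {           // stand-in for WasmInstanceObject fields
  uint8_t* memory_start;
  uint64_t memory_size;
};

struct ToyFrame {
  // slot 0 ~ fp-8 (stack marker), slot 1 ~ fp-16 (instance), slot 2+ ~ spills.
  uintptr_t slots[8] = {};
};

void SpillInstance(ToyFrame* frame, ToyInstance* instance) {
  frame->slots[1] = reinterpret_cast<uintptr_t>(instance);  // store at "fp-16"
}

ToyInstance* FillInstanceInto(const ToyFrame* frame) {
  return reinterpret_cast<ToyInstance*>(frame->slots[1]);   // reload the slot
}

uint64_t LoadFromInstance(const ToyFrame* frame, size_t offset) {
  ToyInstance* instance = FillInstanceInto(frame);
  uint64_t value;
  std::memcpy(&value, reinterpret_cast<const uint8_t*>(instance) + offset,
              sizeof(value));                               // field load
  return value;
}

int main() {
  ToyInstance instance{nullptr, 65536};
  ToyFrame frame;
  SpillInstance(&frame, &instance);
  std::cout << "memory_size = "
            << LoadFromInstance(&frame, offsetof(ToyInstance, memory_size))
            << "\n";
}
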
......@@ -774,7 +774,7 @@ class WasmDecoder : public Decoder {
case kExprGrowMemory:
case kExprCallFunction:
case kExprCallIndirect:
// Add context cache nodes to the assigned set.
// Add instance cache nodes to the assigned set.
// TODO(titzer): make this more clear.
assigned->Add(locals_count - 1);
length = OpcodeLength(decoder, pc);
......
......@@ -37,7 +37,7 @@ struct SsaEnv {
State state;
TFNode* control;
TFNode* effect;
compiler::WasmContextCacheNodes context_cache;
compiler::WasmInstanceCacheNodes instance_cache;
TFNode** locals;
bool go() { return state >= kReached; }
......@@ -46,7 +46,7 @@ struct SsaEnv {
locals = nullptr;
control = nullptr;
effect = nullptr;
context_cache = {};
instance_cache = {};
}
void SetNotMerged() {
if (state == kMerged) state = kReached;
......@@ -100,14 +100,14 @@ class WasmGraphBuildingInterface {
: nullptr;
// The first '+ 1' is needed by TF Start node, the second '+ 1' is for the
// wasm_context parameter.
// instance parameter.
TFNode* start = builder_->Start(
static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
// Initialize the wasm_context (the paramater at index 0).
builder_->set_wasm_context(
builder_->Param(compiler::kWasmContextParameterIndex));
// Initialize the instance parameter (index 0).
builder_->set_instance_node(
builder_->Param(compiler::kWasmInstanceParameterIndex));
// Initialize local variables. Parameters are shifted by 1 because of the
// the wasm_context.
// the instance parameter.
uint32_t index = 0;
for (; index < decoder->sig_->parameter_count(); ++index) {
ssa_env->locals[index] = builder_->Param(index + 1);
......@@ -129,11 +129,10 @@ class WasmGraphBuildingInterface {
SetEnv(ssa_env);
}
// Reload the wasm context variables from the WasmContext structure attached
// to the memory object into the Ssa Environment.
// Reload the instance cache entries into the Ssa Environment.
void LoadContextIntoSsa(SsaEnv* ssa_env) {
if (!ssa_env || !ssa_env->go()) return;
builder_->InitContextCache(&ssa_env->context_cache);
builder_->InitInstanceCache(&ssa_env->instance_cache);
}
void StartFunctionBody(Decoder* decoder, Control* block) {
......@@ -366,7 +365,7 @@ class WasmGraphBuildingInterface {
void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
result->node = BUILD(GrowMemory, value.node);
// Always reload the context cache after growing memory.
// Always reload the instance cache after growing memory.
LoadContextIntoSsa(ssa_env_);
}
......@@ -549,10 +548,10 @@ class WasmGraphBuildingInterface {
}
#endif
ssa_env_ = env;
// TODO(wasm): combine the control and effect pointers with context cache.
// TODO(wasm): combine the control and effect pointers with instance cache.
builder_->set_control_ptr(&env->control);
builder_->set_effect_ptr(&env->effect);
builder_->set_context_cache(&env->context_cache);
builder_->set_instance_cache(&env->instance_cache);
}
TFNode* CheckForException(Decoder* decoder, TFNode* node) {
......@@ -638,7 +637,7 @@ class WasmGraphBuildingInterface {
to->locals = from->locals;
to->control = from->control;
to->effect = from->effect;
to->context_cache = from->context_cache;
to->instance_cache = from->instance_cache;
break;
}
case SsaEnv::kReached: { // Create a new merge.
......@@ -662,9 +661,9 @@ class WasmGraphBuildingInterface {
builder_->Phi(decoder->GetLocalType(i), 2, vals, merge);
}
}
// Start a new merge from the context cache.
builder_->NewContextCacheMerge(&to->context_cache, &from->context_cache,
merge);
// Start a new merge from the instance cache.
builder_->NewInstanceCacheMerge(&to->instance_cache,
&from->instance_cache, merge);
break;
}
case SsaEnv::kMerged: {
......@@ -679,9 +678,9 @@ class WasmGraphBuildingInterface {
to->locals[i] = builder_->CreateOrMergeIntoPhi(
decoder->GetLocalType(i), merge, to->locals[i], from->locals[i]);
}
// Merge the context caches.
builder_->MergeContextCacheInto(&to->context_cache,
&from->context_cache, merge);
// Merge the instance caches.
builder_->MergeInstanceCacheInto(&to->instance_cache,
&from->instance_cache, merge);
break;
}
default:
......@@ -697,21 +696,22 @@ class WasmGraphBuildingInterface {
env->control = builder_->Loop(env->control);
env->effect = builder_->EffectPhi(1, &env->effect, env->control);
builder_->Terminate(env->effect, env->control);
// The '+ 1' here is to be able to set the context cache as assigned.
// The '+ 1' here is to be able to set the instance cache as assigned.
BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
decoder, decoder->pc(), decoder->total_locals() + 1, decoder->zone());
if (decoder->failed()) return env;
if (assigned != nullptr) {
// Only introduce phis for variables assigned in this loop.
int context_cache_index = decoder->total_locals();
int instance_cache_index = decoder->total_locals();
for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
if (!assigned->Contains(i)) continue;
env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
&env->locals[i], env->control);
}
// Introduce phis for context cache pointers if necessary.
if (assigned->Contains(context_cache_index)) {
builder_->PrepareContextCacheForLoop(&env->context_cache, env->control);
// Introduce phis for instance cache pointers if necessary.
if (assigned->Contains(instance_cache_index)) {
builder_->PrepareInstanceCacheForLoop(&env->instance_cache,
env->control);
}
SsaEnv* loop_body_env = Split(decoder, env);
......@@ -726,8 +726,8 @@ class WasmGraphBuildingInterface {
&env->locals[i], env->control);
}
// Conservatively introduce phis for context cache.
builder_->PrepareContextCacheForLoop(&env->context_cache, env->control);
// Conservatively introduce phis for instance cache.
builder_->PrepareInstanceCacheForLoop(&env->instance_cache, env->control);
SsaEnv* loop_body_env = Split(decoder, env);
builder_->StackCheck(decoder->position(), &loop_body_env->effect,
......@@ -750,11 +750,11 @@ class WasmGraphBuildingInterface {
size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
: nullptr;
memcpy(result->locals, from->locals, size);
result->context_cache = from->context_cache;
result->instance_cache = from->instance_cache;
} else {
result->state = SsaEnv::kUnreachable;
result->locals = nullptr;
result->context_cache = {};
result->instance_cache = {};
}
return result;
......@@ -770,7 +770,7 @@ class WasmGraphBuildingInterface {
result->locals = from->locals;
result->control = from->control;
result->effect = from->effect;
result->context_cache = from->context_cache;
result->instance_cache = from->instance_cache;
from->Kill(SsaEnv::kUnreachable);
return result;
}
......@@ -782,7 +782,7 @@ class WasmGraphBuildingInterface {
result->control = nullptr;
result->effect = nullptr;
result->locals = nullptr;
result->context_cache = {};
result->instance_cache = {};
return result;
}
......
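
The renamed instance cache holds values loaded from the instance object (such as the memory base and size); it is merged at control-flow joins and reloaded after GrowMemory. A toy, non-SSA sketch of the reload behavior, with simplified stand-in structs:

#include <cstdint>

struct WasmInstanceCacheNodes {  // stand-in for compiler::WasmInstanceCacheNodes
  uint8_t* mem_start = nullptr;
  uintptr_t mem_size = 0;
};

struct Instance {
  uint8_t* memory_start = nullptr;
  uintptr_t memory_size = 0;
};

// InitInstanceCache: (re)load the cached fields from the instance object.
void InitInstanceCache(const Instance& instance, WasmInstanceCacheNodes* cache) {
  cache->mem_start = instance.memory_start;
  cache->mem_size = instance.memory_size;
}

int main() {
  Instance instance{new uint8_t[65536], 65536};
  WasmInstanceCacheNodes cache;
  InitInstanceCache(instance, &cache);   // StartFunctionBody
  // GrowMemory replaces the backing store, so the cache must be reloaded.
  delete[] instance.memory_start;
  instance.memory_start = new uint8_t[131072];
  instance.memory_size = 131072;
  InitInstanceCache(instance, &cache);   // "Always reload after growing memory"
  bool ok = cache.mem_size == 131072;
  delete[] instance.memory_start;
  return ok ? 0 : 1;
}
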
......@@ -70,7 +70,7 @@ Address CompileLazy(Isolate* isolate);
// logic to actually orchestrate parallel execution of wasm compilation jobs.
// TODO(clemensh): Implement concurrent lazy compilation.
class LazyCompilationOrchestrator {
const WasmCode* CompileFunction(Isolate*, Handle<WasmInstanceObject>,
const WasmCode* CompileFunction(Isolate*, Handle<WasmCompiledModule>,
int func_index);
public:
......@@ -79,9 +79,8 @@ class LazyCompilationOrchestrator {
Handle<Code> caller,
uint32_t exported_func_index);
const wasm::WasmCode* CompileDirectCall(Isolate*, Handle<WasmInstanceObject>,
Maybe<uint32_t>,
const WasmCode* caller,
int call_offset);
int caller_ret_offset);
const wasm::WasmCode* CompileIndirectCall(Isolate*,
Handle<WasmInstanceObject>,
uint32_t func_index);
......
......@@ -449,9 +449,15 @@ void NativeModule::ResizeCodeTableForTest(size_t last_index) {
}
WasmCode* NativeModule::GetCode(uint32_t index) const {
DCHECK_LT(index, FunctionCount());
return code_table_[index];
}
void NativeModule::SetCode(uint32_t index, WasmCode* wasm_code) {
DCHECK_LT(index, FunctionCount());
code_table_[index] = wasm_code;
}
uint32_t NativeModule::FunctionCount() const {
DCHECK_LE(code_table_.size(), std::numeric_limits<uint32_t>::max());
return static_cast<uint32_t>(code_table_.size());
......@@ -594,6 +600,10 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
// made while iterating over the RelocInfo above.
Assembler::FlushICache(ret->instructions().start(),
ret->instructions().size());
if (FLAG_print_wasm_code) {
// TODO(mstarzinger): don't need the isolate here.
ret->Print(code->GetIsolate());
}
return ret;
}
......
......@@ -258,6 +258,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
// FunctionCount is WasmModule::functions.size().
uint32_t FunctionCount() const;
WasmCode* GetCode(uint32_t index) const;
void SetCode(uint32_t index, WasmCode* wasm_code);
// We special-case lazy cloning because we currently rely on making copies
// of the lazy builtin, to be able to identify, in the runtime, which function
......
......@@ -63,10 +63,12 @@ CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone) {}
CodeSpecialization::~CodeSpecialization() {}
void CodeSpecialization::RelocateWasmContextReferences(Address new_context) {
DCHECK_NOT_NULL(new_context);
DCHECK_NULL(new_wasm_context_address_);
new_wasm_context_address_ = new_context;
void CodeSpecialization::UpdateInstanceReferences(
Handle<WeakCell> old_weak_instance, Handle<WeakCell> new_weak_instance) {
DCHECK(!old_weak_instance.is_null());
DCHECK(!new_weak_instance.is_null());
old_weak_instance_ = old_weak_instance;
new_weak_instance_ = new_weak_instance;
}
void CodeSpecialization::RelocateDirectCalls(NativeModule* native_module) {
......@@ -100,12 +102,11 @@ bool CodeSpecialization::ApplyToWholeModule(NativeModule* native_module,
changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
}
bool patch_wasm_weak_instances =
!old_weak_instance_.is_identical_to(new_weak_instance_);
// Patch all exported functions (JS_TO_WASM_FUNCTION).
int reloc_mode = 0;
// We need to patch WASM_CONTEXT_REFERENCE to put the correct address.
if (new_wasm_context_address_) {
reloc_mode |= RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE);
}
// Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
// calls, the instance registered for that (relocate_direct_calls_module_)
// should match the instance we currently patch (instance).
......@@ -113,6 +114,10 @@ bool CodeSpecialization::ApplyToWholeModule(NativeModule* native_module,
DCHECK_EQ(native_module, relocate_direct_calls_module_);
reloc_mode |= RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL);
}
// Instance references are simply embedded objects.
if (patch_wasm_weak_instances) {
reloc_mode |= RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
}
if (!reloc_mode) return changed;
int wrapper_index = 0;
for (auto exp : module->export_table) {
......@@ -123,20 +128,25 @@ bool CodeSpecialization::ApplyToWholeModule(NativeModule* native_module,
for (RelocIterator it(export_wrapper, reloc_mode); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
switch (mode) {
case RelocInfo::WASM_CONTEXT_REFERENCE:
it.rinfo()->set_wasm_context_reference(new_wasm_context_address_,
icache_flush_mode);
break;
case RelocInfo::JS_TO_WASM_CALL: {
changed = true;
const WasmCode* new_code = native_module->GetCode(exp.index);
it.rinfo()->set_js_to_wasm_address(new_code->instructions().start(),
icache_flush_mode);
} break;
case RelocInfo::EMBEDDED_OBJECT: {
changed = true;
const HeapObject* old = it.rinfo()->target_object();
if (*old_weak_instance_ == old) {
it.rinfo()->set_target_object(
*new_weak_instance_, WriteBarrierMode::UPDATE_WRITE_BARRIER,
icache_flush_mode);
}
} break;
default:
UNREACHABLE();
}
}
changed = true;
}
DCHECK_EQ(module->functions.size(), func_index);
DCHECK_EQ(compiled_module->export_wrappers()->length(), wrapper_index);
......
......@@ -28,8 +28,10 @@ class CodeSpecialization {
CodeSpecialization(Isolate*, Zone*);
~CodeSpecialization();
// Update WasmContext references.
void RelocateWasmContextReferences(Address new_context);
// Update instance references in code. Instance references should only
// appear in export wrappers.
void UpdateInstanceReferences(Handle<WeakCell> old_weak_instance,
Handle<WeakCell> new_weak_instance);
// Update all direct call sites based on the code table in the given instance.
void RelocateDirectCalls(NativeModule* module);
// Apply all relocations and patching to all code in the instance (wasm code
......@@ -41,8 +43,8 @@ class CodeSpecialization {
ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
private:
Address new_wasm_context_address_ = 0;
Handle<WeakCell> old_weak_instance_;
Handle<WeakCell> new_weak_instance_;
NativeModule* relocate_direct_calls_module_ = nullptr;
};
......
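
With WASM_CONTEXT_REFERENCE gone, export wrappers embed the instance's WeakCell as an ordinary EMBEDDED_OBJECT, and UpdateInstanceReferences rewrites those references during specialization. A rough sketch of that rewrite, using a plain vector where V8 walks a RelocIterator:

#include <vector>

struct WeakCell { int id; };  // stand-in for v8::internal::WeakCell

struct Wrapper {
  std::vector<const WeakCell*> embedded_objects;  // toy "reloc" slots
};

bool UpdateInstanceReferences(Wrapper* wrapper, const WeakCell* old_instance,
                              const WeakCell* new_instance) {
  bool changed = false;
  for (const WeakCell*& slot : wrapper->embedded_objects) {
    if (slot == old_instance) {
      slot = new_instance;  // patch the embedded instance reference
      changed = true;
    }
  }
  return changed;
}

int main() {
  WeakCell old_cell{1}, new_cell{2};
  Wrapper wrapper{{&old_cell}};
  return UpdateInstanceReferences(&wrapper, &old_cell, &new_cell) ? 0 : 1;
}
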
......@@ -140,12 +140,13 @@ class InterpreterHandle {
}
public:
// TODO(wasm): properly handlify this constructor.
InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
: isolate_(isolate),
module_(
debug_info->wasm_instance()->compiled_module()->shared()->module()),
interpreter_(isolate, module_, GetBytes(debug_info),
debug_info->wasm_instance()->wasm_context()->get()) {}
handle(debug_info->wasm_instance())) {}
~InterpreterHandle() { DCHECK_EQ(0, activations_.size()); }
......@@ -197,8 +198,6 @@ class InterpreterHandle {
uint32_t activation_id = StartActivation(frame_pointer);
WasmInterpreter::HeapObjectsScope heap_objects_scope(&interpreter_,
instance_object);
WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
thread->InitFrame(&module()->functions[func_index], wasm_args.start());
bool finished = false;
......@@ -681,7 +680,7 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) continue;
Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
isolate, func_index, module->functions[func_index].sig, instance);
isolate, func_index, module->functions[func_index].sig);
const wasm::WasmCode* wasm_new_code =
native_module->AddInterpreterWrapper(new_code, func_index);
const wasm::WasmCode* old_code =
......
......@@ -16,7 +16,6 @@ class AccountingAllocator;
namespace internal {
class WasmInstanceObject;
struct WasmContext;
namespace wasm {
......@@ -88,19 +87,6 @@ class InterpretedFrame {
// An interpreter capable of executing WebAssembly.
class V8_EXPORT_PRIVATE WasmInterpreter {
public:
// Open a HeapObjectsScope before running any code in the interpreter which
// needs access to the instance object or needs to call to JS functions.
class V8_EXPORT_PRIVATE HeapObjectsScope {
public:
HeapObjectsScope(WasmInterpreter* interpreter,
Handle<WasmInstanceObject> instance);
~HeapObjectsScope();
private:
char data[3 * sizeof(void*)]; // must match sizeof(HeapObjectsScopeImpl).
DISALLOW_COPY_AND_ASSIGN(HeapObjectsScope);
};
// State machine for a Thread:
// +---------Run()/Step()--------+
// V |
......@@ -181,7 +167,8 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
};
WasmInterpreter(Isolate* isolate, const WasmModule* module,
const ModuleWireBytes& wire_bytes, WasmContext* wasm_context);
const ModuleWireBytes& wire_bytes,
Handle<WasmInstanceObject> instance);
~WasmInterpreter();
//==========================================================================
......
......@@ -65,19 +65,21 @@ SMI_ACCESSORS(WasmGlobalObject, offset, kOffsetOffset)
SMI_ACCESSORS(WasmGlobalObject, is_mutable, kIsMutableOffset)
// WasmInstanceObject
ACCESSORS(WasmInstanceObject, wasm_context, Managed<WasmContext>,
kWasmContextOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, uintptr_t,
kMemorySizeOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, uintptr_t,
kMemoryMaskOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, imported_function_targets, Address*,
kImportedFunctionTargetsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, globals_start, byte*,
kGlobalsStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table,
IndirectFunctionTableEntry*, kIndirectFunctionTableOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_size, uintptr_t,
kIndirectFunctionTableSizeOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_sig_ids,
uint32_t*, kIndirectFunctionTableSigIdsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_targets,
Address*, kIndirectFunctionTableTargetsOffset)
ACCESSORS(WasmInstanceObject, compiled_module, WasmCompiledModule,
kCompiledModuleOffset)
......@@ -90,12 +92,18 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
kDebugInfoOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, table_object, WasmTableObject,
kTableObjectOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, function_tables, FixedArray,
kFunctionTablesOffset)
ACCESSORS(WasmInstanceObject, directly_called_instances, FixedArray,
kDirectlyCalledInstancesOffset)
ACCESSORS(WasmInstanceObject, js_imports_table, FixedArray,
kJsImportsTableOffset)
ACCESSORS(WasmInstanceObject, imported_function_instances, FixedArray,
kImportedFunctionInstancesOffset)
ACCESSORS(WasmInstanceObject, imported_function_callables, FixedArray,
kImportedFunctionCallablesOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_instances,
FixedArray, kIndirectFunctionTableInstancesOffset)
ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
inline bool WasmInstanceObject::has_indirect_function_table() {
return indirect_function_table_sig_ids() != nullptr;
}
// WasmSharedModuleData
ACCESSORS(WasmSharedModuleData, module_wrapper, Object, kModuleWrapperOffset)
......@@ -168,7 +176,6 @@ WCM_OBJECT(WasmCompiledModule, prev_instance, kPrevInstanceOffset)
WCM_WEAK_LINK(WasmInstanceObject, owning_instance, kOwningInstanceOffset)
WCM_WEAK_LINK(WasmModuleObject, wasm_module, kWasmModuleOffset)
WCM_OBJECT(Foreign, native_module, kNativeModuleOffset)
WCM_OBJECT(FixedArray, lazy_compile_data, kLazyCompileDataOffset)
WCM_SMALL_CONST_NUMBER(bool, use_trap_handler, kUseTrapHandlerOffset)
ACCESSORS(WasmCompiledModule, raw_next_instance, Object, kNextInstanceOffset);
ACCESSORS(WasmCompiledModule, raw_prev_instance, Object, kPrevInstanceOffset);
......@@ -185,6 +192,10 @@ uint32_t WasmTableObject::current_length() { return functions()->length(); }
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
inline bool WasmCompiledModule::has_instance() const {
return !weak_owning_instance()->cleared();
}
#include "src/objects/object-macros-undef.h"
} // namespace internal
......
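
The accessors above list the fields the instance object now carries directly, replacing the separate WasmContext struct. A simplified, hypothetical mirror of that layout (approximate types and names, not the real heap object):

#include <cstdint>

struct WasmInstanceObjectSketch {
  // Memory: base pointer, size, and mask, formerly held by the WasmContext.
  uint8_t* memory_start = nullptr;
  uintptr_t memory_size = 0;
  uintptr_t memory_mask = 0;
  // Globals area.
  uint8_t* globals_start = nullptr;
  // Imported functions: raw call targets (instances/callables live in
  // separate FixedArrays on the real object).
  void** imported_function_targets = nullptr;
  // Indirect function table, split into signature ids and call targets.
  uintptr_t indirect_function_table_size = 0;
  uint32_t* indirect_function_table_sig_ids = nullptr;
  void** indirect_function_table_targets = nullptr;
};

int main() {
  WasmInstanceObjectSketch instance;
  // has_indirect_function_table() on the real object checks exactly this:
  bool has_table = instance.indirect_function_table_sig_ids != nullptr;
  return has_table ? 1 : 0;  // no table installed yet, so this returns 0
}
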
......@@ -23,8 +23,8 @@ constexpr ValueType kWasmI32 = MachineRepresentation::kWord32;
constexpr ValueType kWasmI64 = MachineRepresentation::kWord64;
constexpr ValueType kWasmF32 = MachineRepresentation::kFloat32;
constexpr ValueType kWasmF64 = MachineRepresentation::kFloat64;
constexpr ValueType kWasmAnyRef = MachineRepresentation::kTaggedPointer;
constexpr ValueType kWasmS128 = MachineRepresentation::kSimd128;
constexpr ValueType kWasmAnyRef = MachineRepresentation::kTaggedPointer;
constexpr ValueType kWasmVar = MachineRepresentation::kTagged;
using FunctionSig = Signature<ValueType>;
......
......@@ -682,7 +682,6 @@ MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
Handle<WasmCompiledModule> compiled_module =
WasmCompiledModule::New(isolate, shared->module(), export_wrappers,
std::vector<wasm::GlobalHandleAddress>(),
trap_handler::IsTrapHandlerEnabled());
compiled_module->set_shared(*shared);
script->set_wasm_compiled_module(*compiled_module);
......
......@@ -100,7 +100,6 @@ v8_source_set("cctest_sources") {
"compiler/test-run-tail-calls.cc",
"compiler/test-run-unwinding-info.cc",
"compiler/test-run-variables.cc",
"compiler/test-run-wasm-machops.cc",
"compiler/value-helper.cc",
"compiler/value-helper.h",
"expression-type-collector-macros.h",
......@@ -240,7 +239,6 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm-interpreter.cc",
"wasm/test-run-wasm-js.cc",
"wasm/test-run-wasm-module.cc",
"wasm/test-run-wasm-relocation.cc",
"wasm/test-run-wasm-sign-extension.cc",
"wasm/test-run-wasm-simd.cc",
"wasm/test-run-wasm.cc",
......@@ -277,7 +275,6 @@ v8_source_set("cctest_sources") {
"test-code-stubs.h",
"test-disasm-arm.cc",
"test-macro-assembler-arm.cc",
"test-run-wasm-relocation-arm.cc",
"test-sync-primitives-arm.cc",
]
} else if (v8_current_cpu == "arm64") {
......@@ -290,7 +287,6 @@ v8_source_set("cctest_sources") {
"test-fuzz-arm64.cc",
"test-javascript-arm64.cc",
"test-js-arm64-variables.cc",
"test-run-wasm-relocation-arm64.cc",
"test-sync-primitives-arm64.cc",
"test-utils-arm64.cc",
"test-utils-arm64.h",
......@@ -303,7 +299,6 @@ v8_source_set("cctest_sources") {
"test-code-stubs.h",
"test-disasm-ia32.cc",
"test-log-stack-tracer.cc",
"test-run-wasm-relocation-ia32.cc",
]
} else if (v8_current_cpu == "mips") {
sources += [ ### gcmole(arch:mips) ###
......@@ -350,7 +345,6 @@ v8_source_set("cctest_sources") {
"test-disasm-x64.cc",
"test-log-stack-tracer.cc",
"test-macro-assembler-x64.cc",
"test-run-wasm-relocation-x64.cc",
"wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
......
......@@ -61,7 +61,6 @@ UNINITIALIZED_TEST(VerifyBuiltinsIsolateIndependence) {
mode_mask ==
(RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_HANDLE) |
RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
......
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <iostream> // NOLINT(readability/streams)
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "src/arm/assembler-arm-inl.h"
#include "src/arm/simulator-arm.h"
#include "src/disassembler.h"
#include "src/factory.h"
#include "src/ostreams.h"
#include "test/cctest/compiler/c-signature.h"
#include "test/cctest/compiler/call-tester.h"
namespace v8 {
namespace internal {
namespace wasm {
#define __ assm.
static int32_t DummyStaticFunction(Object* result) { return 1; }
TEST(WasmRelocationArmContextReference) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
DummyStaticFunction(nullptr);
int32_t imm = 1234567;
Assembler assm(isolate, buffer, sizeof buffer);
__ mov(r0, Operand(imm, RelocInfo::WASM_CONTEXT_REFERENCE));
__ mov(pc, Operand(lr));
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
compiler::CSignatureOf<int32_t> csig;
compiler::CodeRunner<int32_t> runnable(isolate, code, &csig);
int32_t ret_value = runnable.Call();
CHECK_EQ(ret_value, imm);
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
::printf("f() = %d\n\n", ret_value);
#endif
int offset = 1234;
// Relocating references by offset
int mode_mask = (1 << RelocInfo::WASM_CONTEXT_REFERENCE);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
it.rinfo()->set_wasm_context_reference(
it.rinfo()->wasm_context_reference() + offset, SKIP_ICACHE_FLUSH);
}
// Call into relocated code object
ret_value = runnable.Call();
CHECK_EQ((imm + offset), ret_value);
#ifdef DEBUG
code->Print(os);
::printf("f() = %d\n\n", ret_value);
#endif
}
#undef __
} // namespace wasm
} // namespace internal
} // namespace v8