Commit 99de47f1 authored by Ben L. Titzer, committed by Commit Bot

[cleanup] Remove redundant wasm:: namespace prefixes

The wasm/ directory is inconsistent about namespace qualification in many
places, often within the same file. For all code that lives in the
v8::internal::wasm namespace, this CL removes the wasm:: qualifiers, which is
especially helpful since most types are already Wasm-named, such as WasmCode,
WasmModule, etc. Namespace qualifiers are redundant inside the wasm:: namespace
and thus defeat the main point of using namespaces. Removing the qualifiers
from non-Wasm-named classes also makes the code somewhat more future-proof,
should we move things that are not really wasm-specific (such as ErrorThrower
and Decoder) into a higher namespace.
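
To illustrate the kind of change, here is a minimal sketch; the forward
declarations and the LookupCode helper are hypothetical, not code from this CL:

  namespace v8 {
  namespace internal {
  namespace wasm {

  class WasmCode;     // forward declarations, for illustration only
  struct WasmModule;  // (hypothetical stand-ins for the real headers)

  // Before: the wasm:: prefix is redundant inside the wasm namespace.
  wasm::WasmCode* LookupCode(const wasm::WasmModule* module);  // hypothetical

  // After: the unqualified names resolve to the same declarations.
  WasmCode* LookupCode(const WasmModule* module);  // hypothetical

  }  // namespace wasm
  }  // namespace internal
  }  // namespace v8

Both declarations name the same function; only the spelling changes.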

R=clemensh@chromium.org,mstarzinger@chromium.org

Change-Id: Ibff3e1e93c64c12dcb53c46c03d1bfb2fb0b7586
Reviewed-on: https://chromium-review.googlesource.com/1160232
Commit-Queue: Ben Titzer <titzer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54862}
parent 0cd263ac
......@@ -446,7 +446,7 @@ void LiftoffAssembler::SpillAllRegisters() {
cache_state_.reset_used_registers();
}
void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
void LiftoffAssembler::PrepareCall(FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register* target,
LiftoffRegister* target_instance) {
......@@ -555,7 +555,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
}
}
void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
void LiftoffAssembler::FinishCall(FunctionSig* sig,
compiler::CallDescriptor* call_descriptor) {
const size_t return_count = sig->return_count();
if (return_count != 0) {
......
......@@ -314,11 +314,11 @@ class LiftoffAssembler : public TurboAssembler {
// Load parameters into the right registers / stack slots for the call.
// Move {*target} into another register if needed and update {*target} to that
// register, or {no_reg} if target was spilled to the stack.
void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
Register* target = nullptr,
LiftoffRegister* target_instance = nullptr);
// Process return values of the call.
void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
void FinishCall(FunctionSig*, compiler::CallDescriptor*);
// Move {src} into {dst}. {src} and {dst} must be different.
void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
......@@ -540,13 +540,13 @@ class LiftoffAssembler : public TurboAssembler {
// this is the return value of the C function, stored in {rets[0]}. Further
// outputs (specified in {sig->returns()}) are read from the buffer and stored
// in the remaining {rets} registers.
inline void CallC(wasm::FunctionSig* sig, const LiftoffRegister* args,
inline void CallC(FunctionSig* sig, const LiftoffRegister* args,
const LiftoffRegister* rets, ValueType out_argument_type,
int stack_bytes, ExternalReference ext_ref);
inline void CallNativeWasmCode(Address addr);
// Indirect call: If {target == no_reg}, then pop the target from the stack.
inline void CallIndirect(wasm::FunctionSig* sig,
inline void CallIndirect(FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target);
inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);
......
......@@ -94,8 +94,7 @@ class LiftoffCompiler {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
// TODO(clemensh): Make this a template parameter.
static constexpr wasm::Decoder::ValidateFlag validate =
wasm::Decoder::kValidate;
static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
using Value = ValueBase;
......@@ -112,7 +111,7 @@ class LiftoffCompiler {
MovableLabel label;
};
using Decoder = WasmFullDecoder<validate, LiftoffCompiler>;
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
struct OutOfLineCode {
MovableLabel label;
......@@ -161,20 +160,20 @@ class LiftoffCompiler {
return __ GetTotalFrameSlotCount();
}
void unsupported(Decoder* decoder, const char* reason) {
void unsupported(FullDecoder* decoder, const char* reason) {
ok_ = false;
TRACE("unsupported: %s\n", reason);
decoder->errorf(decoder->pc(), "unsupported liftoff operation: %s", reason);
BindUnboundLabels(decoder);
}
bool DidAssemblerBailout(Decoder* decoder) {
bool DidAssemblerBailout(FullDecoder* decoder) {
if (decoder->failed() || !__ did_bailout()) return false;
unsupported(decoder, __ bailout_reason());
return true;
}
bool CheckSupportedType(Decoder* decoder,
bool CheckSupportedType(FullDecoder* decoder,
Vector<const ValueType> supported_types,
ValueType type, const char* context) {
char buffer[128];
......@@ -191,7 +190,7 @@ class LiftoffCompiler {
return safepoint_table_builder_.GetCodeOffset();
}
void BindUnboundLabels(Decoder* decoder) {
void BindUnboundLabels(FullDecoder* decoder) {
#ifdef DEBUG
// Bind all labels now, otherwise their destructor will fire a DCHECK error
// if they were referenced before.
......@@ -211,7 +210,7 @@ class LiftoffCompiler {
#endif
}
void StartFunction(Decoder* decoder) {
void StartFunction(FullDecoder* decoder) {
int num_locals = decoder->NumLocals();
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
......@@ -302,7 +301,7 @@ class LiftoffCompiler {
__ bind(ool.continuation.get());
}
void StartFunctionBody(Decoder* decoder, Control* block) {
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
return;
......@@ -418,7 +417,7 @@ class LiftoffCompiler {
}
}
void FinishFunction(Decoder* decoder) {
void FinishFunction(FullDecoder* decoder) {
if (DidAssemblerBailout(decoder)) return;
for (OutOfLineCode& ool : out_of_line_code_) {
GenerateOutOfLineCode(ool);
......@@ -431,23 +430,23 @@ class LiftoffCompiler {
DidAssemblerBailout(decoder);
}
void OnFirstError(Decoder* decoder) {
void OnFirstError(FullDecoder* decoder) {
ok_ = false;
BindUnboundLabels(decoder);
asm_.AbortCompilation();
}
void NextInstruction(Decoder* decoder, WasmOpcode opcode) {
void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
TraceCacheState(decoder);
SLOW_DCHECK(__ ValidateCacheState());
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
}
void Block(Decoder* decoder, Control* block) {
void Block(FullDecoder* decoder, Control* block) {
block->label_state.stack_base = __ cache_state()->stack_height();
}
void Loop(Decoder* decoder, Control* loop) {
void Loop(FullDecoder* decoder, Control* loop) {
loop->label_state.stack_base = __ cache_state()->stack_height();
// Before entering a loop, spill all locals to the stack, in order to free
......@@ -467,9 +466,11 @@ class LiftoffCompiler {
StackCheck(decoder->position());
}
void Try(Decoder* decoder, Control* block) { unsupported(decoder, "try"); }
void Try(FullDecoder* decoder, Control* block) {
unsupported(decoder, "try");
}
void If(Decoder* decoder, const Value& cond, Control* if_block) {
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
......@@ -489,7 +490,7 @@ class LiftoffCompiler {
if_block->else_state->state.Split(*__ cache_state());
}
void FallThruTo(Decoder* decoder, Control* c) {
void FallThruTo(FullDecoder* decoder, Control* c) {
if (c->end_merge.reached) {
__ MergeFullStackWith(c->label_state);
} else if (c->is_onearmed_if()) {
......@@ -502,7 +503,7 @@ class LiftoffCompiler {
TraceCacheState(decoder);
}
void PopControl(Decoder* decoder, Control* c) {
void PopControl(FullDecoder* decoder, Control* c) {
if (!c->is_loop() && c->end_merge.reached) {
__ cache_state()->Steal(c->label_state);
}
......@@ -511,7 +512,7 @@ class LiftoffCompiler {
}
}
void EndControl(Decoder* decoder, Control* c) {}
void EndControl(FullDecoder* decoder, Control* c) {}
enum CCallReturn : bool { kHasReturn = true, kNoReturn = false };
......@@ -610,7 +611,7 @@ class LiftoffCompiler {
__ PushRegister(dst_type, dst);
}
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
......@@ -743,7 +744,7 @@ class LiftoffCompiler {
}
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
#define CASE_I32_BINOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
......@@ -990,11 +991,11 @@ class LiftoffCompiler {
#undef CASE_CCALL_BINOP
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
__ cache_state()->stack_state.emplace_back(kWasmI32, value);
}
void I64Const(Decoder* decoder, Value* result, int64_t value) {
void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
// The {VarState} stores constant values as int32_t, thus we only store
// 64-bit constants in this field if it fits in an int32_t. Larger values
// cannot be used as immediate value anyway, so we can also just put them in
......@@ -1009,30 +1010,30 @@ class LiftoffCompiler {
}
}
void F32Const(Decoder* decoder, Value* result, float value) {
void F32Const(FullDecoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
}
void F64Const(Decoder* decoder, Value* result, double value) {
void F64Const(FullDecoder* decoder, Value* result, double value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF64, reg);
}
void RefNull(Decoder* decoder, Value* result) {
void RefNull(FullDecoder* decoder, Value* result) {
unsupported(decoder, "ref_null");
}
void Drop(Decoder* decoder, const Value& value) {
void Drop(FullDecoder* decoder, const Value& value) {
auto& slot = __ cache_state()->stack_state.back();
// If the dropped slot contains a register, decrement its use count.
if (slot.is_reg()) __ cache_state()->dec_used(slot.reg());
__ cache_state()->stack_state.pop_back();
}
void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) {
void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
if (implicit) {
DCHECK_EQ(1, decoder->control_depth());
Control* func_block = decoder->control_at(0);
......@@ -1056,7 +1057,7 @@ class LiftoffCompiler {
static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
void GetLocal(Decoder* decoder, Value* result,
void GetLocal(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
auto& slot = __ cache_state()->stack_state[imm.index];
DCHECK_EQ(slot.type(), imm.type);
......@@ -1119,12 +1120,12 @@ class LiftoffCompiler {
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
void SetLocal(Decoder* decoder, const Value& value,
void SetLocal(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
SetLocal(imm.index, false);
}
void TeeLocal(Decoder* decoder, const Value& value, Value* result,
void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
SetLocal(imm.index, true);
}
......@@ -1146,7 +1147,7 @@ class LiftoffCompiler {
return addr;
}
void GetGlobal(Decoder* decoder, Value* result,
void GetGlobal(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
......@@ -1161,7 +1162,7 @@ class LiftoffCompiler {
__ PushRegister(global->type, value);
}
void SetGlobal(Decoder* decoder, const Value& value,
void SetGlobal(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
......@@ -1174,14 +1175,14 @@ class LiftoffCompiler {
__ Store(addr.gp(), no_reg, offset, reg, type, pinned);
}
void Unreachable(Decoder* decoder) {
void Unreachable(FullDecoder* decoder) {
Label* unreachable_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapUnreachable);
__ emit_jump(unreachable_label);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
void Select(Decoder* decoder, const Value& cond, const Value& fval,
void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
LiftoffRegList pinned;
Register condition = pinned.set(__ PopToRegister()).gp();
......@@ -1215,11 +1216,9 @@ class LiftoffCompiler {
__ jmp(target->label.get());
}
void Br(Decoder* decoder, Control* target) {
Br(target);
}
void Br(FullDecoder* decoder, Control* target) { Br(target); }
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
Label cont_false;
Register value = __ PopToRegister().gp();
__ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
......@@ -1230,7 +1229,7 @@ class LiftoffCompiler {
// Generate a branch table case, potentially reusing previously generated
// stack transfer code.
void GenerateBrCase(Decoder* decoder, uint32_t br_depth,
void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
std::map<uint32_t, MovableLabel>& br_targets) {
MovableLabel& label = br_targets[br_depth];
if (label.get()->is_bound()) {
......@@ -1243,7 +1242,7 @@ class LiftoffCompiler {
// Generate a branch table for input in [min, max).
// TODO(wasm): Generate a real branch table (like TF TableSwitch).
void GenerateBrTable(Decoder* decoder, LiftoffRegister tmp,
void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
LiftoffRegister value, uint32_t min, uint32_t max,
BranchTableIterator<validate>& table_iterator,
std::map<uint32_t, MovableLabel>& br_targets) {
......@@ -1269,7 +1268,7 @@ class LiftoffCompiler {
br_targets);
}
void BrTable(Decoder* decoder, const BranchTableImmediate<validate>& imm,
void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
const Value& key) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
......@@ -1294,7 +1293,7 @@ class LiftoffCompiler {
DCHECK(!table_iterator.has_next());
}
void Else(Decoder* decoder, Control* if_block) {
void Else(FullDecoder* decoder, Control* if_block) {
if (if_block->reachable()) __ emit_jump(if_block->label.get());
__ bind(if_block->else_state->label.get());
__ cache_state()->Steal(if_block->else_state->state);
......@@ -1314,8 +1313,8 @@ class LiftoffCompiler {
// Returns true if the memory access is statically known to be out of bounds
// (a jump to the trap was generated in that case); returns false otherwise.
bool BoundsCheckMem(Decoder* decoder, uint32_t access_size, uint32_t offset,
Register index, LiftoffRegList pinned) {
bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
uint32_t offset, Register index, LiftoffRegList pinned) {
const bool statically_oob = access_size > env_->max_memory_size ||
offset > env_->max_memory_size - access_size;
......@@ -1392,27 +1391,27 @@ class LiftoffCompiler {
__ LoadConstant(address, WasmValue(offset));
__ emit_i32_add(address.gp(), address.gp(), index);
// Get a register to hold the stack slot for wasm::MemoryTracingInfo.
// Get a register to hold the stack slot for MemoryTracingInfo.
LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Allocate stack slot for wasm::MemoryTracingInfo.
__ AllocateStackSlot(info.gp(), sizeof(wasm::MemoryTracingInfo));
// Allocate stack slot for MemoryTracingInfo.
__ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
// Now store all information into the wasm::MemoryTracingInfo struct.
__ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, address),
address, StoreType::kI32Store, pinned);
// Now store all information into the MemoryTracingInfo struct.
__ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, address), address,
StoreType::kI32Store, pinned);
__ LoadConstant(address, WasmValue(is_store ? 1 : 0));
__ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, is_store),
address, StoreType::kI32Store8, pinned);
__ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), address,
StoreType::kI32Store8, pinned);
__ LoadConstant(address, WasmValue(static_cast<int>(rep)));
__ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, mem_rep),
address, StoreType::kI32Store8, pinned);
__ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), address,
StoreType::kI32Store8, pinned);
source_position_table_builder_.AddPosition(__ pc_offset(),
SourcePosition(position), false);
Register args[] = {info.gp()};
GenerateRuntimeCall(Runtime::kWasmTraceMemory, arraysize(args), args);
__ DeallocateStackSlot(sizeof(wasm::MemoryTracingInfo));
__ DeallocateStackSlot(sizeof(MemoryTracingInfo));
}
void GenerateRuntimeCall(Runtime::FunctionId runtime_function, int num_args,
......@@ -1469,7 +1468,7 @@ class LiftoffCompiler {
return index;
}
void LoadMem(Decoder* decoder, LoadType type,
void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
ValueType value_type = type.value_type();
......@@ -1502,7 +1501,7 @@ class LiftoffCompiler {
}
}
void StoreMem(Decoder* decoder, StoreType type,
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type();
......@@ -1532,7 +1531,7 @@ class LiftoffCompiler {
}
}
void CurrentMemoryPages(Decoder* decoder, Value* result) {
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
LiftoffRegList pinned;
LiftoffRegister mem_size = pinned.set(__ GetUnusedRegister(kGpReg));
LiftoffRegister tmp_const =
......@@ -1540,12 +1539,12 @@ class LiftoffCompiler {
LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
// TODO(clemensh): Shift by immediate directly.
__ LoadConstant(tmp_const,
WasmValue(int32_t{WhichPowerOf2(wasm::kWasmPageSize)}));
WasmValue(int32_t{WhichPowerOf2(kWasmPageSize)}));
__ emit_i32_shr(mem_size.gp(), mem_size.gp(), tmp_const.gp(), pinned);
__ PushRegister(kWasmI32, mem_size);
}
void GrowMemory(Decoder* decoder, const Value& value, Value* result_val) {
void GrowMemory(FullDecoder* decoder, const Value& value, Value* result_val) {
// Pop the input, then spill all cache registers to make the runtime call.
LiftoffRegList pinned;
LiftoffRegister input = pinned.set(__ PopToRegister());
......@@ -1566,7 +1565,7 @@ class LiftoffCompiler {
Register param_reg = descriptor.GetRegisterParameter(0);
if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
__ CallRuntimeStub(wasm::WasmCode::kWasmGrowMemory);
__ CallRuntimeStub(WasmCode::kWasmGrowMemory);
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
......@@ -1577,7 +1576,8 @@ class LiftoffCompiler {
__ PushRegister(kWasmI32, result);
}
void CallDirect(Decoder* decoder, const CallFunctionImmediate<validate>& imm,
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1)
return unsupported(decoder, "multi-return");
......@@ -1641,7 +1641,7 @@ class LiftoffCompiler {
}
}
void CallIndirect(Decoder* decoder, const Value& index_val,
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1) {
......@@ -1765,36 +1765,36 @@ class LiftoffCompiler {
__ FinishCall(imm.sig, call_descriptor);
}
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
unsupported(decoder, "simd");
}
void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate>& imm,
const Vector<Value> inputs, Value* result) {
unsupported(decoder, "simd");
}
void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdShiftImmediate<validate>& imm, const Value& input,
Value* result) {
unsupported(decoder, "simd");
}
void Simd8x16ShuffleOp(Decoder* decoder,
void Simd8x16ShuffleOp(FullDecoder* decoder,
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
unsupported(decoder, "simd");
}
void Throw(Decoder* decoder, const ExceptionIndexImmediate<validate>&,
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
Control* block, const Vector<Value>& args) {
unsupported(decoder, "throw");
}
void CatchException(Decoder* decoder,
void CatchException(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> caught_values) {
unsupported(decoder, "catch");
}
void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
unsupported(decoder, "atomicop");
}
......@@ -1816,7 +1816,7 @@ class LiftoffCompiler {
// patch the actually needed stack size in the end.
uint32_t pc_offset_stack_frame_construction_ = 0;
void TraceCacheState(Decoder* decoder) const {
void TraceCacheState(FullDecoder* decoder) const {
#ifdef DEBUG
if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
StdoutStream os;
......@@ -1845,18 +1845,18 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
}
Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
const wasm::WasmModule* module =
const WasmModule* module =
wasm_unit_->env_ ? wasm_unit_->env_->module : nullptr;
auto call_descriptor =
compiler::GetWasmCallDescriptor(&zone, wasm_unit_->func_body_.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
base::in_place, wasm_unit_->counters_->liftoff_compile_time());
wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
decoder(&zone, module, wasm_unit_->func_body_, call_descriptor,
wasm_unit_->env_, &zone);
WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
&zone, module, wasm_unit_->func_body_, call_descriptor, wasm_unit_->env_,
&zone);
decoder.Decode();
liftoff_compile_time_scope.reset();
wasm::LiftoffCompiler* compiler = &decoder.interface();
LiftoffCompiler* compiler = &decoder.interface();
if (decoder.failed()) return false; // validation error
if (!compiler->ok()) {
// Liftoff compilation failed.
......@@ -1887,13 +1887,13 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
code_ = wasm_unit_->native_module_->AddCode(
wasm_unit_->func_index_, desc, frame_slot_count, safepoint_table_offset,
0, std::move(protected_instructions), std::move(source_positions),
wasm::WasmCode::kLiftoff);
WasmCode::kLiftoff);
wasm_unit_->native_module_->PublishCode(code_);
return true;
}
wasm::WasmCode* LiftoffCompilationUnit::FinishCompilation(wasm::ErrorThrower*) {
WasmCode* LiftoffCompilationUnit::FinishCompilation(ErrorThrower*) {
return code_;
}
......
......@@ -21,7 +21,7 @@ class LiftoffCompilationUnit final {
: wasm_unit_(wasm_unit) {}
bool ExecuteCompilation();
wasm::WasmCode* FinishCompilation(wasm::ErrorThrower*);
WasmCode* FinishCompilation(ErrorThrower*);
private:
WasmCompilationUnit* const wasm_unit_;
......
......@@ -1214,7 +1214,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
public:
template <typename... InterfaceArgs>
WasmFullDecoder(Zone* zone, const wasm::WasmModule* module,
WasmFullDecoder(Zone* zone, const WasmModule* module,
const FunctionBody& body, InterfaceArgs&&... interface_args)
: WasmDecoder<validate>(module, body.sig, body.start, body.end,
body.offset),
......@@ -1301,7 +1301,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return local_type_vec_[index];
}
inline wasm::WasmCodePosition position() {
inline WasmCodePosition position() {
int offset = static_cast<int>(this->pc_ - this->start_);
DCHECK_EQ(this->pc_ - this->start_, offset); // overflows cannot happen
return offset;
......@@ -2476,14 +2476,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
class EmptyInterface {
public:
static constexpr wasm::Decoder::ValidateFlag validate =
wasm::Decoder::kValidate;
static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
using Value = ValueBase;
using Control = ControlBase<Value>;
using Decoder = WasmFullDecoder<validate, EmptyInterface>;
using FullDecoder = WasmFullDecoder<validate, EmptyInterface>;
#define DEFINE_EMPTY_CALLBACK(name, ...) \
void name(Decoder* decoder, ##__VA_ARGS__) {}
void name(FullDecoder* decoder, ##__VA_ARGS__) {}
INTERFACE_FUNCTIONS(DEFINE_EMPTY_CALLBACK)
#undef DEFINE_EMPTY_CALLBACK
};
......
......@@ -61,9 +61,8 @@ constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
class WasmGraphBuildingInterface {
public:
static constexpr wasm::Decoder::ValidateFlag validate =
wasm::Decoder::kValidate;
using Decoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
using FullDecoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
struct Value : public ValueWithNamedConstructors<Value> {
TFNode* node;
......@@ -85,7 +84,7 @@ class WasmGraphBuildingInterface {
explicit WasmGraphBuildingInterface(TFBuilder* builder) : builder_(builder) {}
void StartFunction(Decoder* decoder) {
void StartFunction(FullDecoder* decoder) {
SsaEnv* ssa_env =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
uint32_t num_locals = decoder->NumLocals();
......@@ -101,8 +100,7 @@ class WasmGraphBuildingInterface {
TFNode* start = builder_->Start(
static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
// Initialize the instance parameter (index 0).
builder_->set_instance_node(
builder_->Param(wasm::kWasmInstanceParameterIndex));
builder_->set_instance_node(builder_->Param(kWasmInstanceParameterIndex));
// Initialize local variables. Parameters are shifted by 1 because of the
// instance parameter.
uint32_t index = 0;
......@@ -132,25 +130,25 @@ class WasmGraphBuildingInterface {
builder_->InitInstanceCache(&ssa_env->instance_cache);
}
void StartFunctionBody(Decoder* decoder, Control* block) {
void StartFunctionBody(FullDecoder* decoder, Control* block) {
SsaEnv* break_env = ssa_env_;
SetEnv(Steal(decoder->zone(), break_env));
block->end_env = break_env;
}
void FinishFunction(Decoder*) { builder_->PatchInStackCheckIfNeeded(); }
void FinishFunction(FullDecoder*) { builder_->PatchInStackCheckIfNeeded(); }
void OnFirstError(Decoder*) {}
void OnFirstError(FullDecoder*) {}
void NextInstruction(Decoder*, WasmOpcode) {}
void NextInstruction(FullDecoder*, WasmOpcode) {}
void Block(Decoder* decoder, Control* block) {
void Block(FullDecoder* decoder, Control* block) {
// The break environment is the outer environment.
block->end_env = ssa_env_;
SetEnv(Steal(decoder->zone(), ssa_env_));
}
void Loop(Decoder* decoder, Control* block) {
void Loop(FullDecoder* decoder, Control* block) {
SsaEnv* finish_try_env = Steal(decoder->zone(), ssa_env_);
block->end_env = finish_try_env;
// The continue environment is the inner environment.
......@@ -164,7 +162,7 @@ class WasmGraphBuildingInterface {
}
}
void Try(Decoder* decoder, Control* block) {
void Try(FullDecoder* decoder, Control* block) {
SsaEnv* outer_env = ssa_env_;
SsaEnv* catch_env = Split(decoder, outer_env);
// Mark catch environment as unreachable, since only accessible
......@@ -179,7 +177,7 @@ class WasmGraphBuildingInterface {
current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
}
void If(Decoder* decoder, const Value& cond, Control* if_block) {
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
if (ssa_env_->go()) BUILD(BranchNoHint, cond.node, &if_true, &if_false);
......@@ -193,51 +191,51 @@ class WasmGraphBuildingInterface {
SetEnv(true_env);
}
void FallThruTo(Decoder* decoder, Control* c) {
void FallThruTo(FullDecoder* decoder, Control* c) {
DCHECK(!c->is_loop());
MergeValuesInto(decoder, c, &c->end_merge);
}
void PopControl(Decoder* decoder, Control* block) {
void PopControl(FullDecoder* decoder, Control* block) {
if (!block->is_loop()) SetEnv(block->end_env);
}
void EndControl(Decoder* decoder, Control* block) { ssa_env_->Kill(); }
void EndControl(FullDecoder* decoder, Control* block) { ssa_env_->Kill(); }
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig,
void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig* sig,
const Value& value, Value* result) {
result->node = BUILD(Unop, opcode, value.node, decoder->position());
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig,
void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig* sig,
const Value& lhs, const Value& rhs, Value* result) {
auto node = BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
if (result) result->node = node;
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
result->node = builder_->Int32Constant(value);
}
void I64Const(Decoder* decoder, Value* result, int64_t value) {
void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
result->node = builder_->Int64Constant(value);
}
void F32Const(Decoder* decoder, Value* result, float value) {
void F32Const(FullDecoder* decoder, Value* result, float value) {
result->node = builder_->Float32Constant(value);
}
void F64Const(Decoder* decoder, Value* result, double value) {
void F64Const(FullDecoder* decoder, Value* result, double value) {
result->node = builder_->Float64Constant(value);
}
void RefNull(Decoder* decoder, Value* result) {
void RefNull(FullDecoder* decoder, Value* result) {
result->node = builder_->RefNull();
}
void Drop(Decoder* decoder, const Value& value) {}
void Drop(FullDecoder* decoder, const Value& value) {}
void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) {
void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
if (implicit) {
DCHECK_EQ(1, decoder->control_depth());
SetEnv(decoder->control_at(0)->end_env);
......@@ -250,40 +248,40 @@ class WasmGraphBuildingInterface {
BUILD(Return, static_cast<unsigned>(values.size()), buffer);
}
void GetLocal(Decoder* decoder, Value* result,
void GetLocal(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
if (!ssa_env_->locals) return; // unreachable
result->node = ssa_env_->locals[imm.index];
}
void SetLocal(Decoder* decoder, const Value& value,
void SetLocal(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
void TeeLocal(Decoder* decoder, const Value& value, Value* result,
void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
result->node = value.node;
if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
void GetGlobal(Decoder* decoder, Value* result,
void GetGlobal(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
result->node = BUILD(GetGlobal, imm.index);
}
void SetGlobal(Decoder* decoder, const Value& value,
void SetGlobal(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
BUILD(SetGlobal, imm.index, value.node);
}
void Unreachable(Decoder* decoder) {
void Unreachable(FullDecoder* decoder) {
BUILD(Unreachable, decoder->position());
}
void Select(Decoder* decoder, const Value& cond, const Value& fval,
void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
TFNode* controls[2];
BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]);
......@@ -294,11 +292,11 @@ class WasmGraphBuildingInterface {
ssa_env_->control = merge;
}
void Br(Decoder* decoder, Control* target) {
void Br(FullDecoder* decoder, Control* target) {
MergeValuesInto(decoder, target, target->br_merge());
}
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(decoder, fenv);
fenv->SetNotMerged();
......@@ -308,7 +306,7 @@ class WasmGraphBuildingInterface {
ssa_env_ = fenv;
}
void BrTable(Decoder* decoder, const BranchTableImmediate<validate>& imm,
void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
const Value& key) {
if (imm.table_count == 0) {
// Only a default target. Do the equivalent of br.
......@@ -336,11 +334,11 @@ class WasmGraphBuildingInterface {
ssa_env_ = break_env;
}
void Else(Decoder* decoder, Control* if_block) {
void Else(FullDecoder* decoder, Control* if_block) {
SetEnv(if_block->false_env);
}
void LoadMem(Decoder* decoder, LoadType type,
void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm, const Value& index,
Value* result) {
result->node =
......@@ -348,56 +346,57 @@ class WasmGraphBuildingInterface {
imm.offset, imm.alignment, decoder->position());
}
void StoreMem(Decoder* decoder, StoreType type,
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm, const Value& index,
const Value& value) {
BUILD(StoreMem, type.mem_rep(), index.node, imm.offset, imm.alignment,
value.node, decoder->position(), type.value_type());
}
void CurrentMemoryPages(Decoder* decoder, Value* result) {
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
result->node = BUILD(CurrentMemoryPages);
}
void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
void GrowMemory(FullDecoder* decoder, const Value& value, Value* result) {
result->node = BUILD(GrowMemory, value.node);
// Always reload the instance cache after growing memory.
LoadContextIntoSsa(ssa_env_);
}
void CallDirect(Decoder* decoder, const CallFunctionImmediate<validate>& imm,
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
DoCall(decoder, nullptr, imm.sig, imm.index, args, returns);
}
void CallIndirect(Decoder* decoder, const Value& index,
void CallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
DoCall(decoder, index.node, imm.sig, imm.sig_index, args, returns);
}
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
TFNode** inputs = GetNodes(args);
TFNode* node = BUILD(SimdOp, opcode, inputs);
if (result) result->node = node;
}
void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate> imm, Vector<Value> inputs,
Value* result) {
TFNode** nodes = GetNodes(inputs);
result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes);
}
void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdShiftImmediate<validate> imm, const Value& input,
Value* result) {
TFNode* inputs[] = {input.node};
result->node = BUILD(SimdShiftOp, opcode, imm.shift, inputs);
}
void Simd8x16ShuffleOp(Decoder* decoder,
void Simd8x16ShuffleOp(FullDecoder* decoder,
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
......@@ -405,14 +404,14 @@ class WasmGraphBuildingInterface {
result->node = BUILD(Simd8x16ShuffleOp, imm.shuffle, input_nodes);
}
TFNode* GetExceptionTag(Decoder* decoder,
TFNode* GetExceptionTag(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm) {
// TODO(kschimpf): Need to get runtime exception tag values. This
// code only handles non-imported/exported exceptions.
return BUILD(Int32Constant, imm.index);
}
void Throw(Decoder* decoder, const ExceptionIndexImmediate<validate>& imm,
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
Control* block, const Vector<Value>& value_args) {
int count = value_args.length();
ZoneVector<TFNode*> args(count, decoder->zone());
......@@ -424,7 +423,7 @@ class WasmGraphBuildingInterface {
EndControl(decoder, block);
}
void CatchException(Decoder* decoder,
void CatchException(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
......@@ -483,7 +482,7 @@ class WasmGraphBuildingInterface {
}
}
void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
TFNode** inputs = GetNodes(args);
TFNode* node = BUILD(AtomicOp, opcode, inputs, imm.alignment, imm.offset,
......@@ -496,7 +495,7 @@ class WasmGraphBuildingInterface {
TFBuilder* builder_;
uint32_t current_catch_ = kNullCatch;
TryInfo* current_try_info(Decoder* decoder) {
TryInfo* current_try_info(FullDecoder* decoder) {
return decoder->control_at(decoder->control_depth() - 1 - current_catch_)
->try_info;
}
......@@ -548,7 +547,7 @@ class WasmGraphBuildingInterface {
builder_->set_instance_cache(&env->instance_cache);
}
TFNode* CheckForException(Decoder* decoder, TFNode* node) {
TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
if (node == nullptr) return nullptr;
const bool inside_try_scope = current_catch_ != kNullCatch;
......@@ -600,7 +599,7 @@ class WasmGraphBuildingInterface {
}
}
void MergeValuesInto(Decoder* decoder, Control* c, Merge<Value>* merge) {
void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
if (!ssa_env_->go()) return;
......@@ -623,7 +622,7 @@ class WasmGraphBuildingInterface {
}
}
void Goto(Decoder* decoder, SsaEnv* from, SsaEnv* to) {
void Goto(FullDecoder* decoder, SsaEnv* from, SsaEnv* to) {
DCHECK_NOT_NULL(to);
if (!from->go()) return;
switch (to->state) {
......@@ -685,7 +684,7 @@ class WasmGraphBuildingInterface {
return from->Kill();
}
SsaEnv* PrepareForLoop(Decoder* decoder, SsaEnv* env) {
SsaEnv* PrepareForLoop(FullDecoder* decoder, SsaEnv* env) {
if (!env->go()) return Split(decoder, env);
env->state = SsaEnv::kMerged;
......@@ -732,7 +731,7 @@ class WasmGraphBuildingInterface {
}
// Create a complete copy of {from}.
SsaEnv* Split(Decoder* decoder, SsaEnv* from) {
SsaEnv* Split(FullDecoder* decoder, SsaEnv* from) {
DCHECK_NOT_NULL(from);
SsaEnv* result =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
......@@ -782,9 +781,8 @@ class WasmGraphBuildingInterface {
return result;
}
void DoCall(WasmFullDecoder<validate, WasmGraphBuildingInterface>* decoder,
TFNode* index_node, FunctionSig* sig, uint32_t index,
const Value args[], Value returns[]) {
void DoCall(FullDecoder* decoder, TFNode* index_node, FunctionSig* sig,
uint32_t index, const Value args[], Value returns[]) {
int param_count = static_cast<int>(sig->parameter_count());
TFNode** arg_nodes = builder_->Buffer(param_count + 1);
TFNode** return_nodes = nullptr;
......@@ -835,8 +833,7 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
}
DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const wasm::WasmModule* module,
FunctionBody& body) {
const WasmModule* module, FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(&zone, module,
body);
......@@ -894,14 +891,13 @@ const char* RawOpcodeName(WasmOpcode opcode) {
} // namespace
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
const wasm::WasmModule* module,
PrintLocals print_locals) {
const WasmModule* module, PrintLocals print_locals) {
StdoutStream os;
return PrintRawWasmCode(allocator, body, module, print_locals, os);
}
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
const wasm::WasmModule* module, PrintLocals print_locals,
const WasmModule* module, PrintLocals print_locals,
std::ostream& os, std::vector<int>* line_numbers) {
Zone zone(allocator, ZONE_NAME);
WasmDecoder<Decoder::kNoValidate> decoder(module, body.sig, body.start,
......
......@@ -27,7 +27,7 @@ const char* GetCompilationModeAsString(
UNREACHABLE();
}
void RecordStats(const wasm::WasmCode* code, Counters* counters) {
void RecordStats(const WasmCode* code, Counters* counters) {
counters->wasm_generated_code_size()->Increment(
static_cast<int>(code->instructions().size()));
counters->wasm_reloc_size()->Increment(
......@@ -42,10 +42,12 @@ WasmCompilationUnit::GetDefaultCompilationMode() {
return FLAG_liftoff ? CompilationMode::kLiftoff : CompilationMode::kTurbofan;
}
WasmCompilationUnit::WasmCompilationUnit(
WasmEngine* wasm_engine, ModuleEnv* env, wasm::NativeModule* native_module,
wasm::FunctionBody body, wasm::WasmName name, int index, Counters* counters,
CompilationMode mode)
WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine,
ModuleEnv* env,
NativeModule* native_module,
FunctionBody body, WasmName name,
int index, Counters* counters,
CompilationMode mode)
: env_(env),
wasm_engine_(wasm_engine),
func_body_(body),
......@@ -97,9 +99,8 @@ void WasmCompilationUnit::ExecuteCompilation() {
}
}
wasm::WasmCode* WasmCompilationUnit::FinishCompilation(
wasm::ErrorThrower* thrower) {
wasm::WasmCode* ret;
WasmCode* WasmCompilationUnit::FinishCompilation(ErrorThrower* thrower) {
WasmCode* ret;
switch (mode_) {
case CompilationMode::kLiftoff:
ret = liftoff_unit_->FinishCompilation(thrower);
......@@ -139,15 +140,13 @@ void WasmCompilationUnit::SwitchMode(CompilationMode new_mode) {
}
// static
wasm::WasmCode* WasmCompilationUnit::CompileWasmFunction(
wasm::NativeModule* native_module, wasm::ErrorThrower* thrower,
Isolate* isolate, ModuleEnv* env, const wasm::WasmFunction* function,
CompilationMode mode) {
WasmCode* WasmCompilationUnit::CompileWasmFunction(
NativeModule* native_module, ErrorThrower* thrower, Isolate* isolate,
ModuleEnv* env, const WasmFunction* function, CompilationMode mode) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
wasm::FunctionBody function_body{
function->sig, function->code.offset(),
wire_bytes.start() + function->code.offset(),
wire_bytes.start() + function->code.end_offset()};
FunctionBody function_body{function->sig, function->code.offset(),
wire_bytes.start() + function->code.offset(),
wire_bytes.start() + function->code.end_offset()};
WasmCompilationUnit unit(isolate->wasm_engine(), env, native_module,
function_body,
......
......@@ -67,12 +67,11 @@ struct ModuleEnv {
: module(module),
use_trap_handler(use_trap_handler),
runtime_exception_support(runtime_exception_support),
min_memory_size(
module ? module->initial_pages * uint64_t{wasm::kWasmPageSize} : 0),
max_memory_size(
module && module->has_maximum_pages
? (module->maximum_pages * uint64_t{wasm::kWasmPageSize})
: wasm::kSpecMaxWasmMemoryBytes),
min_memory_size(module ? module->initial_pages * uint64_t{kWasmPageSize}
: 0),
max_memory_size(module && module->has_maximum_pages
? (module->maximum_pages * uint64_t{kWasmPageSize})
: kSpecMaxWasmMemoryBytes),
lower_simd(lower_simd) {}
};
......@@ -86,21 +85,21 @@ class WasmCompilationUnit final {
// typically means to hold a std::shared_ptr<Counters>).
// If used exclusively from a foreground thread, Isolate::counters() may be
// used by callers to pass Counters.
WasmCompilationUnit(WasmEngine* wasm_engine, ModuleEnv*, wasm::NativeModule*,
wasm::FunctionBody, wasm::WasmName, int index, Counters*,
WasmCompilationUnit(WasmEngine* wasm_engine, ModuleEnv*, NativeModule*,
FunctionBody, WasmName, int index, Counters*,
CompilationMode = GetDefaultCompilationMode());
~WasmCompilationUnit();
void ExecuteCompilation();
wasm::WasmCode* FinishCompilation(wasm::ErrorThrower* thrower);
WasmCode* FinishCompilation(ErrorThrower* thrower);
static wasm::WasmCode* CompileWasmFunction(
wasm::NativeModule* native_module, wasm::ErrorThrower* thrower,
Isolate* isolate, ModuleEnv* env, const wasm::WasmFunction* function,
static WasmCode* CompileWasmFunction(
NativeModule* native_module, ErrorThrower* thrower, Isolate* isolate,
ModuleEnv* env, const WasmFunction* function,
CompilationMode = GetDefaultCompilationMode());
wasm::NativeModule* native_module() const { return native_module_; }
NativeModule* native_module() const { return native_module_; }
CompilationMode mode() const { return mode_; }
private:
......@@ -109,11 +108,11 @@ class WasmCompilationUnit final {
ModuleEnv* env_;
WasmEngine* wasm_engine_;
wasm::FunctionBody func_body_;
wasm::WasmName func_name_;
FunctionBody func_body_;
WasmName func_name_;
Counters* counters_;
int func_index_;
wasm::NativeModule* native_module_;
NativeModule* native_module_;
CompilationMode mode_;
// LiftoffCompilationUnit, set if {mode_ == kLiftoff}.
std::unique_ptr<LiftoffCompilationUnit> liftoff_unit_;
......
......@@ -167,13 +167,14 @@ namespace {
class JSToWasmWrapperCache {
public:
Handle<Code> GetOrCompileJSToWasmWrapper(
Isolate* isolate, const wasm::NativeModule* native_module,
uint32_t func_index, wasm::UseTrapHandler use_trap_handler) {
const wasm::WasmModule* module = native_module->module();
const wasm::WasmFunction* func = &module->functions[func_index];
Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate,
const NativeModule* native_module,
uint32_t func_index,
UseTrapHandler use_trap_handler) {
const WasmModule* module = native_module->module();
const WasmFunction* func = &module->functions[func_index];
bool is_import = func_index < module->num_imported_functions;
std::pair<bool, wasm::FunctionSig> key(is_import, *func->sig);
std::pair<bool, FunctionSig> key(is_import, *func->sig);
Handle<Code>& cached = cache_[key];
if (!cached.is_null()) return cached;
......@@ -188,7 +189,7 @@ class JSToWasmWrapperCache {
private:
// We generate different code for calling imports than calling wasm functions
// in this module. Both are cached separately.
using CacheKey = std::pair<bool, wasm::FunctionSig>;
using CacheKey = std::pair<bool, FunctionSig>;
std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_;
};
......@@ -234,7 +235,7 @@ class InstanceBuilder {
JSToWasmWrapperCache js_to_wasm_cache_;
std::vector<SanitizedImport> sanitized_imports_;
wasm::UseTrapHandler use_trap_handler() const {
UseTrapHandler use_trap_handler() const {
return module_object_->native_module()->use_trap_handler() ? kUseTrapHandler
: kNoTrapHandler;
}
......@@ -325,9 +326,8 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
return {};
}
wasm::WasmCode* LazyCompileFunction(Isolate* isolate,
NativeModule* native_module,
int func_index) {
WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
int func_index) {
base::ElapsedTimer compilation_timer;
DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index)));
......@@ -358,9 +358,9 @@ wasm::WasmCode* LazyCompileFunction(Isolate* isolate,
WasmCompilationUnit unit(isolate->wasm_engine(), module_env, native_module,
body, func_name, func_index, isolate->counters());
unit.ExecuteCompilation();
wasm::WasmCode* wasm_code = unit.FinishCompilation(&thrower);
WasmCode* wasm_code = unit.FinishCompilation(&thrower);
if (wasm::WasmCode::ShouldBeLogged(isolate)) wasm_code->LogCode(isolate);
if (WasmCode::ShouldBeLogged(isolate)) wasm_code->LogCode(isolate);
// If there is a pending error, something really went wrong. The module was
// verified before starting execution with lazy compilation.
......@@ -392,8 +392,7 @@ Address CompileLazy(Isolate* isolate, NativeModule* native_module,
NativeModuleModificationScope native_module_modification_scope(native_module);
wasm::WasmCode* result =
LazyCompileFunction(isolate, native_module, func_index);
WasmCode* result = LazyCompileFunction(isolate, native_module, func_index);
DCHECK_NOT_NULL(result);
DCHECK_EQ(func_index, result->index());
......@@ -485,8 +484,7 @@ class CompilationUnitBuilder {
return base::make_unique<WasmCompilationUnit>(
compilation_state_->wasm_engine(), compilation_state_->module_env(),
native_module_,
wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
bytes.end()},
FunctionBody{function->sig, buffer_offset, bytes.begin(), bytes.end()},
name, function->func_index,
compilation_state_->isolate()->async_counters().get(), mode);
}
......@@ -547,7 +545,7 @@ void FinishCompilationUnits(CompilationState* compilation_state,
std::unique_ptr<WasmCompilationUnit> unit =
compilation_state->GetNextExecutedUnit();
if (unit == nullptr) break;
wasm::WasmCode* result = unit->FinishCompilation(thrower);
WasmCode* result = unit->FinishCompilation(thrower);
if (thrower->error()) {
compilation_state->Abort();
......@@ -658,7 +656,7 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module,
if (func.imported) continue; // Imports are compiled at instantiation time.
// Compile the function.
wasm::WasmCode* code = WasmCompilationUnit::CompileWasmFunction(
WasmCode* code = WasmCompilationUnit::CompileWasmFunction(
native_module, thrower, isolate, module_env, &func);
if (code == nullptr) {
TruncatedUserString<> name(wire_bytes.GetName(&func, module));
......@@ -779,7 +777,7 @@ class FinishCompileTask : public CancelableTask {
}
ErrorThrower thrower(compilation_state_->isolate(), "AsyncCompile");
wasm::WasmCode* result = unit->FinishCompilation(&thrower);
WasmCode* result = unit->FinishCompilation(&thrower);
if (thrower.error()) {
DCHECK_NULL(result);
......@@ -796,7 +794,7 @@ class FinishCompileTask : public CancelableTask {
DCHECK_EQ(CompileMode::kTiering, compilation_state_->compile_mode());
DCHECK(!result->is_liftoff());
if (wasm::WasmCode::ShouldBeLogged(isolate)) result->LogCode(isolate);
if (WasmCode::ShouldBeLogged(isolate)) result->LogCode(isolate);
}
// Update the compilation state, and possibly notify
......@@ -1011,11 +1009,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Create the WebAssembly.Instance object.
//--------------------------------------------------------------------------
wasm::NativeModule* native_module = module_object_->native_module();
NativeModule* native_module = module_object_->native_module();
TRACE("New module instantiation for %p\n", native_module);
Handle<WasmInstanceObject> instance =
WasmInstanceObject::New(isolate_, module_object_);
wasm::NativeModuleModificationScope native_modification_scope(native_module);
NativeModuleModificationScope native_modification_scope(native_module);
//--------------------------------------------------------------------------
// Set up the globals for the new instance.
......@@ -1491,7 +1489,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
RecordStats(*wrapper_code, isolate_->counters());
WasmCode* wasm_code = native_module->AddCodeCopy(
wrapper_code, wasm::WasmCode::kWasmToJsWrapper, func_index);
wrapper_code, WasmCode::kWasmToJsWrapper, func_index);
ImportedFunctionEntry entry(instance, func_index);
entry.set_wasm_to_js(*js_receiver, wasm_code);
}
......@@ -2933,7 +2931,7 @@ void CompileJsToWasmWrappers(Isolate* isolate,
int wrapper_index = 0;
Handle<FixedArray> export_wrappers(module_object->export_wrappers(), isolate);
NativeModule* native_module = module_object->native_module();
wasm::UseTrapHandler use_trap_handler =
UseTrapHandler use_trap_handler =
native_module->use_trap_handler() ? kUseTrapHandler : kNoTrapHandler;
const WasmModule* module = native_module->module();
for (auto exp : module->export_table) {
......
......@@ -451,8 +451,8 @@ class ModuleDecoderImpl : public Decoder {
});
WasmImport* import = &module_->import_table.back();
const byte* pos = pc_;
import->module_name = consume_string(true, "module name");
import->field_name = consume_string(true, "field name");
import->module_name = consume_string(*this, true, "module name");
import->field_name = consume_string(*this, true, "field name");
import->kind =
static_cast<ImportExportKindCode>(consume_u8("import kind"));
switch (import->kind) {
......@@ -615,7 +615,7 @@ class ModuleDecoderImpl : public Decoder {
});
WasmExport* exp = &module_->export_table.back();
exp->name = consume_string(true, "field name");
exp->name = consume_string(*this, true, "field name");
const byte* pos = pc();
exp->kind = static_cast<ImportExportKindCode>(consume_u8("export kind"));
......@@ -816,7 +816,7 @@ class ModuleDecoderImpl : public Decoder {
// Decode module name, ignore the rest.
// Function and local names will be decoded when needed.
if (name_type == NameSectionKindCode::kModule) {
WireBytesRef name = wasm::consume_string(inner, false, "module name");
WireBytesRef name = consume_string(inner, false, "module name");
if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
} else {
inner.consume_bytes(name_payload_len, "name subsection payload");
......@@ -1069,10 +1069,6 @@ class ModuleDecoderImpl : public Decoder {
}
}
WireBytesRef consume_string(bool validate_utf8, const char* name) {
return wasm::consume_string(*this, validate_utf8, name);
}
uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_u32v("signature index");
......@@ -1464,7 +1460,7 @@ ModuleResult ModuleDecoder::FinishDecoding(bool verify_functions) {
SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder,
const byte* end) {
WireBytesRef string = wasm::consume_string(decoder, true, "section name");
WireBytesRef string = consume_string(decoder, true, "section name");
if (decoder.failed() || decoder.pc() > end) {
return kUnknownSectionCode;
}
......@@ -1645,7 +1641,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
for (; decoder.ok() && functions_count > 0; --functions_count) {
uint32_t function_index = decoder.consume_u32v("function index");
WireBytesRef name = wasm::consume_string(decoder, false, "function name");
WireBytesRef name = consume_string(decoder, false, "function name");
// Be lenient with errors in the name section: Ignore non-UTF8 names. You
// can even assign to the same function multiple times (last valid one
......@@ -1688,7 +1684,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
uint32_t num_names = decoder.consume_u32v("namings count");
for (uint32_t k = 0; k < num_names; ++k) {
uint32_t local_index = decoder.consume_u32v("local index");
WireBytesRef name = wasm::consume_string(decoder, true, "local name");
WireBytesRef name = consume_string(decoder, true, "local name");
if (!decoder.ok()) break;
if (local_index > kMaxInt) continue;
func_names.max_local_index =
......
......@@ -117,7 +117,7 @@ void WasmCode::set_trap_handler_index(size_t value) {
void WasmCode::RegisterTrapHandlerData() {
DCHECK(!HasTrapHandlerIndex());
if (kind() != wasm::WasmCode::kFunction) return;
if (kind() != WasmCode::kFunction) return;
Address base = instruction_start();
......@@ -338,11 +338,11 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
}
void NativeModule::LogWasmCodes(Isolate* isolate) {
if (!wasm::WasmCode::ShouldBeLogged(isolate)) return;
if (!WasmCode::ShouldBeLogged(isolate)) return;
// TODO(titzer): we skip the logging of the import wrappers
// here, but they should be included somehow.
for (wasm::WasmCode* code : code_table()) {
for (WasmCode* code : code_table()) {
if (code != nullptr) code->LogCode(isolate);
}
}
......@@ -592,7 +592,7 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
std::vector<WasmCode*> result;
result.reserve(code_table().size());
for (wasm::WasmCode* code : code_table()) result.push_back(code);
for (WasmCode* code : code_table()) result.push_back(code);
return result;
}
......
......@@ -72,8 +72,7 @@ MaybeHandle<String> GetLocalName(Isolate* isolate,
if (!debug_info->has_locals_names()) {
Handle<WasmModuleObject> module_object(
debug_info->wasm_instance()->module_object(), isolate);
Handle<FixedArray> locals_names =
wasm::DecodeLocalNames(isolate, module_object);
Handle<FixedArray> locals_names = DecodeLocalNames(isolate, module_object);
debug_info->set_locals_names(*locals_names);
}
......@@ -290,7 +289,7 @@ class InterpreterHandle {
Handle<WasmInstanceObject> instance_obj(frame->wasm_instance(), isolate_);
// Check that this is indeed the instance which is connected to this
// interpreter.
DCHECK_EQ(this, Managed<wasm::InterpreterHandle>::cast(
DCHECK_EQ(this, Managed<InterpreterHandle>::cast(
instance_obj->debug_info()->interpreter_handle())
->raw());
return instance_obj;
......@@ -406,7 +405,7 @@ class InterpreterHandle {
return interpreter()->GetThread(0)->NumInterpretedCalls();
}
Handle<JSObject> GetGlobalScopeObject(wasm::InterpretedFrame* frame,
Handle<JSObject> GetGlobalScopeObject(InterpretedFrame* frame,
Handle<WasmDebugInfo> debug_info) {
Isolate* isolate = isolate_;
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
......@@ -430,7 +429,7 @@ class InterpreterHandle {
return global_scope_object;
}
Handle<JSObject> GetLocalScopeObject(wasm::InterpretedFrame* frame,
Handle<JSObject> GetLocalScopeObject(InterpretedFrame* frame,
Handle<WasmDebugInfo> debug_info) {
Isolate* isolate = isolate_;
......
......@@ -120,8 +120,7 @@ void WasmEngine::AsyncCompile(
// Make a copy of the wire bytes to avoid concurrent modification.
std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
module_object = SyncCompile(isolate, &thrower, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
......
......@@ -1419,7 +1419,7 @@ class ThreadImpl {
len = 1 + imm.length;
if (FLAG_wasm_trace_memory) {
wasm::MemoryTracingInfo info(imm.offset + index, false, rep);
MemoryTracingInfo info(imm.offset + index, false, rep);
TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
code->function->func_index, static_cast<int>(pc),
instance_object_->memory_start());
......@@ -1445,7 +1445,7 @@ class ThreadImpl {
len = 1 + imm.length;
if (FLAG_wasm_trace_memory) {
wasm::MemoryTracingInfo info(imm.offset + index, true, rep);
MemoryTracingInfo info(imm.offset + index, true, rep);
TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
code->function->func_index, static_cast<int>(pc),
instance_object_->memory_start());
......@@ -2122,9 +2122,9 @@ class ThreadImpl {
#ifdef DEBUG
// Compute the stack effect of this opcode, and verify later that the
// stack was modified accordingly.
std::pair<uint32_t, uint32_t> stack_effect = wasm::StackEffect(
codemap_->module(), frames_.back().code->function->sig,
code->orig_start + pc, code->orig_end);
std::pair<uint32_t, uint32_t> stack_effect =
StackEffect(codemap_->module(), frames_.back().code->function->sig,
code->orig_start + pc, code->orig_end);
sp_t expected_new_stack_height =
StackHeight() - stack_effect.first + stack_effect.second;
#endif
......@@ -2687,8 +2687,8 @@ class ThreadImpl {
ExternalCallResult CallExternalWasmFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance,
const wasm::WasmCode* code, FunctionSig* sig) {
if (code->kind() == wasm::WasmCode::kWasmToJsWrapper &&
const WasmCode* code, FunctionSig* sig) {
if (code->kind() == WasmCode::kWasmToJsWrapper &&
!IsJSCompatibleSignature(sig)) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kWasmTrapTypeError));
......@@ -2881,7 +2881,7 @@ class ThreadImpl {
HandleScope scope(isolate);
FunctionSig* signature = module()->signatures[sig_index];
if (code->kind() == wasm::WasmCode::kFunction) {
if (code->kind() == WasmCode::kFunction) {
if (!instance_object_.is_identical_to(instance)) {
// Cross instance call.
return CallExternalWasmFunction(isolate, instance, code, signature);
......@@ -2890,8 +2890,8 @@ class ThreadImpl {
}
// Call to external function.
if (code->kind() == wasm::WasmCode::kInterpreterEntry ||
code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
if (code->kind() == WasmCode::kInterpreterEntry ||
code->kind() == WasmCode::kWasmToJsWrapper) {
return CallExternalWasmFunction(isolate, instance, code, signature);
}
return {ExternalCallResult::INVALID_FUNC};
......
......@@ -39,8 +39,8 @@ WireBytesRef WasmModule::LookupFunctionName(const ModuleWireBytes& wire_bytes,
uint32_t function_index) const {
if (!function_names) {
function_names.reset(new std::unordered_map<uint32_t, WireBytesRef>());
wasm::DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(),
function_names.get());
DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(),
function_names.get());
}
auto it = function_names->find(function_index);
if (it == function_names->end()) return WireBytesRef();
......
......@@ -114,9 +114,9 @@ struct WasmExport {
enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix) \
((origin) == wasm::kWasmOrigin ? (counters)->prefix##_wasm_##suffix() \
: (counters)->prefix##_asm_##suffix())
#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix) \
((origin) == kWasmOrigin ? (counters)->prefix##_wasm_##suffix() \
: (counters)->prefix##_asm_##suffix())
struct ModuleWireBytes;
......
......@@ -374,7 +374,7 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
bool IsJSCompatibleSignature(const FunctionSig* sig) {
for (auto type : sig->all()) {
if (type == wasm::kWasmI64 || type == wasm::kWasmS128) return false;
if (type == kWasmI64 || type == kWasmS128) return false;
}
return sig->return_count() <= 1;
}
......
......@@ -557,8 +557,8 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
// handler was used or not when serializing.
UseTrapHandler use_trap_handler =
trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler;
wasm::ModuleEnv env(module, use_trap_handler,
wasm::RuntimeExceptionSupport::kRuntimeExceptionSupport);
ModuleEnv env(module, use_trap_handler,
RuntimeExceptionSupport::kRuntimeExceptionSupport);
OwnedVector<uint8_t> wire_bytes_copy = OwnedVector<uint8_t>::Of(wire_bytes);
......
......@@ -93,7 +93,7 @@ class CWasmEntryArgTester {
std::function<ReturnType(Args...)> expected_fn_;
FunctionSig* sig_;
Handle<JSFunction> c_wasm_entry_fn_;
wasm::WasmCode* wasm_code_;
WasmCode* wasm_code_;
};
} // namespace
......
......@@ -88,7 +88,7 @@ enum class CompilationState {
kFailed,
};
class TestResolver : public i::wasm::CompilationResultResolver {
class TestResolver : public CompilationResultResolver {
public:
explicit TestResolver(CompilationState* state) : state_(state) {}
......
......@@ -47,8 +47,7 @@ TestingModuleBuilder::TestingModuleBuilder(
trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
: kNoTrapHandler);
auto wasm_to_js_wrapper = native_module_->AddCodeCopy(
code.ToHandleChecked(), wasm::WasmCode::kWasmToJsWrapper,
maybe_import_index);
code.ToHandleChecked(), WasmCode::kWasmToJsWrapper, maybe_import_index);
ImportedFunctionEntry(instance_object_, maybe_import_index)
.set_wasm_to_js(*maybe_import->js_function, wasm_to_js_wrapper);
......@@ -69,7 +68,7 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size) {
test_module_->has_memory = true;
uint32_t alloc_size = RoundUp(size, kWasmPageSize);
Handle<JSArrayBuffer> new_buffer;
CHECK(wasm::NewArrayBuffer(isolate_, alloc_size).ToHandle(&new_buffer));
CHECK(NewArrayBuffer(isolate_, alloc_size).ToHandle(&new_buffer));
CHECK(!new_buffer.is_null());
mem_start_ = reinterpret_cast<byte*>(new_buffer->backing_store());
mem_size_ = size;
......@@ -426,8 +425,8 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
func_body, func_name, function_->func_index,
isolate()->counters(), comp_mode);
unit.ExecuteCompilation();
wasm::WasmCode* wasm_code = unit.FinishCompilation(&thrower);
if (wasm::WasmCode::ShouldBeLogged(isolate())) {
WasmCode* wasm_code = unit.FinishCompilation(&thrower);
if (WasmCode::ShouldBeLogged(isolate())) {
wasm_code->LogCode(isolate());
}
CHECK(!thrower.error());
......
......@@ -201,7 +201,7 @@ class TestingModuleBuilder {
Handle<WasmInstanceObject> instance_object() const {
return instance_object_;
}
wasm::WasmCode* GetFunctionCode(uint32_t index) const {
WasmCode* GetFunctionCode(uint32_t index) const {
return native_module_->code(index);
}
Address globals_start() const {
......@@ -263,7 +263,7 @@ class WasmFunctionWrapper : private compiler::GraphAndBuilders {
Init(call_descriptor, MachineTypeForC<ReturnType>(), param_vec);
}
void SetInnerCode(wasm::WasmCode* code) {
void SetInnerCode(WasmCode* code) {
intptr_t address = static_cast<intptr_t>(code->instruction_start());
compiler::NodeProperties::ChangeOp(
inner_code_node_,
......
......@@ -166,8 +166,8 @@ class WasmCodeManagerTest : public TestWithContext,
std::shared_ptr<WasmModule> module(new WasmModule);
module->num_declared_functions = kNumFunctions;
bool can_request_more = style == Growable;
wasm::ModuleEnv env(module.get(), UseTrapHandler::kNoTrapHandler,
RuntimeExceptionSupport::kNoRuntimeExceptionSupport);
ModuleEnv env(module.get(), UseTrapHandler::kNoTrapHandler,
RuntimeExceptionSupport::kNoRuntimeExceptionSupport);
return manager->NewNativeModule(i_isolate(), size, can_request_more,
std::move(module), env);
}
......
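For illustration only (not part of this CL's diff): a minimal sketch of why the dropped qualifiers are redundant, assuming the usual v8::internal::wasm namespace nesting. The declarations below are placeholders, not the real V8 ones.

// Inside the wasm namespace, unqualified name lookup already finds wasm:: types,
// so the explicit wasm:: prefix adds nothing. Placeholder declarations only.
namespace v8 {
namespace internal {
namespace wasm {

struct FunctionSig;  // stand-in for the real signature type

void PrepareCallOld(wasm::FunctionSig* sig);  // before: redundant qualifier
void PrepareCallNew(FunctionSig* sig);        // after: same type, no prefix

}  // namespace wasm
}  // namespace internal
}  // namespace v8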