Commit ccc068d5 authored by Jakob Gruber, committed by Commit Bot

[nojit] Skip the on-heap trampoline for builtin calls

This CL does two things:

1. It introduces Call/JumpCodeObject as the bottleneck for all calls
   to non-heap-constant Code objects; and
2. it dispatches directly to the off-heap entry point for all embedded
   code.

Codegen at runtime remains unchanged in order to preserve the shorter,
branchless calling sequence.
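
For reference, the call-site change is mechanical across architectures. A
condensed before/after sketch assembled from the hunks below (arm64 flavour
shown; register names vary per port):

  // Before: callers computed the entry point by skipping the Code header.
  __ Add(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Call(reg);

  // After: callers go through the new bottleneck, which can dispatch
  // directly to an off-heap builtin entry point when required.
  __ CallCodeObject(reg);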

Bug: v8:7777
Change-Id: I15fdcb51625209904c6a56737f085a23219319b9
Reviewed-on: https://chromium-review.googlesource.com/c/1382461
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58440}
parent 5fc0b466
......@@ -298,6 +298,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltinPointer(Register builtin_pointer) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
......
......@@ -1714,8 +1714,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Mov(x0, f->nargs);
Mov(x1, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, x0, x1));
Add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
Call(centry);
CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
......@@ -1926,37 +1925,26 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code &&
!Builtins::IsIsolateIndependentBuiltin(*code)) {
// Calls to embedded targets are initially generated as standard
// pc-relative calls below. When creating the embedded blob, call offsets
// are patched up to point directly to the off-heap instruction start.
// Note: It is safe to dereference {code} above since code generation
// for builtins and code stubs happens on the main thread.
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
IndirectLoadConstant(scratch, code);
Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(scratch, cond);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(scratch, cond);
return;
}
}
}
if (CanUseNearCallOrJump(rmode)) {
JumpHelper(static_cast<int64_t>(AddCodeTarget(code)), rmode, cond);
} else {
......@@ -1982,39 +1970,27 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
BlockPoolsScope scope(this);
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code &&
!Builtins::IsIsolateIndependentBuiltin(*code)) {
// Calls to embedded targets are initially generated as standard
// pc-relative calls below. When creating the embedded blob, call offsets
// are patched up to point directly to the off-heap instruction start.
// Note: It is safe to dereference {code} above since code generation
// for builtins and code stubs happens on the main thread.
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
IndirectLoadConstant(scratch, code);
Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
return;
}
}
}
if (CanUseNearCallOrJump(rmode)) {
near_call(AddCodeTarget(code), rmode);
} else {
......@@ -2044,6 +2020,65 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
Call(builtin_pointer);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
// * Builtin code runs in --jitless mode and thus must not call into on-heap
// Code targets. Instead, we dispatch through the builtins entry table.
// * Codegen at runtime does not have this restriction and we can use the
// shorter, branchless instruction sequence. The assumption here is that
// targets are usually generated code and not builtin Code objects.
if (options().isolate_independent_code) {
DCHECK(root_array_available());
Label if_code_is_builtin, out;
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
DCHECK(!AreAliased(destination, scratch));
DCHECK(!AreAliased(code_object, scratch));
// Check whether the Code object is a builtin. If so, call its (off-heap)
// entry point directly without going through the (on-heap) trampoline.
// Otherwise, just call the Code object as always.
Ldrsw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
Cmp(scratch, Operand(Builtins::kNoBuiltinId));
B(ne, &if_code_is_builtin);
// A non-builtin Code object, the entry point is at
// Code::raw_instruction_start().
Add(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
B(&out);
// A builtin Code object, the entry point is loaded from the builtin entry
// table.
// The builtin index is loaded in scratch.
bind(&if_code_is_builtin);
Lsl(destination, scratch, kSystemPointerSizeLog2);
Add(destination, destination, kRootRegister);
Ldr(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
bind(&out);
} else {
Add(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
}
}
void TurboAssembler::CallCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
Call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
......@@ -2328,12 +2363,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
Call(code);
CallCodeObject(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
Jump(code);
JumpCodeObject(code);
}
}
......
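The LoadCodeObjectEntry sequence above implements the following entry-point
selection. A minimal C++ sketch of the same logic, not part of the CL;
builtin_entry_table() is assumed to be the IsolateData accessor backing
builtin_entry_table_offset():

  // Sketch of what LoadCodeObjectEntry emits when
  // options().isolate_independent_code is set.
  Address CodeObjectEntry(Isolate* isolate, Code* code) {
    int builtin_index = code->builtin_index();
    if (builtin_index != Builtins::kNoBuiltinId) {
      // Builtin Code object: read the (off-heap) entry point from the
      // builtin entry table, which generated code indexes off kRootRegister.
      return isolate->isolate_data()->builtin_entry_table()[builtin_index];
    }
    // Non-builtin Code object: the entry point is the start of the on-heap
    // instructions, i.e. Code::raw_instruction_start().
    return code->raw_instruction_start();
  }

Codegen at runtime (no isolate-independent code) emits only the non-builtin
branch, which is the shorter, branchless sequence mentioned in the commit
message.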
......@@ -848,6 +848,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltinPointer(Register builtin_pointer) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
......
......@@ -97,8 +97,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(r0, r0);
}
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
__ JumpCodeObject(r2);
}
namespace {
......@@ -487,8 +486,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Move(r1, r4);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
__ JumpCodeObject(r2);
}
__ bind(&prepare_step_in_if_stepping);
......@@ -940,8 +938,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ add(r2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeObjectEntry(r2, optimized_code_entry);
__ Jump(r2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
......@@ -1460,8 +1457,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// which has been reset to the compile lazy builtin.
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
__ JumpCodeObject(r2);
}
namespace {
......@@ -2386,8 +2382,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(r2);
__ CallCodeObject(r2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
......@@ -2402,8 +2397,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
__ JumpCodeObject(r2);
__ bind(&stack_overflow);
{
......
......@@ -47,7 +47,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
ASM_LOCATION("Builtins::Generate_InternalArrayConstructor");
Label generic_array_code;
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
......@@ -88,15 +87,12 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
__ Br(x2);
__ JumpCodeObject(x2);
}
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label post_instantiation_deopt_entry;
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
......@@ -562,8 +558,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Mov(x1, x4);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
__ Jump(x2);
__ JumpCodeObject(x2);
}
__ Bind(&prepare_step_in_if_stepping);
......@@ -1057,8 +1052,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Add(x2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeObjectEntry(x2, optimized_code_entry);
__ Jump(x2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
......@@ -1635,8 +1629,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
__ Ldr(x4, FieldMemOperand(new_target, JSFunction::kCodeOffset));
__ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
__ Jump(x4);
__ JumpCodeObject(x4);
}
namespace {
......@@ -1692,8 +1685,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ Pop(fp, lr);
// Call builtin.
__ Add(builtin, builtin, Code::kHeaderSize - kHeapObjectTag);
__ Br(builtin);
__ JumpCodeObject(builtin);
}
} // namespace
......@@ -2893,8 +2885,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// x3 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
__ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(x2);
__ CallCodeObject(x2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
......@@ -2908,8 +2899,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
__ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(x2);
__ JumpCodeObject(x2);
__ Bind(&stack_overflow);
__ RecordComment("-- Stack overflow --");
......
......@@ -71,8 +71,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
}
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
__ jmp(ecx);
__ JumpCodeObject(ecx);
}
namespace {
......@@ -692,8 +691,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
__ JumpCodeObject(ecx);
}
__ bind(&prepare_step_in_if_stepping);
......@@ -855,8 +853,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
edx, eax);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ Move(ecx, optimized_code_entry);
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeObjectEntry(ecx, optimized_code_entry);
__ pop(edx);
__ pop(eax);
__ jmp(ecx);
......@@ -1498,8 +1495,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// which has been reset to the compile lazy builtin.
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
__ JumpCodeObject(ecx);
}
namespace {
......@@ -2518,8 +2514,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// edi : function (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ecx);
__ CallCodeObject(ecx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
......@@ -2534,8 +2529,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
__ JumpCodeObject(ecx);
__ bind(&stack_overflow);
{
......
......@@ -70,8 +70,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(rax, rax);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
__ jmp(rcx);
__ JumpCodeObject(rcx);
}
namespace {
......@@ -834,8 +833,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
decompr_scratch_for_debug);
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
__ JumpCodeObject(rcx);
}
__ bind(&prepare_step_in_if_stepping);
......@@ -1010,8 +1008,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
scratch2, scratch3, feedback_vector);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ Move(rcx, optimized_code_entry);
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
__ JumpCodeObject(rcx);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
......@@ -1424,6 +1421,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ jmp(&trampoline_loaded, Label::kNear);
__ bind(&builtin_trampoline);
// TODO(jgruber): Replace this by a lookup in the builtin entry table.
__ movp(rbx,
__ ExternalReferenceAsOperand(
ExternalReference::
......@@ -1574,8 +1572,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
decompr_scratch_for_debug);
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
__ JumpCodeObject(rcx);
}
namespace {
......@@ -2012,8 +2009,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
decompr_scratch_for_debug);
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rcx);
__ CallCodeObject(rcx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
......@@ -2029,8 +2025,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
decompr_scratch_for_debug);
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
__ JumpCodeObject(rcx);
__ bind(&stack_overflow);
{
......
......@@ -731,8 +731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ add(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(reg);
__ CallCodeObject(reg);
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
......@@ -774,8 +773,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ add(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(reg);
__ JumpCodeObject(reg);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
unwinding_info_writer_.MarkBlockWillExit();
......@@ -821,8 +819,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ ldr(r2, FieldMemOperand(func, JSFunction::kCodeOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(r2);
__ CallCodeObject(r2);
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
frame_access_state()->ClearSPDelta();
......
......@@ -613,8 +613,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Add(reg, reg, Code::kHeaderSize - kHeapObjectTag);
__ Call(reg);
__ CallCodeObject(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
......@@ -655,8 +654,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Add(reg, reg, Code::kHeaderSize - kHeapObjectTag);
__ Jump(reg);
__ JumpCodeObject(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
......@@ -701,8 +699,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Ldr(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
__ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(x2);
__ CallCodeObject(x2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
......
......@@ -641,7 +641,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeObjectEntry(reg, reg);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
......@@ -659,7 +659,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand virtual_call_target_register(
kRootRegister, IsolateData::virtual_call_target_register_offset());
__ mov(eax, i.InputOperand(0));
__ add(eax, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeObjectEntry(eax, eax);
__ mov(virtual_call_target_register, eax);
__ pop(eax);
frame_access_state()->IncreaseSPDelta(-1);
......@@ -716,7 +716,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeObjectEntry(reg, reg);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
......@@ -768,8 +768,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(func, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ecx);
__ CallCodeObject(ecx);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
......
......@@ -692,7 +692,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeObjectEntry(reg, reg);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
......@@ -751,7 +751,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeObjectEntry(reg, reg);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
......@@ -812,8 +812,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ LoadTaggedPointerField(rcx,
FieldOperand(func, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rcx);
__ CallCodeObject(rcx);
frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
......
......@@ -979,8 +979,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
DCHECK(!AreAliased(centry, kRuntimeCallArgCountRegister,
kRuntimeCallFunctionRegister));
add(centry, Immediate(Code::kHeaderSize - kHeapObjectTag));
Call(centry);
CallCodeObject(centry);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
......@@ -1239,12 +1238,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
call(ecx);
CallCodeObject(ecx);
} else {
DCHECK(flag == JUMP_FUNCTION);
jmp(ecx);
JumpCodeObject(ecx);
}
bind(&done);
}
......@@ -1846,25 +1844,19 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (FLAG_embedded_builtins) {
if (root_array_available() && options().isolate_independent_code &&
!Builtins::IsIsolateIndependentBuiltin(*code_object)) {
// All call targets are expected to be isolate-independent builtins.
// If this assumption is ever violated, we could add back support for
// calls through a virtual target register.
UNREACHABLE();
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
call(entry, RelocInfo::OFF_HEAP_TARGET);
return;
}
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
call(entry, RelocInfo::OFF_HEAP_TARGET);
return;
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
......@@ -1885,26 +1877,72 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
call(builtin_pointer);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
// * Builtin code runs in --jitless mode and thus must not call into on-heap
// Code targets. Instead, we dispatch through the builtins entry table.
// * Codegen at runtime does not have this restriction and we can use the
// shorter, branchless instruction sequence. The assumption here is that
// targets are usually generated code and not builtin Code objects.
if (options().isolate_independent_code) {
DCHECK(root_array_available());
Label if_code_is_builtin, out;
// Check whether the Code object is a builtin. If so, call its (off-heap)
// entry point directly without going through the (on-heap) trampoline.
// Otherwise, just call the Code object as always.
cmp(FieldOperand(code_object, Code::kBuiltinIndexOffset),
Immediate(Builtins::kNoBuiltinId));
j(not_equal, &if_code_is_builtin);
// A non-builtin Code object, the entry point is at
// Code::raw_instruction_start().
Move(destination, code_object);
add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
jmp(&out);
// A builtin Code object, the entry point is loaded from the builtin entry
// table.
bind(&if_code_is_builtin);
mov(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
mov(destination, Operand(kRootRegister, destination, times_pointer_size,
IsolateData::builtin_entry_table_offset()));
bind(&out);
} else {
Move(destination, code_object);
add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
}
}
void TurboAssembler::CallCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
jmp(code_object);
}
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (FLAG_embedded_builtins) {
if (root_array_available() && options().isolate_independent_code &&
!Builtins::IsIsolateIndependentBuiltin(*code_object)) {
// All call targets are expected to be isolate-independent builtins.
// If this assumption is ever violated, we could add back support for
// calls through a virtual target register.
UNREACHABLE();
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
jmp(entry, RelocInfo::OFF_HEAP_TARGET);
return;
}
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
jmp(entry, RelocInfo::OFF_HEAP_TARGET);
return;
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
......
......@@ -96,6 +96,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltinPointer(Register builtin_pointer) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
......
......@@ -233,6 +233,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltinPointer(Register builtin_pointer) override;
void LoadCodeObjectEntry(Register destination,
Register code_object) override {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
void CallCodeObject(Register code_object) override {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
void JumpCodeObject(Register code_object) override {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
......
......@@ -258,6 +258,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltinPointer(Register builtin_pointer) override;
void LoadCodeObjectEntry(Register destination,
Register code_object) override {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
void CallCodeObject(Register code_object) override {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
void JumpCodeObject(Register code_object) override {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
......
......@@ -43,6 +43,15 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
// object on the heap is not used.
virtual void CallBuiltinPointer(Register builtin_pointer) = 0;
// Calls/jumps to the given Code object. If builtins are embedded, the
// trampoline Code object on the heap is not used.
virtual void CallCodeObject(Register code_object) = 0;
virtual void JumpCodeObject(Register code_object) = 0;
// Loads the given Code object's entry point into the destination register.
virtual void LoadCodeObjectEntry(Register destination,
Register code_object) = 0;
// Loads the given constant or external reference without embedding its direct
// pointer. The produced code is isolate-independent.
void IndirectLoadConstant(Register destination, Handle<HeapObject> object);
......
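One case in the code generators above cannot use CallCodeObject/JumpCodeObject
directly: when a retpoline is requested, the entry point has to be in the
target register before branching through the retpoline thunk, so the ia32/x64
backends call LoadCodeObjectEntry and then RetpolineCall/RetpolineJump. A
condensed sketch of that pattern, mirroring the kArchCallCodeObject handling
in the hunks above:

  // 'reg' holds the target Code object (hypothetical excerpt).
  __ LoadCodeObjectEntry(reg, reg);
  if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
    __ RetpolineCall(reg);  // branch through the retpoline thunk
  } else {
    __ call(reg);           // plain indirect call to the loaded entry point
  }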
......@@ -631,8 +631,8 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Set(rax, f->nargs);
LoadAddress(rbx, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, rax, rbx));
addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
Call(centry);
DCHECK(centry == rcx);
CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
......@@ -1624,44 +1624,26 @@ void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Condition cc) {
// TODO(X64): Inline this
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code &&
!Builtins::IsIsolateIndependentBuiltin(*code_object)) {
// Calls to embedded targets are initially generated as standard
// pc-relative calls below. When creating the embedded blob, call offsets
// are patched up to point directly to the off-heap instruction start.
// Note: It is safe to dereference code_object above since code generation
// for builtins and code stubs happens on the main thread.
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
Label skip;
if (cc != always) {
if (cc == never) return;
j(NegateCondition(cc), &skip, Label::kNear);
}
IndirectLoadConstant(kScratchRegister, code_object);
leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
jmp(kScratchRegister);
bind(&skip);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
Label skip;
if (cc != always) {
if (cc == never) return;
j(NegateCondition(cc), &skip, Label::kNear);
}
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
jmp(kScratchRegister);
bind(&skip);
return;
}
}
}
j(cc, code_object, rmode);
......@@ -1692,31 +1674,20 @@ void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code &&
!Builtins::IsIsolateIndependentBuiltin(*code_object)) {
// Calls to embedded targets are initially generated as standard
// pc-relative calls below. When creating the embedded blob, call offsets
// are patched up to point directly to the off-heap instruction start.
// Note: It is safe to dereference code_object above since code generation
// for builtins and code stubs happens on the main thread.
IndirectLoadConstant(kScratchRegister, code_object);
leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
return;
}
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
......@@ -1748,6 +1719,58 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
#endif // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
// * Builtin code runs in --jitless mode and thus must not call into on-heap
// Code targets. Instead, we dispatch through the builtins entry table.
// * Codegen at runtime does not have this restriction and we can use the
// shorter, branchless instruction sequence. The assumption here is that
// targets are usually generated code and not builtin Code objects.
if (options().isolate_independent_code) {
DCHECK(root_array_available());
Label if_code_is_builtin, out;
// Check whether the Code object is a builtin. If so, call its (off-heap)
// entry point directly without going through the (on-heap) trampoline.
// Otherwise, just call the Code object as always.
cmpl(FieldOperand(code_object, Code::kBuiltinIndexOffset),
Immediate(Builtins::kNoBuiltinId));
j(not_equal, &if_code_is_builtin);
// A non-builtin Code object, the entry point is at
// Code::raw_instruction_start().
Move(destination, code_object);
addp(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
jmp(&out);
// A builtin Code object, the entry point is loaded from the builtin entry
// table.
bind(&if_code_is_builtin);
movl(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
movp(destination, Operand(kRootRegister, destination, times_pointer_size,
IsolateData::builtin_entry_table_offset()));
bind(&out);
} else {
Move(destination, code_object);
addp(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
}
}
void TurboAssembler::CallCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object) {
LoadCodeObjectEntry(code_object, code_object);
jmp(code_object);
}
void TurboAssembler::RetpolineCall(Register reg) {
Label setup_return, setup_target, inner_indirect_branch, capture_spec;
......@@ -2380,12 +2403,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
LoadTaggedPointerField(rcx,
FieldOperand(function, JSFunction::kCodeOffset));
addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
call(rcx);
CallCodeObject(rcx);
} else {
DCHECK(flag == JUMP_FUNCTION);
jmp(rcx);
JumpCodeObject(rcx);
}
bind(&done);
}
......
......@@ -360,6 +360,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltinPointer(Register builtin_pointer) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
......
......@@ -21,7 +21,7 @@ std::unique_ptr<NativeModule> NewModule(Isolate* isolate) {
WasmCodeManager* manager = isolate->wasm_engine()->code_manager();
std::shared_ptr<WasmModule> module(new WasmModule);
bool can_request_more = false;
size_t size = 100;
size_t size = 16384;
auto native_module = manager->NewNativeModule(
isolate, kAllWasmFeatures, size, can_request_more, std::move(module));
native_module->SetRuntimeStubs(isolate);
......