Commit 6ab19087 authored by jgruber, committed by Commit Bot

[builtins] Mark initial builtins on ia32 isolate-independent

This populates the isolate-independent builtin whitelist with initial
builtins that do not access any isolate-dependent data and thus don't
need the root register at all.

Unlike most other platforms, we can't use a scratch register in the
off-heap trampoline since there's no free register available. The
trampolines on ia32 are thus implemented as pc-relative jumps
(thankfully we can address the entire address space).
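
For illustration, a minimal standalone sketch (not V8 code; the addresses are
hypothetical) of the displacement arithmetic behind such a trampoline: an ia32
jmp rel32 encodes its target relative to the end of the 5-byte instruction,
and unsigned 32-bit wraparound is exactly why the entire 4 GB address space is
reachable.

#include <cstdint>
#include <cstdio>

// rel32 operand of an ia32 `jmp rel32` (opcode 0xE9). The displacement is
// measured from the end of the 5-byte instruction; unsigned wraparound makes
// any 32-bit target encodable.
uint32_t JmpRel32(uint32_t jmp_address, uint32_t target) {
  const uint32_t kInstrSize = 5;  // 1 opcode byte + 4 displacement bytes.
  return target - (jmp_address + kInstrSize);
}

int main() {
  uint32_t trampoline = 0x08001000;      // hypothetical on-heap trampoline pc
  uint32_t off_heap_entry = 0xF0002000;  // hypothetical embedded-blob entry
  std::printf("rel32 = 0x%08x\n", JmpRel32(trampoline, off_heap_entry));
  return 0;
}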

Drive-by: Made Code::IsIsolateIndependent consistent with
FinalizeEmbeddedCodeTargets: relative code targets are only emitted (and thus
only treated as process-independent) on some platforms, namely x64, arm, and
arm64.

Bug: v8:6666
Change-Id: I0bf02eecba8a099afa7b7c892188cd377cbda840
Reviewed-on: https://chromium-review.googlesource.com/1183224
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55265}
parent e44e4636
@@ -338,6 +338,15 @@ bool Builtins::IsIsolateIndependent(int index) {
   // ia32 is a work-in-progress. This will let us make builtins
   // isolate-independent one-by-one.
   switch (index) {
+    case kContinueToCodeStubBuiltin:
+    case kContinueToCodeStubBuiltinWithResult:
+    case kContinueToJavaScriptBuiltin:
+    case kContinueToJavaScriptBuiltinWithResult:
+    case kWasmAllocateHeapNumber:
+    case kWasmCallJavaScript:
+    case kWasmToNumber:
+    case kDoubleToI:
+      return true;
     default:
       return false;
   }

@@ -2411,7 +2411,8 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
     // TODO(v8:6666): Extend support to all builtins and user code. Ensure that
     // it is mutually exclusive with the Poisoning configuration above; and that
     // it cooperates with restricted allocatable registers above.
-    static_assert(kRootRegister == kSpeculationPoisonRegister);
+    static_assert(kRootRegister == kSpeculationPoisonRegister,
+                  "The following checks assume root equals poison register");
     CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_branch_load_poisoning);
     CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_untrusted_code_mitigations);
     AllocateRegisters(RegisterConfiguration::PreserveRootIA32(),

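A standalone sketch of the exclusivity checks above, assuming (as in V8) that
CHECK_IMPLIES(a, b) fails exactly when a holds and b does not; the flag values
here are hypothetical:

#include <cassert>

// Simplified stand-in for V8's CHECK_IMPLIES(a, b).
#define CHECK_IMPLIES(a, b) assert(!(a) || (b))

int main() {
  // Hypothetical flag values: embedded builtins reserve the root register,
  // which doubles as the poison register, so poisoning must stay disabled.
  bool FLAG_embedded_builtins = true;
  bool FLAG_branch_load_poisoning = false;
  bool FLAG_untrusted_code_mitigations = false;
  CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_branch_load_poisoning);
  CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_untrusted_code_mitigations);
  return 0;
}
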
@@ -53,8 +53,13 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
 // The modes possibly affected by apply must be in kApplyMask.
 void RelocInfo::apply(intptr_t delta) {
+  DCHECK_EQ(kApplyMask, (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+                         RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+                         RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
+                         RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+                         RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY)));
   if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_) ||
-      rmode_ == RelocInfo::JS_TO_WASM_CALL) {
+      IsJsToWasmCall(rmode_) || IsOffHeapTarget(rmode_)) {
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p -= delta;  // Relocate entry.
   } else if (IsInternalReference(rmode_)) {

@@ -152,14 +157,15 @@ void RelocInfo::set_target_runtime_entry(Address target,
 Address RelocInfo::target_off_heap_target() {
   DCHECK(IsOffHeapTarget(rmode_));
-  return Memory::Address_at(pc_);
+  return Assembler::target_address_at(pc_, constant_pool_);
 }

 void RelocInfo::WipeOut() {
   if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
       IsInternalReference(rmode_)) {
     Memory::Address_at(pc_) = kNullAddress;
-  } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
+  } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
+             IsOffHeapTarget(rmode_)) {
     // Effectively write zero into the relocation.
     Assembler::set_target_address_at(pc_, constant_pool_,
                                      pc_ + sizeof(int32_t));

@@ -189,16 +189,17 @@ void Displacement::init(Label* L, Type type) {
 const int RelocInfo::kApplyMask =
     RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
-    RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
     RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
-    RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL);
+    RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
+    RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+    RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);

 bool RelocInfo::IsCodedSpecially() {
   // The deserializer needs to know whether a pointer is specially coded. Being
   // specially coded on IA32 means that it is a relative address, as used by
   // branch instructions. These are also the ones that need changing when a
   // code object moves.
-  return (1 << rmode_) & kApplyMask;
+  return RelocInfo::ModeMask(rmode_) & kApplyMask;
 }

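The change from (1 << rmode_) to ModeMask(rmode_) is behavior-preserving,
assuming (as elsewhere in V8) that ModeMask(mode) is defined as 1 << mode; it
only makes the intent explicit. A standalone sketch of the mask test with a
simplified, made-up mode enum:

#include <cstdio>

enum Mode { CODE_TARGET, RUNTIME_ENTRY, INTERNAL_REFERENCE, OFF_HEAP_TARGET };
constexpr int ModeMask(Mode mode) { return 1 << mode; }

// Simplified kApplyMask mirroring the hunk above: OFF_HEAP_TARGET is now in.
constexpr int kApplyMask = ModeMask(CODE_TARGET) |
                           ModeMask(INTERNAL_REFERENCE) |
                           ModeMask(OFF_HEAP_TARGET) | ModeMask(RUNTIME_ENTRY);

bool IsCodedSpecially(Mode rmode) {
  return (ModeMask(rmode) & kApplyMask) != 0;
}

int main() {
  std::printf("%d\n", IsCodedSpecially(OFF_HEAP_TARGET));  // prints 1
  return 0;
}
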
@@ -893,8 +893,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
 }

 void MacroAssembler::JumpToInstructionStream(Address entry) {
-  mov(kOffHeapTrampolineRegister, Immediate(entry, RelocInfo::OFF_HEAP_TARGET));
-  jmp(kOffHeapTrampolineRegister);
+  jmp(entry, RelocInfo::OFF_HEAP_TARGET);
 }

 void TurboAssembler::PrepareForTailCall(

@@ -14569,9 +14569,14 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
   bool is_process_independent = true;
   for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
+    defined(V8_TARGET_ARCH_ARM)
+    // On X64, ARM, ARM64 we emit relative builtin-to-builtin jumps for
+    // isolate-independent builtins in the snapshot. They are later rewritten
+    // as pc-relative jumps to the off-heap instruction stream and are thus
+    // process-independent.
+    // See also: FinalizeEmbeddedCodeTargets.
     if (RelocInfo::IsCodeTargetMode(it.rinfo()->rmode())) {
-      // Off-heap code targets are later rewritten as pc-relative jumps to the
-      // off-heap instruction stream and are thus process-independent.
       Address target_address = it.rinfo()->target_address();
       if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue;
@@ -14579,6 +14584,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
       CHECK(target->IsCode());
       if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
     }
+#endif
     is_process_independent = false;
   }

@@ -329,10 +329,11 @@ RelocIterator::RelocIterator(Code* host, Address pc, Address constant_pool,
 // static
 bool RelocInfo::OffHeapTargetIsCodedSpecially() {
 #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
-    defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
+    defined(V8_TARGET_ARCH_X64)
   return false;
-#elif defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
-    defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390)
+#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
+    defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) ||  \
+    defined(V8_TARGET_ARCH_S390)
   return true;
 #endif
 }

@@ -350,6 +350,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
     // On X64, ARM, ARM64 we emit relative builtin-to-builtin jumps for
     // isolate-independent builtins in the snapshot. This fixes up the
     // relative jumps to the right offsets in the snapshot.
+    // See also: Code::IsIsolateIndependent.
     while (!on_heap_it.done()) {
       DCHECK(!off_heap_it.done());