Commit 06a2c2e0 authored by Thibaud Michaud, committed by Commit Bot

[wasm][x64] Fix OSR shadow stack violation

We currently allow OSR (On-Stack Replacement) of arbitrarily deep return
addresses. This is in direct violation of Intel CET's shadow stack,
which we plan to enable eventually.

This change works around this by postponing OSR until after we return to
the old code. The main changes are:
- Reserve a slot in Liftoff frames to store the OSR target,
- Skip the return address modification, and instead store the new code
pointer in the dedicated slot,
- Upon returning to the old code, check the slot and do an indirect jump
to the new code if needed.

CET also prevents indirect jumps to arbitrary locations, so the last
point is also a CET violation. Valid indirect jump targets must be
marked with the ENDBRANCH instruction, which I will do in a follow-up
CL.

Bug: v8:11654
Change-Id: I6925005211aa95d60803b9409e3c07c7c226b25c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2826127
Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73977}
parent 98427ffe
......@@ -2641,6 +2641,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
// Non-x64 stub: shadow-stack-compatible OSR reserves a frame slot only on
// x64, so this builtin must never be reached on this platform.
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
......
......@@ -3038,6 +3038,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
// Non-x64 stub: the OSR-target frame slot and the check that dispatches to
// this builtin exist only on x64, so hitting this here is a bug — trap.
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
......
......@@ -863,6 +863,7 @@ namespace internal {
IF_WASM(ASM, GenericJSToWasmWrapper, Dummy) \
IF_WASM(ASM, WasmCompileLazy, Dummy) \
IF_WASM(ASM, WasmDebugBreak, Dummy) \
IF_WASM(ASM, WasmOnStackReplace, Dummy) \
IF_WASM(TFC, WasmFloat32ToNumber, WasmFloat32ToNumber) \
IF_WASM(TFC, WasmFloat64ToNumber, WasmFloat64ToNumber) \
IF_WASM(TFC, WasmI32AtomicWait32, WasmI32AtomicWait32) \
......
......@@ -2897,6 +2897,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
// Non-x64 stub: deliberately unreachable — only the x64 Liftoff code emits
// calls to this builtin (see the x64 implementation below).
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
// Only needed on x64.
__ Trap();
}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
......
......@@ -3529,6 +3529,13 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ jmp(&compile_wrapper_done);
}
// x64 OSR trampoline. The Liftoff frame reserves a slot at
// rbp - kOSRTargetOffset that the debugger fills with the entry address of
// the replacement code (0 means "no pending OSR"). Load the target, clear
// the slot so the request is consumed exactly once, and jump to the new code.
// NOTE(review): an indirect jmp to an arbitrary target is itself a CET
// violation until the target carries ENDBRANCH — per the commit message this
// is addressed in a follow-up CL.
void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
MemOperand OSRTargetSlot(rbp, -wasm::kOSRTargetOffset);
__ movq(kScratchRegister, OSRTargetSlot);
__ movq(OSRTargetSlot, Immediate(0));
__ jmp(kScratchRegister);
}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
......
......@@ -1414,12 +1414,13 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
}
// Emits a long conditional jump (opcode 0x0F, 0x80|cc, rel32) to a fixed
// address, recording relocation info so the GC/serializer can patch it.
// NOTE(review): this span is a scraped diff that lost its +/- markers. It
// shows BOTH the pre-change body (IsRuntimeEntry DCHECK + emit_runtime_entry)
// AND the post-change body (IsWasmStubCall DCHECK + RecordRelocInfo/emitl);
// only one of each pair exists in the real file — confirm against the
// upstream commit before relying on this text.
void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsRuntimeEntry(rmode));
DCHECK(RelocInfo::IsWasmStubCall(rmode));
EnsureSpace ensure_space(this);
// Condition codes occupy the low 4 bits of the second opcode byte.
DCHECK(is_uint4(cc));
emit(0x0F);
emit(0x80 | cc);
emit_runtime_entry(entry, rmode);
RecordRelocInfo(rmode);
emitl(static_cast<int32_t>(entry));
}
void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
......
......@@ -605,6 +605,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
str(instance, liftoff::GetInstanceOperand());
}
// No-op on arm: the OSR-target frame slot exists only in x64 Liftoff frames.
void LiftoffAssembler::ResetOSRTarget() {}
// Reloads the WasmInstanceObject pointer from its dedicated frame slot
// into dst (arm: ldr from the instance operand).
void LiftoffAssembler::FillInstanceInto(Register dst) {
ldr(dst, liftoff::GetInstanceOperand());
}
......@@ -4237,6 +4239,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(sp, sp, Operand(size));
}
// No-op on arm: the post-call OSR check is emitted only on x64.
void LiftoffAssembler::MaybeOSR() {}
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
......
......@@ -430,6 +430,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
Str(instance, liftoff::GetInstanceOperand());
}
// No-op on arm64: the OSR-target frame slot is reserved only on x64.
void LiftoffAssembler::ResetOSRTarget() {}
// Reloads the WasmInstanceObject pointer from its dedicated frame slot
// into dst (arm64: Ldr from the instance operand).
void LiftoffAssembler::FillInstanceInto(Register dst) {
Ldr(dst, liftoff::GetInstanceOperand());
}
......@@ -3221,6 +3223,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Drop(size, 1);
}
// No-op on arm64: the post-call OSR check is emitted only on x64.
void LiftoffAssembler::MaybeOSR() {}
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
// The stack pointer is required to be quadword aligned.
......
......@@ -332,6 +332,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
mov(liftoff::GetInstanceOperand(), instance);
}
// No-op on ia32: the OSR-target frame slot is reserved only on x64.
void LiftoffAssembler::ResetOSRTarget() {}
// Reloads the WasmInstanceObject pointer from its dedicated frame slot
// into dst (ia32: mov from the instance operand).
void LiftoffAssembler::FillInstanceInto(Register dst) {
mov(dst, liftoff::GetInstanceOperand());
}
......@@ -4910,6 +4912,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(esp, Immediate(size));
}
// No-op on ia32: the post-call OSR check is emitted only on x64.
void LiftoffAssembler::MaybeOSR() {}
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
......
......@@ -633,6 +633,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadTaggedPointerFromInstance(Register dst, Register instance,
int offset);
inline void SpillInstance(Register instance);
inline void ResetOSRTarget();
inline void FillInstanceInto(Register dst);
inline void LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg, int32_t offset_imm,
......@@ -1416,6 +1417,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void AllocateStackSlot(Register addr, uint32_t size);
inline void DeallocateStackSlot(uint32_t size);
// Instrumentation for shadow-stack-compatible OSR on x64.
inline void MaybeOSR();
////////////////////////////////////
// End of platform-specific part. //
////////////////////////////////////
......
......@@ -742,6 +742,7 @@ class LiftoffCompiler {
// Store the instance parameter to a special stack slot.
__ SpillInstance(kWasmInstanceRegister);
__ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
if (for_debugging_) __ ResetOSRTarget();
// Process parameters.
if (num_params) DEBUG_CODE_COMMENT("process parameters");
......@@ -909,6 +910,9 @@ class LiftoffCompiler {
ool->debug_sidetable_entry_builder->set_pc_offset(__ pc_offset());
}
DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check);
if (is_stack_check) {
MaybeOSR();
}
if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save);
if (is_stack_check) {
if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
......@@ -1046,6 +1050,7 @@ class LiftoffCompiler {
DefineSafepointWithCalleeSavedRegisters();
RegisterDebugSideTableEntry(decoder,
DebugSideTableBuilder::kAllowRegisters);
MaybeOSR();
}
void PushControl(Control* block) {
......@@ -1181,6 +1186,7 @@ class LiftoffCompiler {
if (depth == decoder->control_depth() - 1) {
// Delegate to the caller, do not emit a landing pad.
Rethrow(decoder, __ cache_state()->stack_state.back());
MaybeOSR();
} else {
DCHECK(target->is_incomplete_try());
if (!target->try_info->catch_reached) {
......@@ -1201,7 +1207,9 @@ class LiftoffCompiler {
int index = try_block->try_info->catch_state.stack_height() - 1;
auto& exception = __ cache_state()->stack_state[index];
Rethrow(decoder, exception);
EmitLandingPad(decoder);
int pc_offset = __ pc_offset();
MaybeOSR();
EmitLandingPad(decoder, pc_offset);
}
void CatchAll(FullDecoder* decoder, Control* block) {
......@@ -4057,10 +4065,9 @@ class LiftoffCompiler {
DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception));
}
void EmitLandingPad(FullDecoder* decoder) {
void EmitLandingPad(FullDecoder* decoder, int handler_offset) {
if (current_catch_ == -1) return;
MovableLabel handler;
int handler_offset = __ pc_offset();
// If we return from the throwing code normally, just skip over the handler.
Label skip_handler;
......@@ -4105,6 +4112,7 @@ class LiftoffCompiler {
{LiftoffAssembler::VarState{
kSmiKind, LiftoffRegister{encoded_size_reg}, 0}},
decoder->position());
MaybeOSR();
// The FixedArray for the exception values is now in the first gp return
// register.
......@@ -4139,7 +4147,9 @@ class LiftoffCompiler {
LiftoffAssembler::VarState{kPointerKind, values_array, 0}},
decoder->position());
EmitLandingPad(decoder);
int pc_offset = __ pc_offset();
MaybeOSR();
EmitLandingPad(decoder, pc_offset);
}
void AtomicStoreMem(FullDecoder* decoder, StoreType type,
......@@ -5429,6 +5439,7 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target);
FinishCall(decoder, sig, call_descriptor);
}
} else {
// A direct call within this module just gets the current instance.
......@@ -5446,15 +5457,9 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallNativeWasmCode(addr);
FinishCall(decoder, sig, call_descriptor);
}
}
if (!tail_call) {
DefineSafepoint();
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
EmitLandingPad(decoder);
__ FinishCall(sig, call_descriptor);
}
}
void CallIndirect(FullDecoder* decoder, const Value& index_val,
......@@ -5619,10 +5624,7 @@ class LiftoffCompiler {
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target);
DefineSafepoint();
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
EmitLandingPad(decoder);
__ FinishCall(sig, call_descriptor);
FinishCall(decoder, sig, call_descriptor);
}
}
......@@ -5820,10 +5822,7 @@ class LiftoffCompiler {
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target_reg);
DefineSafepoint();
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
EmitLandingPad(decoder);
__ FinishCall(sig, call_descriptor);
FinishCall(decoder, sig, call_descriptor);
}
}
......@@ -5948,6 +5947,22 @@ class LiftoffCompiler {
WASM_STRUCT_TYPE - WASM_ARRAY_TYPE);
}
// Emits the OSR-slot check, but only when compiling for-debugging code:
// non-debug Liftoff frames have no OSR-target slot, and OSR only ever
// replaces code compiled for debugging. V8_UNLIKELY keeps the common
// (non-debug) compilation path branch-predictor friendly.
void MaybeOSR() {
if (V8_UNLIKELY(for_debugging_)) {
__ MaybeOSR();
}
}
// Shared post-call epilogue used by direct, indirect and ref calls:
// records the safepoint and debug side-table entry, emits the OSR check,
// then the exception landing pad, then lets the assembler finish the call
// (move results into place). The pc_offset is captured BEFORE MaybeOSR and
// passed to EmitLandingPad as the handler offset — presumably so the handler
// table maps the return address of the call itself, not the OSR check code;
// TODO(review): confirm against EmitLandingPad's use of handler_offset.
void FinishCall(FullDecoder* decoder, ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor) {
DefineSafepoint();
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
int pc_offset = __ pc_offset();
MaybeOSR();
EmitLandingPad(decoder, pc_offset);
__ FinishCall(sig, call_descriptor);
}
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{
kI32, kI64, kF32, kF64};
......
......@@ -70,6 +70,8 @@ inline Operand GetStackSlot(int offset) { return Operand(rbp, -offset); }
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
// Operand addressing the frame slot (rbp-relative) caching the instance.
inline Operand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
// Operand addressing the frame slot holding the pending OSR target (0 = none).
inline Operand GetOSRTargetSlot() { return GetStackSlot(kOSRTargetOffset); }
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
uintptr_t offset_imm) {
if (is_uint31(offset_imm)) {
......@@ -249,7 +251,7 @@ void LiftoffAssembler::AbortCompilation() {}
// static
// Size of the fixed part of a Liftoff frame on x64.
// NOTE(review): scraped diff lost its +/- markers — the two return statements
// below are the old body (liftoff::kInstanceOffset) and the new body
// (kOSRTargetOffset); after this commit only the second exists, growing the
// static frame to cover the new OSR-target slot. Confirm against upstream.
constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
return kOSRTargetOffset;
}
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
......@@ -322,6 +324,10 @@ void LiftoffAssembler::SpillInstance(Register instance) {
movq(liftoff::GetInstanceOperand(), instance);
}
// Zeroes the OSR-target frame slot so a stale value is never mistaken for a
// pending OSR request (the slot is tested on every MaybeOSR check).
void LiftoffAssembler::ResetOSRTarget() {
movq(liftoff::GetOSRTargetSlot(), Immediate(0));
}
// Reloads the WasmInstanceObject pointer from its dedicated frame slot
// into dst (x64: movq from the instance operand).
void LiftoffAssembler::FillInstanceInto(Register dst) {
movq(dst, liftoff::GetInstanceOperand());
}
......@@ -4424,6 +4430,12 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addq(rsp, Immediate(size));
}
// Checks the OSR-target slot after returning into old code: if the debugger
// stored a replacement-code address there (non-zero), jump to the
// WasmOnStackReplace builtin, which clears the slot and transfers control to
// the new code. This replaces direct return-address patching, which would
// violate Intel CET's shadow stack.
void LiftoffAssembler::MaybeOSR() {
cmpq(liftoff::GetOSRTargetSlot(), Immediate(0));
j(not_equal, static_cast<Address>(WasmCode::kWasmOnStackReplace),
RelocInfo::WASM_STUB_CALL);
}
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
......
......@@ -94,7 +94,8 @@ struct WasmModule;
V(WasmAllocateArrayWithRtt) \
V(WasmAllocateRtt) \
V(WasmAllocateStructWithRtt) \
V(WasmSubtypeCheck)
V(WasmSubtypeCheck) \
V(WasmOnStackReplace)
// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
......
......@@ -156,6 +156,10 @@ constexpr int kAnonymousFuncIndex = -1;
// often enough.
constexpr uint32_t kGenericWrapperBudget = 1000;
#if V8_TARGET_ARCH_X64
constexpr int32_t kOSRTargetOffset = 3 * kSystemPointerSize;
#endif
} // namespace wasm
} // namespace internal
} // namespace v8
......
......@@ -697,13 +697,19 @@ class DebugInfoImpl {
DCHECK_EQ(frame->function_index(), new_code->index());
DCHECK_EQ(frame->native_module(), new_code->native_module());
DCHECK(frame->wasm_code()->is_liftoff());
Address new_pc =
FindNewPC(frame, new_code, frame->byte_offset(), return_location);
#ifdef DEBUG
int old_position = frame->position();
#endif
Address new_pc =
FindNewPC(frame, new_code, frame->byte_offset(), return_location);
#if V8_TARGET_ARCH_X64
if (frame->wasm_code()->for_debugging()) {
base::Memory<Address>(frame->fp() - kOSRTargetOffset) = new_pc;
}
#else
PointerAuthentication::ReplacePC(frame->pc_address(), new_pc,
kSystemPointerSize);
#endif
// The frame position should still be the same after OSR.
DCHECK_EQ(old_position, frame->position());
}
......
......@@ -1281,6 +1281,18 @@ void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
StackFrame* const frame = it.frame();
if (frame->type() != StackFrame::WASM) continue;
live_wasm_code.insert(WasmFrame::cast(frame)->wasm_code());
#if V8_TARGET_ARCH_X64
if (WasmFrame::cast(frame)->wasm_code()->for_debugging()) {
Address osr_target = base::Memory<Address>(WasmFrame::cast(frame)->fp() -
kOSRTargetOffset);
if (osr_target) {
WasmCode* osr_code =
isolate->wasm_engine()->code_manager()->LookupCode(osr_target);
DCHECK_NOT_NULL(osr_code);
live_wasm_code.insert(osr_code);
}
}
#endif
}
CheckNoArchivedThreads(isolate);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment