Commit c9941af2 authored by Ross McIlroy, committed by Commit Bot

[Interpreter] Add poisoning to bytecode operand reads.

BUG=chromium:798964

Change-Id: I63c373ef3f27a3295fc79f5c82d78b5fd89a83da
Reviewed-on: https://chromium-review.googlesource.com/888752
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50925}
parent 8a27c7d3
......@@ -300,7 +300,8 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -24,6 +24,7 @@ constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
constexpr Register kInterpreterDispatchTableRegister = r8;
constexpr Register kInterpreterTargetBytecodeRegister = r4;
constexpr Register kJavaScriptCallArgCountRegister = r0;
constexpr Register kJavaScriptCallNewTargetRegister = r3;
constexpr Register kRuntimeCallFunctionRegister = r1;
......
......@@ -311,7 +311,8 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -51,6 +51,7 @@ namespace internal {
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kInterpreterTargetBytecodeRegister x18
#define kJavaScriptCallArgCountRegister x0
#define kJavaScriptCallNewTargetRegister x3
#define kRuntimeCallFunctionRegister x1
......
......@@ -1019,11 +1019,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(r4, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
__ Call(r4);
__ ldrb(kInterpreterTargetBytecodeRegister,
MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(r1,
MemOperand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, LSL, kPointerSizeLog2));
__ Call(r1);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
......@@ -1223,12 +1225,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldrb(kInterpreterTargetBytecodeRegister,
MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ ldr(scratch, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
__ ldr(scratch,
MemOperand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, LSL, kPointerSizeLog2));
__ Jump(scratch);
}
......
......@@ -1112,9 +1112,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
__ Ldrb(kInterpreterTargetBytecodeRegister,
MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1,
Operand(kInterpreterTargetBytecodeRegister, LSL, kPointerSizeLog2));
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
__ Call(ip0);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
......@@ -1344,9 +1346,11 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
__ Ldrb(kInterpreterTargetBytecodeRegister,
MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1,
Operand(kInterpreterTargetBytecodeRegister, LSL, kPointerSizeLog2));
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
__ Jump(ip0);
}
......
......@@ -941,11 +941,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Immediate(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
times_pointer_size, 0));
__ call(ebx);
__ movzx_b(kInterpreterTargetBytecodeRegister,
Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(edx,
Operand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, times_pointer_size, 0));
__ call(edx);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
......@@ -1269,11 +1271,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
times_pointer_size, 0));
__ jmp(ebx);
__ movzx_b(kInterpreterTargetBytecodeRegister,
Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(edx,
Operand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, times_pointer_size, 0));
__ jmp(edx);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
......
......@@ -1003,8 +1003,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ Addu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a0, MemOperand(a0));
__ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
__ lbu(kInterpreterTargetBytecodeRegister, MemOperand(a0));
__ Lsa(at, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, kPointerSizeLog2);
__ lw(at, MemOperand(at));
__ Call(at);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
......@@ -1226,8 +1227,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
__ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
__ lbu(kInterpreterTargetBytecodeRegister, MemOperand(a1));
__ Lsa(a1, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, kPointerSizeLog2);
__ lw(a1, MemOperand(a1));
__ Jump(a1);
}
......
......@@ -1003,8 +1003,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ Daddu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(a0, MemOperand(a0));
__ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
__ Lbu(kInterpreterTargetBytecodeRegister, MemOperand(a0));
__ Dlsa(at, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, kPointerSizeLog2);
__ Ld(at, MemOperand(at));
__ Call(at);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
......@@ -1227,8 +1228,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(a1, MemOperand(a1));
__ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
__ Lbu(kInterpreterTargetBytecodeRegister, MemOperand(a1));
__ Dlsa(a1, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, kPointerSizeLog2);
__ Ld(a1, MemOperand(a1));
__ Jump(a1);
}
......
......@@ -1012,10 +1012,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
times_pointer_size, 0));
__ movzxbp(kInterpreterTargetBytecodeRegister,
Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx,
Operand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, times_pointer_size, 0));
__ call(rbx);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
......@@ -1243,10 +1245,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
times_pointer_size, 0));
__ movzxbp(kInterpreterTargetBytecodeRegister,
Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx,
Operand(kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister, times_pointer_size, 0));
__ jmp(rbx);
}
......
......@@ -1139,7 +1139,7 @@ Node* CodeAssembler::TailCallBytecodeDispatch(
// CSA-generated code
template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
Node*, Node*);
Node*, Node*, Node*);
Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
int input_count, Node* const* inputs) {
......
......@@ -492,6 +492,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64RoundTruncate, Float64T, Float64T) \
V(Word32Clz, Int32T, Word32T) \
V(Word32Not, Word32T, Word32T) \
V(WordNot, WordT, WordT) \
V(Int32AbsWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T) \
V(Int64AbsWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T) \
V(IntPtrAbsWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT) \
......
......@@ -296,7 +296,8 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -24,6 +24,7 @@ constexpr Register kInterpreterAccumulatorRegister = eax;
constexpr Register kInterpreterBytecodeOffsetRegister = ecx;
constexpr Register kInterpreterBytecodeArrayRegister = edi;
constexpr Register kInterpreterDispatchTableRegister = esi;
constexpr Register kInterpreterTargetBytecodeRegister = ebx;
constexpr Register kJavaScriptCallArgCountRegister = eax;
constexpr Register kJavaScriptCallNewTargetRegister = edx;
constexpr Register kRuntimeCallFunctionRegister = ebx;
......
......@@ -593,10 +593,11 @@ void ApiCallbackDescriptor::InitializePlatformIndependent(
void InterpreterDispatchDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable
// kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable,
// kTargetBytecode
MachineType machine_types[] = {
MachineType::AnyTagged(), MachineType::IntPtr(), MachineType::AnyTagged(),
MachineType::IntPtr()};
MachineType::IntPtr(), MachineType::IntPtr()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
......
......@@ -832,7 +832,7 @@ class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
: public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kAccumulator, kBytecodeOffset, kBytecodeArray,
kDispatchTable)
kDispatchTable, kTargetBytecode)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterDispatchDescriptor,
CallInterfaceDescriptor)
};
......
This diff is collapsed.
......@@ -56,9 +56,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Returns the smi immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandImmSmi(int operand_index);
// Returns the word-size sign-extended register index for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandReg(int operand_index);
// Returns the 32-bit unsigned runtime id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandRuntimeId(int operand_index);
......@@ -238,14 +235,14 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Dispatch to the bytecode.
compiler::Node* Dispatch();
// Dispatch to bytecode handler.
compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler) {
return DispatchToBytecodeHandler(handler, BytecodeOffset());
}
// Dispatch bytecode as wide operand variant.
void DispatchWide(OperandScale operand_scale);
// Dispatch to |target_bytecode| at |new_bytecode_offset|.
// |target_bytecode| should be equivalent to loading from the offset.
compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
compiler::Node* new_bytecode_offset);
// Abort with the given abort reason.
void Abort(AbortReason abort_reason);
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
......@@ -269,6 +266,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Lazily deserializes the current bytecode's handler and tail-calls into it.
void DeserializeLazyAndDispatch();
// Disables poisoning on speculative execution.
void DisableSpeculationPoisoning();
private:
// Returns a tagged pointer to the current function's BytecodeArray object.
compiler::Node* BytecodeArrayTaggedPointer();
......@@ -292,6 +292,14 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* LoadRegister(Node* reg_index);
void StoreRegister(compiler::Node* value, compiler::Node* reg_index);
// Generates a poison which can be used to mask values on speculative paths.
compiler::Node* GenerateSpeculationPoison(Node* current_bytecode);
// Poison |value| on speculative paths.
compiler::Node* PoisonOnSpeculationTagged(Node* value);
compiler::Node* PoisonOnSpeculationWord(Node* value);
compiler::Node* PoisonOnSpeculationInt32(Node* value);
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
void CallPrologue();
......@@ -319,16 +327,21 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// The |result_type| determines the size and signedness. of the
// value read. This method should only be used on architectures that
// do not support unaligned memory accesses.
compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
MachineType result_type);
// Returns zero- or sign-extended to word32 value of the operand.
compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
compiler::Node* BytecodeOperandSignedByte(int operand_index);
compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
compiler::Node* BytecodeOperandSignedShort(int operand_index);
compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
compiler::Node* BytecodeOperandSignedQuad(int operand_index);
compiler::Node* BytecodeOperandReadUnalignedUnpoisoned(
int relative_offset, MachineType result_type);
// Returns zero- or sign-extended to word32 value of the operand. Values are
// not poisoned on speculation - should be used with care.
compiler::Node* BytecodeOperandUnsignedByteUnpoisoned(int operand_index);
compiler::Node* BytecodeOperandSignedByteUnpoisoned(int operand_index);
compiler::Node* BytecodeOperandUnsignedShortUnpoisoned(int operand_index);
compiler::Node* BytecodeOperandSignedShortUnpoisoned(int operand_index);
compiler::Node* BytecodeOperandUnsignedQuadUnpoisoned(int operand_index);
compiler::Node* BytecodeOperandSignedQuadUnpoisoned(int operand_index);
compiler::Node* BytecodeSignedOperandUnpoisoned(int operand_index,
OperandSize operand_size);
compiler::Node* BytecodeUnsignedOperandUnpoisoned(int operand_index,
OperandSize operand_size);
// Returns zero- or sign-extended to word32 value of the operand of
// given size.
......@@ -337,6 +350,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* BytecodeUnsignedOperand(int operand_index,
OperandSize operand_size);
// Returns the word-size sign-extended register index for bytecode operand
// |operand_index| in the current bytecode. Value is not poisoned on
// speculation since the value loaded from the register is poisoned instead.
compiler::Node* BytecodeOperandRegUnpoisoned(int operand_index);
// Returns the word zero-extended index immediate for bytecode operand
// |operand_index| in the current bytecode, for use when loading a
// constant pool entry.
compiler::Node* BytecodeOperandConstantPoolIdxUnpoisoned(int operand_index);
// Jump relative to the current bytecode by the |jump_offset|. If |backward|,
// then jump backward (subtract the offset), otherwise jump forward (add the
// offset). Helper function for Jump and JumpBackward.
......@@ -372,18 +394,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// next dispatch offset.
void InlineStar();
// Dispatch to |target_bytecode| at |new_bytecode_offset|.
// |target_bytecode| should be equivalent to loading from the offset.
compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
compiler::Node* new_bytecode_offset);
// Dispatch to the bytecode handler with code offset |handler|.
compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
compiler::Node* bytecode_offset);
compiler::Node* bytecode_offset,
compiler::Node* target_bytecode);
// Dispatch to the bytecode handler with code entry point |handler_entry|.
compiler::Node* DispatchToBytecodeHandlerEntry(
compiler::Node* handler_entry, compiler::Node* bytecode_offset);
compiler::Node* handler_entry, compiler::Node* bytecode_offset,
compiler::Node* target_bytecode);
int CurrentBytecodeSize() const;
......@@ -401,6 +420,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
bool reloaded_frame_ptr_;
bool bytecode_array_valid_;
Node* speculation_poison_;
bool disable_stack_check_across_call_;
compiler::Node* stack_pointer_before_call_;
......
......@@ -2742,10 +2742,10 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
Node* result_pair = \
CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
Node* return_value = Projection(0, result_pair); \
Node* original_handler = Projection(1, result_pair); \
Node* original_bytecode = SmiUntag(Projection(1, result_pair)); \
MaybeDropFrames(context); \
SetAccumulator(return_value); \
DispatchToBytecodeHandler(original_handler); \
DispatchToBytecode(original_bytecode, BytecodeOffset()); \
}
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
......@@ -3095,7 +3095,10 @@ class DeserializeLazyAssembler : public InterpreterAssembler {
explicit DeserializeLazyAssembler(compiler::CodeAssemblerState* state,
OperandScale operand_scale)
: InterpreterAssembler(state, kFakeBytecode, operand_scale) {}
: InterpreterAssembler(state, kFakeBytecode, operand_scale) {
// Disable speculation poisoning for this handler since we use kFakeBytecode
DisableSpeculationPoisoning();
}
static void Generate(compiler::CodeAssemblerState* state,
OperandScale operand_scale) {
......
......@@ -291,7 +291,8 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -23,6 +23,7 @@ constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;
constexpr Register kInterpreterDispatchTableRegister = t6;
constexpr Register kInterpreterTargetBytecodeRegister = t3;
constexpr Register kJavaScriptCallArgCountRegister = a0;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kRuntimeCallFunctionRegister = a1;
......
......@@ -291,7 +291,8 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -23,6 +23,7 @@ constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
constexpr Register kInterpreterDispatchTableRegister = t2;
constexpr Register kInterpreterTargetBytecodeRegister = t3;
constexpr Register kJavaScriptCallArgCountRegister = a0;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kRuntimeCallFunctionRegister = a1;
......
......@@ -64,11 +64,14 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
// We do not have to deal with operand scale here. If the bytecode at the
// break is prefixed by operand scaling, we would have patched over the
// scaling prefix. We now simply dispatch to the handler for the prefix.
// We need to deserialize now to ensure we don't hit the debug break again
// after deserializing.
OperandScale operand_scale = OperandScale::kSingle;
Code* code = isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
bytecode, operand_scale);
isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(bytecode,
operand_scale);
return MakePair(isolate->debug()->return_value(), code);
return MakePair(isolate->debug()->return_value(),
Smi::FromInt(static_cast<uint8_t>(bytecode)));
}
......
......@@ -296,7 +296,8 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister,
kInterpreterTargetBytecodeRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -24,6 +24,7 @@ constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r12;
constexpr Register kInterpreterBytecodeArrayRegister = r14;
constexpr Register kInterpreterDispatchTableRegister = r15;
constexpr Register kInterpreterTargetBytecodeRegister = r11;
constexpr Register kJavaScriptCallArgCountRegister = rax;
constexpr Register kJavaScriptCallNewTargetRegister = rdx;
constexpr Register kRuntimeCallFunctionRegister = rbx;
......
......@@ -2123,9 +2123,7 @@ IS_BINOP_MATCHER(Float64InsertHighWord32)
return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
}
IS_UNOP_MATCHER(BooleanNot)
IS_UNOP_MATCHER(BitcastTaggedToWord)
IS_UNOP_MATCHER(BitcastWordToTagged)
IS_UNOP_MATCHER(BitcastWordToTaggedSigned)
IS_UNOP_MATCHER(TruncateFloat64ToWord32)
IS_UNOP_MATCHER(ChangeFloat64ToInt32)
IS_UNOP_MATCHER(ChangeFloat64ToUint32)
......@@ -2189,6 +2187,27 @@ IS_UNOP_MATCHER(Word32Popcnt)
IS_UNOP_MATCHER(Word32ReverseBytes)
#undef IS_UNOP_MATCHER
// Special-case Bitcast operators which are disabled when ENABLE_VERIFY_CSA is
// not enabled.
Matcher<Node*> IsBitcastTaggedToWord(const Matcher<Node*>& input_matcher) {
  // Without ENABLE_VERIFY_CSA the tagged->word bitcast node is not emitted,
  // so the expectation reduces to the input itself.
#ifndef ENABLE_VERIFY_CSA
  return input_matcher;
#else
  return MakeMatcher(
      new IsUnopMatcher(IrOpcode::kBitcastTaggedToWord, input_matcher));
#endif
}
Matcher<Node*> IsBitcastWordToTaggedSigned(
    const Matcher<Node*>& input_matcher) {
  // Without ENABLE_VERIFY_CSA the word->tagged-signed bitcast node is not
  // emitted, so the expectation reduces to the input itself.
#ifndef ENABLE_VERIFY_CSA
  return input_matcher;
#else
  return MakeMatcher(
      new IsUnopMatcher(IrOpcode::kBitcastWordToTaggedSigned, input_matcher));
#endif
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -336,6 +336,7 @@ Matcher<Node*> IsUnalignedStore(
const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStackSlot(const Matcher<StackSlotRepresentation>& rep_matcher);
Matcher<Node*> IsWord32Popcnt(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Or(const Matcher<Node*>& lhs_matcher,
......
......@@ -63,6 +63,12 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
value_matcher, _, _);
}
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsWordNot(
    const Matcher<Node*>& value_matcher) {
  // A word-sized NOT is generated as XOR with an all-ones constant; choose
  // the 64-bit or 32-bit form to match the target pointer width.
  if (kPointerSize == 8) {
    return IsWord64Xor(value_matcher, c::IsInt64Constant(-1));
  }
  return IsWord32Xor(value_matcher, c::IsInt32Constant(-1));
}
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
int offset) {
......@@ -231,9 +237,8 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
}
}
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
int offset, OperandSize operand_size) {
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
IsUnpoisonedSignedOperand(int offset, OperandSize operand_size) {
switch (operand_size) {
case OperandSize::kByte:
return IsSignedByteOperand(offset);
......@@ -247,9 +252,8 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
return nullptr;
}
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
int offset, OperandSize operand_size) {
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
IsUnpoisonedUnsignedOperand(int offset, OperandSize operand_size) {
switch (operand_size) {
case OperandSize::kByte:
return IsUnsignedByteOperand(offset);
......@@ -263,6 +267,63 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
return nullptr;
}
// Matcher for the speculation-poison mask: a word that is all ones when the
// kTargetBytecode dispatch parameter equals this handler's own bytecode, and
// all zeros otherwise.
Matcher<compiler::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSpeculationPoison() {
  // The bytecode actually dispatched to, as passed in via the dispatch
  // descriptor.
  Matcher<compiler::Node*> current =
      c::IsParameter(InterpreterDispatchDescriptor::kTargetBytecode);
  int bytecode_int = static_cast<int>(bytecode());
  Matcher<compiler::Node*> expected = c::IsIntPtrConstant(bytecode_int);
  // (current - expected) | (expected - current) has its sign bit set iff the
  // two values differ. For bytecode 0 the first subtraction is expected to be
  // folded away, leaving |current| directly.
  Matcher<compiler::Node*> diff = c::IsWordOr(
      bytecode_int == 0 ? current : c::IsIntPtrSub(current, expected),
      c::IsIntPtrSub(expected, current));
  // Arithmetic right shift by (word width - 1) broadcasts that sign bit to
  // every bit; the final NOT inverts it so a *match* yields all ones.
  return IsWordNot(
      c::IsWordSar(diff, c::IsIntPtrConstant(kBitsPerPointer - 1)));
}
// Matches a tagged value masked by the speculation poison: bitcast to a
// word, AND with the poison mask, then bitcast back to a tagged value.
Matcher<compiler::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsPoisonTagged(
    const Matcher<compiler::Node*> value_matcher) {
  Matcher<compiler::Node*> value_as_word =
      IsBitcastTaggedToWord(value_matcher);
  Matcher<compiler::Node*> masked_word =
      IsWordAnd(IsSpeculationPoison(), value_as_word);
  return IsBitcastWordToTagged(masked_word);
}
// Matches a word-sized value masked by the speculation poison.
Matcher<compiler::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsPoisonWord(
    const Matcher<compiler::Node*> value_matcher) {
  Matcher<compiler::Node*> poison = IsSpeculationPoison();
  return IsWordAnd(poison, value_matcher);
}
// Matches a 32-bit value masked by the speculation poison. On 64-bit
// targets the word-sized poison is first truncated to 32 bits.
Matcher<compiler::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsPoisonInt32(
    const Matcher<compiler::Node*> value_matcher) {
  Matcher<compiler::Node*> poison32;
  if (Is64()) {
    poison32 = c::IsTruncateInt64ToInt32(IsSpeculationPoison());
  } else {
    poison32 = IsSpeculationPoison();
  }
  return IsWord32And(poison32, value_matcher);
}
// Matches a signed bytecode operand read followed by Int32 poisoning.
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
    int offset, OperandSize operand_size) {
  Matcher<Node*> raw_operand =
      IsUnpoisonedSignedOperand(offset, operand_size);
  return IsPoisonInt32(raw_operand);
}
// Matches an unsigned bytecode operand read followed by Int32 poisoning.
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
    int offset, OperandSize operand_size) {
  Matcher<Node*> raw_operand =
      IsUnpoisonedUnsignedOperand(offset, operand_size);
  return IsPoisonInt32(raw_operand);
}
// Matcher for a register-operand load: the (unpoisoned) signed register
// index is scaled to a byte offset off the parent frame pointer, and the
// loaded tagged value is then speculation-poisoned.
Matcher<compiler::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadRegisterOperand(
    int offset, OperandSize operand_size) {
  // The register index itself is not poisoned; poisoning is applied to the
  // value loaded from the register instead.
  Matcher<compiler::Node*> reg_operand =
      IsChangeInt32ToIntPtr(IsUnpoisonedSignedOperand(offset, operand_size));
  return IsPoisonTagged(
      IsLoad(MachineType::AnyTagged(), c::IsLoadParentFramePointer(),
             // index << kPointerSizeLog2 converts the register index to a
             // frame offset in bytes.
             c::IsWordShl(reg_operand, c::IsIntPtrConstant(kPointerSizeLog2))));
}
TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
// If debug code is enabled we emit extra code in Jump.
if (FLAG_debug_code) return;
......@@ -345,17 +406,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
m.IsSignedOperand(offset, operand_size));
break;
}
case interpreter::OperandType::kRegList:
case interpreter::OperandType::kReg:
case interpreter::OperandType::kRegOut:
case interpreter::OperandType::kRegOutList:
case interpreter::OperandType::kRegOutPair:
case interpreter::OperandType::kRegOutTriple:
case interpreter::OperandType::kRegPair:
EXPECT_THAT(m.BytecodeOperandReg(i),
c::IsChangeInt32ToIntPtr(
m.IsSignedOperand(offset, operand_size)));
break;
case interpreter::OperandType::kRuntimeId:
EXPECT_THAT(m.BytecodeOperandRuntimeId(i),
m.IsUnsignedOperand(offset, operand_size));
......@@ -364,6 +414,16 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
EXPECT_THAT(m.BytecodeOperandIntrinsicId(i),
m.IsUnsignedOperand(offset, operand_size));
break;
case interpreter::OperandType::kRegList:
case interpreter::OperandType::kReg:
case interpreter::OperandType::kRegPair:
case interpreter::OperandType::kRegOut:
case interpreter::OperandType::kRegOutList:
case interpreter::OperandType::kRegOutPair:
case interpreter::OperandType::kRegOutTriple:
EXPECT_THAT(m.LoadRegisterAtOperandIndex(i),
m.IsLoadRegisterOperand(offset, operand_size));
break;
case interpreter::OperandType::kNone:
UNREACHABLE();
break;
......@@ -397,11 +457,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
kHeapObjectTag));
EXPECT_THAT(
load_constant,
m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
kHeapObjectTag)));
EXPECT_THAT(load_constant,
m.IsPoisonTagged(m.IsLoad(
MachineType::AnyTagged(), constant_pool_matcher,
c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
kHeapObjectTag))));
}
{
Node* index = m.Parameter(2);
......@@ -413,11 +473,12 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
kHeapObjectTag));
EXPECT_THAT(
load_constant,
m.IsLoad(
m.IsPoisonTagged(m.IsLoad(
MachineType::AnyTagged(), constant_pool_matcher,
c::IsIntPtrAdd(
c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
c::IsWordShl(index, c::IsIntPtrConstant(kPointerSizeLog2)))));
c::IsWordShl(index,
c::IsIntPtrConstant(kPointerSizeLog2))))));
}
}
}
......
......@@ -49,6 +49,16 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
const Matcher<compiler::Node*>& index_matcher,
const Matcher<compiler::Node*>& value_matcher);
Matcher<Node*> IsWordNot(const Matcher<Node*>& value_matcher);
Matcher<compiler::Node*> IsSpeculationPoison();
Matcher<compiler::Node*> IsPoisonTagged(
const Matcher<compiler::Node*> value_matcher);
Matcher<compiler::Node*> IsPoisonInt32(
const Matcher<compiler::Node*> value_matcher);
Matcher<compiler::Node*> IsPoisonWord(
const Matcher<compiler::Node*> value_matcher);
Matcher<compiler::Node*> IsUnsignedByteOperand(int offset);
Matcher<compiler::Node*> IsSignedByteOperand(int offset);
Matcher<compiler::Node*> IsUnsignedShortOperand(int offset);
......@@ -56,11 +66,19 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
Matcher<compiler::Node*> IsUnsignedQuadOperand(int offset);
Matcher<compiler::Node*> IsSignedQuadOperand(int offset);
Matcher<compiler::Node*> IsUnpoisonedSignedOperand(
int offset, OperandSize operand_size);
Matcher<compiler::Node*> IsUnpoisonedUnsignedOperand(
int offset, OperandSize operand_size);
Matcher<compiler::Node*> IsSignedOperand(int offset,
OperandSize operand_size);
Matcher<compiler::Node*> IsUnsignedOperand(int offset,
OperandSize operand_size);
Matcher<compiler::Node*> IsLoadRegisterOperand(int offset,
OperandSize operand_size);
private:
DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
};
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment