Commit e70331f9 authored by Milad Farazmand, committed by Commit Bot

PPC/s390: [interpreter] Make IterationBody StackChecks implicit within JumpLoop

Port a447a44f

Original Commit Message:

    Since now the IterationBody StackChecks are implicit within JumpLoops,
    we are able to eagerly deopt in them. If we do that, whenever we advance
    to the next bytecode we don't have to advance to the next literal
    bytecode, but instead "advance" in the sense of doing the JumpLoop.

    Adding tests that test this advancing for wide and extra wide JumpLoops.

    Also, marking JumpLoop as needing source positions since now it has
    the ability of causing an interrupt.

R=solanes@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: I5bec2212d040801d67426a8639d20fe96035d813
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2111832
Reviewed-by: Junliang Yan <jyan@ca.ibm.com>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#66814}
parent fa3aada5
...@@ -957,18 +957,27 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -957,18 +957,27 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode // Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a // handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode. // label if the bytecode (without prefix) is a return bytecode. Will not advance
// the bytecode offset if the current bytecode is a JumpLoop, instead just
// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array, Register bytecode_array,
Register bytecode_offset, Register bytecode_offset,
Register bytecode, Register scratch1, Register bytecode, Register scratch1,
Label* if_return) { Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1; Register bytecode_size_table = scratch1;
Register scratch2 = bytecode; Register scratch3 = bytecode;
// The bytecode offset value will be increased by one in wide and extra wide
// cases. In the case of having a wide or extra wide JumpLoop bytecode, we
// will restore the original bytecode offset. In order to simplify the code,
// we keep a backup of it.
Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table, DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode)); bytecode, original_bytecode_offset));
__ Move(bytecode_size_table, __ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address()); ExternalReference::bytecode_size_table_address());
__ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode. // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide; Label process_bytecode, extra_wide;
...@@ -999,7 +1008,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -999,7 +1008,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Load the size of the current bytecode. // Load the size of the current bytecode.
__ bind(&process_bytecode); __ bind(&process_bytecode);
// Bailout to the return label if this is a return bytecode. // Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \ #define JUMP_IF_EQUAL(NAME) \
__ cmpi(bytecode, \ __ cmpi(bytecode, \
Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \ Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
...@@ -1007,10 +1016,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -1007,10 +1016,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL) RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL #undef JUMP_IF_EQUAL
// If this is a JumpLoop, re-execute it to perform the jump to the beginning
// of the loop.
Label end, not_jump_loop;
__ cmpi(bytecode,
Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
__ bne(&not_jump_loop);
// We need to restore the original bytecode_offset since we might have
// increased it to skip the wide / extra-wide prefix bytecode.
__ Move(bytecode_offset, original_bytecode_offset);
__ b(&end);
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset. // Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftImm(scratch2, bytecode, Operand(2)); __ ShiftLeftImm(scratch3, bytecode, Operand(2));
__ lwzx(scratch2, MemOperand(bytecode_size_table, scratch2)); __ lwzx(scratch3, MemOperand(bytecode_size_table, scratch3));
__ add(bytecode_offset, bytecode_offset, scratch2); __ add(bytecode_offset, bytecode_offset, scratch3);
__ bind(&end);
} }
// Generate code for entering a JS function with the interpreter. // Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the // On entry to the function the receiver and arguments have been pushed on the
...@@ -1196,7 +1219,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { ...@@ -1196,7 +1219,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister, __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister)); kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r4, r5, kInterpreterBytecodeOffsetRegister, r4, r5, r6,
&do_return); &do_return);
__ b(&do_dispatch); __ b(&do_dispatch);
...@@ -1476,7 +1499,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { ...@@ -1476,7 +1499,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode. // Advance to the next bytecode.
Label if_return; Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r4, r5, kInterpreterBytecodeOffsetRegister, r4, r5, r6,
&if_return); &if_return);
__ bind(&enter_bytecode); __ bind(&enter_bytecode);
......
...@@ -1017,18 +1017,27 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -1017,18 +1017,27 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode // Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a // handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode. // label if the bytecode (without prefix) is a return bytecode. Will not advance
// the bytecode offset if the current bytecode is a JumpLoop, instead just
// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array, Register bytecode_array,
Register bytecode_offset, Register bytecode_offset,
Register bytecode, Register scratch1, Register bytecode, Register scratch1,
Label* if_return) { Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1; Register bytecode_size_table = scratch1;
Register scratch2 = bytecode; Register scratch3 = bytecode;
// The bytecode offset value will be increased by one in wide and extra wide
// cases. In the case of having a wide or extra wide JumpLoop bytecode, we
// will restore the original bytecode offset. In order to simplify the code,
// we keep a backup of it.
Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table, DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode)); bytecode, original_bytecode_offset));
__ Move(bytecode_size_table, __ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address()); ExternalReference::bytecode_size_table_address());
__ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode. // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide; Label process_bytecode, extra_wide;
...@@ -1059,7 +1068,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -1059,7 +1068,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Load the size of the current bytecode. // Load the size of the current bytecode.
__ bind(&process_bytecode); __ bind(&process_bytecode);
// Bailout to the return label if this is a return bytecode. // Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \ #define JUMP_IF_EQUAL(NAME) \
__ CmpP(bytecode, \ __ CmpP(bytecode, \
Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \ Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
...@@ -1067,10 +1076,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -1067,10 +1076,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL) RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL #undef JUMP_IF_EQUAL
// If this is a JumpLoop, re-execute it to perform the jump to the beginning
// of the loop.
Label end, not_jump_loop;
__ CmpP(bytecode,
Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
__ bne(&not_jump_loop);
// We need to restore the original bytecode_offset since we might have
// increased it to skip the wide / extra-wide prefix bytecode.
__ Move(bytecode_offset, original_bytecode_offset);
__ b(&end);
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset. // Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftP(scratch2, bytecode, Operand(2)); __ ShiftLeftP(scratch3, bytecode, Operand(2));
__ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2)); __ LoadlW(scratch3, MemOperand(bytecode_size_table, scratch3));
__ AddP(bytecode_offset, bytecode_offset, scratch2); __ AddP(bytecode_offset, bytecode_offset, scratch3);
__ bind(&end);
} }
// Generate code for entering a JS function with the interpreter. // Generate code for entering a JS function with the interpreter.
...@@ -1255,7 +1278,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { ...@@ -1255,7 +1278,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister, __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister)); kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r3, r4, kInterpreterBytecodeOffsetRegister, r3, r4, r5,
&do_return); &do_return);
__ b(&do_dispatch); __ b(&do_dispatch);
...@@ -1532,7 +1555,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { ...@@ -1532,7 +1555,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode. // Advance to the next bytecode.
Label if_return; Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r3, r4, kInterpreterBytecodeOffsetRegister, r3, r4, r5,
&if_return); &if_return);
__ bind(&enter_bytecode); __ bind(&enter_bytecode);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment