Commit 56b55f3f authored by Michael Achenbach, committed by Commit Bot

Revert "[TurboProp] Avoid marking the output of a call live in its catch handler"

This reverts commit cdc8d9a5.

Reason for revert: The regression test is too slow:
https://ci.chromium.org/p/v8/builders/ci/V8%20Linux%20-%20gc%20stress/30454

There are also gcc failures:
https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64%20gcc%20-%20debug/9528

Original change's description:
> [TurboProp] Avoid marking the output of a call live in its catch handler
>
> The output of a call won't be live if an exception is thrown while the
> call is on the stack and we unwind to a catch handler.
>
> BUG=chromium:1138075,v8:9684
>
> Change-Id: I95bf535bac388940869eb213e25565d64fe96df1
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2476317
> Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
> Reviewed-by: Georg Neis <neis@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#70562}
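The liveness fact the original change relied on can be seen outside V8 as well. A minimal C++ analogue (illustrative only; MayThrow and everything around it is hypothetical, not V8 code): if the call throws, the unwinder reaches the handler before the call's result was ever produced, so nothing on the exception path needs the output register.

    #include <cstdio>
    #include <stdexcept>

    int MayThrow(bool fail) {
      if (fail) throw std::runtime_error("unwound before a result existed");
      return 42;
    }

    int main() {
      try {
        // `result` only comes into existence on the non-exceptional
        // (IfSuccess) path of the call.
        int result = MayThrow(true);
        std::printf("%d\n", result);
      } catch (const std::exception&) {
        // The call's output was never written, so it need not be kept
        // live (or spilled) for this handler.
        std::puts("in the handler; no call output to preserve");
      }
    }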

TBR=rmcilroy@chromium.org,neis@chromium.org

Change-Id: I0f6b9378d516a70401fc429fb3612bbf962b0fb2
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:1138075
Bug: v8:9684
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2479007
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70564}
parent 8557840b
@@ -755,7 +755,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ CallCodeObject(reg);
       }
@@ -797,7 +797,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ JumpCodeObject(reg);
       }
@@ -825,7 +825,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CHECK(!instr->InputAt(0)->IsImmediate());
       Register reg = i.InputRegister(0);
       DCHECK_IMPLIES(
-          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
       __ Jump(reg);
       unwinding_info_writer_.MarkBlockWillExit();
@@ -691,7 +691,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ CallCodeObject(reg);
       }
@@ -732,7 +732,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ JumpCodeObject(reg);
       }
@@ -762,7 +762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CHECK(!instr->InputAt(0)->IsImmediate());
       Register reg = i.InputRegister(0);
       DCHECK_IMPLIES(
-          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
       UseScratchRegisterScope temps(tasm());
       temps.Exclude(x17);
@@ -257,6 +257,17 @@ class OutOfLineCode : public ZoneObject {
   OutOfLineCode* const next_;
 };
 
+inline bool HasCallDescriptorFlag(Instruction* instr,
+                                  CallDescriptor::Flag flag) {
+  STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
+#ifdef DEBUG
+  static constexpr int kInstructionCodeFlagsMask =
+      ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
+  DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
+#endif
+  return MiscField::decode(instr->opcode()) & flag;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
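The restored helper reads the call descriptor flags back out of the bits that the instruction selector packed into the instruction code (see EncodeCallDescriptorFlags further down). A self-contained sketch of the same pack/test scheme (simplified: the shift position and the example flag bit are assumptions for illustration; V8 itself goes through its BitField-based MiscField):

    #include <cassert>
    #include <cstdint>

    constexpr int kFlagsShift = 22;  // assumed field position
    constexpr int kFlagsBits = 10;   // matches the STATIC_ASSERT above
    constexpr uint32_t kFlagsMask = ((1u << kFlagsBits) - 1) << kFlagsShift;

    // Pack flag bits into the high bits of an opcode word.
    constexpr uint32_t EncodeFlags(uint32_t opcode, uint32_t flags) {
      return opcode | ((flags << kFlagsShift) & kFlagsMask);
    }

    // Extract the flag field again and test a single flag bit.
    constexpr bool HasFlag(uint32_t opcode, uint32_t flag) {
      return ((opcode & kFlagsMask) >> kFlagsShift) & flag;
    }

    int main() {
      constexpr uint32_t kSomeFlag = 1u << 3;  // hypothetical flag bit
      uint32_t op = EncodeFlags(/*opcode=*/7, kSomeFlag);
      assert(HasFlag(op, kSomeFlag));
      assert(!HasFlag(op, 1u << 4));
    }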
@@ -974,12 +974,12 @@ Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
 void CodeGenerator::RecordCallPosition(Instruction* instr) {
   const bool needs_frame_state =
-      instr->HasCallDescriptorFlag(CallDescriptor::kNeedsFrameState);
+      HasCallDescriptorFlag(instr, CallDescriptor::kNeedsFrameState);
   RecordSafepoint(instr->reference_map(), needs_frame_state
                                               ? Safepoint::kLazyDeopt
                                               : Safepoint::kNoLazyDeopt);
 
-  if (instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) {
+  if (HasCallDescriptorFlag(instr, CallDescriptor::kHasExceptionHandler)) {
     InstructionOperandConverter i(this, instr);
     RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
     DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
@@ -695,10 +695,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ LoadCodeObjectEntry(reg, reg);
-        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
           __ RetpolineCall(reg);
         } else {
           __ call(reg);
@@ -723,7 +723,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
           __ wasm_call(wasm_code, constant.rmode());
         } else {
-          if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+          if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
             __ RetpolineCall(wasm_code, constant.rmode());
           } else {
             __ call(wasm_code, constant.rmode());
@@ -731,7 +731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         }
       } else {
         Register reg = i.InputRegister(0);
-        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
           __ RetpolineCall(reg);
         } else {
           __ call(reg);
@@ -753,10 +753,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ LoadCodeObjectEntry(reg, reg);
-        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
           __ RetpolineJump(reg);
         } else {
           __ jmp(reg);
@@ -773,7 +773,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ jmp(wasm_code, constant.rmode());
       } else {
         Register reg = i.InputRegister(0);
-        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
           __ RetpolineJump(reg);
         } else {
           __ jmp(reg);
@@ -787,9 +787,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CHECK(!HasImmediateInput(instr, 0));
       Register reg = i.InputRegister(0);
       DCHECK_IMPLIES(
-          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
-      if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+      if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
         __ RetpolineJump(reg);
       } else {
         __ jmp(reg);
@@ -2731,7 +2731,6 @@ constexpr InstructionCode EncodeCallDescriptorFlags(
     InstructionCode opcode, CallDescriptor::Flags flags) {
   // Note: Not all bits of `flags` are preserved.
   STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode ==
                 MiscField::kSize);
-  DCHECK(Instruction::IsCallWithDescriptorFlags(opcode));
   return opcode | MiscField::encode(flags & MiscField::kMax);
 }
@@ -927,23 +927,6 @@ class V8_EXPORT_PRIVATE Instruction final {
     return arch_opcode() == ArchOpcode::kArchThrowTerminator;
   }
 
-  static constexpr bool IsCallWithDescriptorFlags(InstructionCode arch_opcode) {
-    return arch_opcode <= ArchOpcode::kArchCallBuiltinPointer;
-  }
-  bool IsCallWithDescriptorFlags() const {
-    return IsCallWithDescriptorFlags(arch_opcode());
-  }
-  bool HasCallDescriptorFlag(CallDescriptor::Flag flag) const {
-    DCHECK(IsCallWithDescriptorFlags());
-    STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
-#ifdef DEBUG
-    static constexpr int kInstructionCodeFlagsMask =
-        ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
-    DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
-#endif
-    return MiscField::decode(opcode()) & flag;
-  }
-
   enum GapPosition {
     START,
     END,
@@ -168,7 +168,8 @@ const InstructionBlock* MidTierRegisterAllocationData::GetBlock(
 }
 
 const BitVector* MidTierRegisterAllocationData::GetBlocksDominatedBy(
-    const InstructionBlock* block) {
+    int instr_index) {
+  const InstructionBlock* block = GetBlock(instr_index);
   return block_state(block->rpo_number()).dominated_blocks();
 }
 
@@ -282,12 +283,10 @@ class VirtualRegisterData final {
   // Define VirtualRegisterData with the type of output that produces this
   // virtual register.
   void DefineAsUnallocatedOperand(int virtual_register, int instr_index,
-                                  bool is_deferred_block,
-                                  bool is_exceptional_call_output);
+                                  bool is_deferred_block);
   void DefineAsFixedSpillOperand(AllocatedOperand* operand,
                                  int virtual_register, int instr_index,
-                                 bool is_deferred_block,
-                                 bool is_exceptional_call_output);
+                                 bool is_deferred_block);
   void DefineAsConstantOperand(ConstantOperand* operand, int instr_index,
                                bool is_deferred_block);
   void DefineAsPhi(int virtual_register, int instr_index,
@@ -365,9 +364,6 @@ class VirtualRegisterData final {
   bool is_defined_in_deferred_block() const {
     return is_defined_in_deferred_block_;
   }
-  bool is_exceptional_call_output() const {
-    return is_exceptional_call_output_;
-  }
 
   struct DeferredSpillSlotOutput {
    public:
@@ -385,11 +381,9 @@ class VirtualRegisterData final {
   class SpillRange : public ZoneObject {
    public:
     // Defines a spill range for an output operand.
-    SpillRange(int definition_instr_index,
-               const InstructionBlock* definition_block,
-               MidTierRegisterAllocationData* data)
+    SpillRange(int definition_instr_index, MidTierRegisterAllocationData* data)
         : live_range_(definition_instr_index, definition_instr_index),
-          live_blocks_(data->GetBlocksDominatedBy(definition_block)),
+          live_blocks_(data->GetBlocksDominatedBy(definition_instr_index)),
           deferred_spill_outputs_(nullptr) {}
 
     // Defines a spill range for a Phi variable.
@@ -397,7 +391,8 @@ class VirtualRegisterData final {
                MidTierRegisterAllocationData* data)
         : live_range_(phi_block->first_instruction_index(),
                       phi_block->first_instruction_index()),
-          live_blocks_(data->GetBlocksDominatedBy(phi_block)),
+          live_blocks_(
+              data->GetBlocksDominatedBy(phi_block->first_instruction_index())),
           deferred_spill_outputs_(nullptr) {
       // For phis, add the gap move instructions in the predecssor blocks to
       // the live range.
@@ -474,8 +469,7 @@ class VirtualRegisterData final {
  private:
   void Initialize(int virtual_register, InstructionOperand* spill_operand,
                   int instr_index, bool is_phi, bool is_constant,
-                  bool is_defined_in_deferred_block,
-                  bool is_exceptional_call_output);
+                  bool is_defined_in_deferred_block);
 
   void AddSpillUse(int instr_index, MidTierRegisterAllocationData* data);
   void AddPendingSpillOperand(PendingOperand* pending_operand);
@@ -491,7 +485,6 @@ class VirtualRegisterData final {
   bool is_constant_ : 1;
   bool is_defined_in_deferred_block_ : 1;
   bool needs_spill_at_output_ : 1;
-  bool is_exceptional_call_output_ : 1;
 };
 
 VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
@@ -505,8 +498,7 @@ void VirtualRegisterData::Initialize(int virtual_register,
                                      InstructionOperand* spill_operand,
                                      int instr_index, bool is_phi,
                                      bool is_constant,
-                                     bool is_defined_in_deferred_block,
-                                     bool is_exceptional_call_output) {
+                                     bool is_defined_in_deferred_block) {
   vreg_ = virtual_register;
   spill_operand_ = spill_operand;
   spill_range_ = nullptr;
@@ -515,34 +507,34 @@ void VirtualRegisterData::Initialize(int virtual_register,
   is_constant_ = is_constant;
   is_defined_in_deferred_block_ = is_defined_in_deferred_block;
   needs_spill_at_output_ = !is_constant_ && spill_operand_ != nullptr;
-  is_exceptional_call_output_ = is_exceptional_call_output;
 }
 
 void VirtualRegisterData::DefineAsConstantOperand(ConstantOperand* operand,
                                                   int instr_index,
                                                   bool is_deferred_block) {
   Initialize(operand->virtual_register(), operand, instr_index, false, true,
-             is_deferred_block, false);
+             is_deferred_block);
 }
 
-void VirtualRegisterData::DefineAsFixedSpillOperand(
-    AllocatedOperand* operand, int virtual_register, int instr_index,
-    bool is_deferred_block, bool is_exceptional_call_output) {
+void VirtualRegisterData::DefineAsFixedSpillOperand(AllocatedOperand* operand,
+                                                    int virtual_register,
+                                                    int instr_index,
+                                                    bool is_deferred_block) {
   Initialize(virtual_register, operand, instr_index, false, false,
-             is_deferred_block, is_exceptional_call_output);
+             is_deferred_block);
 }
 
-void VirtualRegisterData::DefineAsUnallocatedOperand(
-    int virtual_register, int instr_index, bool is_deferred_block,
-    bool is_exceptional_call_output) {
+void VirtualRegisterData::DefineAsUnallocatedOperand(int virtual_register,
                                                      int instr_index,
                                                      bool is_deferred_block) {
   Initialize(virtual_register, nullptr, instr_index, false, false,
-             is_deferred_block, is_exceptional_call_output);
+             is_deferred_block);
 }
 
 void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index,
                                       bool is_deferred_block) {
   Initialize(virtual_register, nullptr, instr_index, true, false,
-             is_deferred_block, false);
+             is_deferred_block);
 }
 
 void VirtualRegisterData::EnsureSpillRange(
@@ -550,27 +542,16 @@ void VirtualRegisterData::EnsureSpillRange(
     MidTierRegisterAllocationData* data) {
   DCHECK(!is_constant());
   if (HasSpillRange()) return;
-  const InstructionBlock* definition_block =
-      data->GetBlock(output_instr_index_);
   if (is_phi()) {
     // Define a spill slot that is defined for the phi's range.
+    const InstructionBlock* definition_block =
+        data->code()->InstructionAt(output_instr_index_)->block();
     spill_range_ =
         data->allocation_zone()->New<SpillRange>(definition_block, data);
   } else {
-    if (is_exceptional_call_output()) {
-      // If this virtual register is output by a call which has an exception
-      // catch handler, then the output will only be live in the IfSuccess
-      // successor block, not the IfException side, so make the definition block
-      // the IfSuccess successor block explicitly.
-      DCHECK_EQ(output_instr_index_,
-                definition_block->last_instruction_index() - 1);
-      DCHECK_EQ(definition_block->SuccessorCount(), 2);
-      DCHECK(data->GetBlock(definition_block->successors()[1])->IsHandler());
-      definition_block = data->GetBlock(definition_block->successors()[0]);
-    }
     // The spill slot will be defined after the instruction that outputs it.
-    spill_range_ = data->allocation_zone()->New<SpillRange>(
-        output_instr_index_ + 1, definition_block, data);
+    spill_range_ =
+        data->allocation_zone()->New<SpillRange>(output_instr_index_ + 1, data);
   }
   data->spilled_virtual_registers().Add(vreg());
 }
@@ -2594,7 +2575,6 @@ void MidTierOutputProcessor::InitializeBlockState(
 void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
   int block_start = block->first_instruction_index();
   bool is_deferred = block->IsDeferred();
-
   for (int index = block->last_instruction_index(); index >= block_start;
        index--) {
     Instruction* instr = code()->InstructionAt(index);
@@ -2613,9 +2593,6 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
         UnallocatedOperand* unallocated_operand =
             UnallocatedOperand::cast(output);
         int virtual_register = unallocated_operand->virtual_register();
-        bool is_exceptional_call_output =
-            instr->IsCallWithDescriptorFlags() &&
-            instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler);
         if (unallocated_operand->HasFixedSlotPolicy()) {
           // If output has a fixed slot policy, allocate its spill operand now
           // so that the register allocator can use this knowledge.
@@ -2625,12 +2602,10 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
               unallocated_operand->fixed_slot_index());
           VirtualRegisterDataFor(virtual_register)
               .DefineAsFixedSpillOperand(fixed_spill_operand, virtual_register,
-                                         index, is_deferred,
-                                         is_exceptional_call_output);
+                                         index, is_deferred);
         } else {
           VirtualRegisterDataFor(virtual_register)
-              .DefineAsUnallocatedOperand(virtual_register, index, is_deferred,
-                                          is_exceptional_call_output);
+              .DefineAsUnallocatedOperand(virtual_register, index, is_deferred);
         }
       }
     }
@@ -57,8 +57,8 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
   const InstructionBlock* GetBlock(int instr_index);
 
   // Returns a bitvector representing all the blocks that are dominated by the
-  // output of the instruction in |block|.
-  const BitVector* GetBlocksDominatedBy(const InstructionBlock* block);
+  // output of the instruction at |instr_index|.
+  const BitVector* GetBlocksDominatedBy(int instr_index);
 
   // List of all instruction indexs that require a reference map.
   ZoneVector<int>& reference_map_instructions() {
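GetBlocksDominatedBy is what bounds a SpillRange's live_blocks_: the spill slot only needs to be live in blocks dominated by its definition point. A standalone sketch of that dominance notion (hypothetical adjacency-list CFG, not V8's InstructionBlock/BitVector machinery): block D dominates block V when every entry-to-V path passes through D, i.e. V becomes unreachable from entry once D is removed.

    #include <cstdio>
    #include <vector>

    // Blocks reachable from `entry` once `removed` is deleted from the CFG.
    std::vector<bool> ReachableWithout(const std::vector<std::vector<int>>& succ,
                                       int entry, int removed) {
      std::vector<bool> seen(succ.size(), false);
      std::vector<int> stack;
      if (entry != removed) {
        seen[entry] = true;
        stack.push_back(entry);
      }
      while (!stack.empty()) {
        int b = stack.back();
        stack.pop_back();
        for (int s : succ[b]) {
          if (s != removed && !seen[s]) {
            seen[s] = true;
            stack.push_back(s);
          }
        }
      }
      return seen;
    }

    int main() {
      // 0 -> 1 -> {2, 3}; 2 -> 4; 3 -> 4: a diamond hanging off block 1.
      std::vector<std::vector<int>> succ = {{1}, {2, 3}, {4}, {4}, {}};
      std::vector<bool> reach = ReachableWithout(succ, /*entry=*/0, /*removed=*/1);
      for (int v = 0; v < static_cast<int>(succ.size()); ++v) {
        // Blocks 2, 3 and 4 are dominated by block 1 in this CFG.
        if (v != 1 && !reach[v]) std::printf("block %d is dominated by 1\n", v);
      }
    }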
@@ -711,7 +711,7 @@ void AdjustStackPointerForTailCall(Instruction* instr,
                                    int new_slot_above_sp,
                                    bool allow_shrinkage = true) {
   int stack_slot_delta;
-  if (instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp)) {
+  if (HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp)) {
     // For this special tail-call mode, the callee has the same arguments and
     // linkage as the caller, and arguments adapter frames must be preserved.
     // Thus we simply have reset the stack pointer register to its original
@@ -757,7 +757,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
   if (!pushes.empty() &&
       (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
        first_unused_stack_slot)) {
-    DCHECK(!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp));
+    DCHECK(!HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp));
     X64OperandConverter g(this, instr);
     for (auto move : pushes) {
       LocationOperand destination_location(
@@ -847,10 +847,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ LoadCodeObjectEntry(reg, reg);
-        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
           __ RetpolineCall(reg);
         } else {
           __ call(reg);
@@ -875,7 +875,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
           __ near_call(wasm_code, constant.rmode());
         } else {
-          if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+          if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
            __ RetpolineCall(wasm_code, constant.rmode());
           } else {
            __ Call(wasm_code, constant.rmode());
@@ -883,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         }
       } else {
         Register reg = i.InputRegister(0);
-        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
           __ RetpolineCall(reg);
         } else {
           __ call(reg);
@@ -894,7 +894,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchTailCallCodeObjectFromJSFunction:
-      if (!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp)) {
+      if (!HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp)) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
                                          i.TempRegister(2));
@@ -907,10 +907,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ LoadCodeObjectEntry(reg, reg);
-        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
           __ RetpolineJump(reg);
         } else {
           __ jmp(reg);
@@ -933,7 +933,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         }
       } else {
         Register reg = i.InputRegister(0);
-        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
           __ RetpolineJump(reg);
         } else {
           __ jmp(reg);
@@ -948,9 +948,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CHECK(!HasImmediateInput(instr, 0));
       Register reg = i.InputRegister(0);
       DCHECK_IMPLIES(
-          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
-      if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
+      if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
         __ RetpolineJump(reg);
       } else {
         __ jmp(reg);
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Flags: --allow-natives-syntax --turboprop --max-semi-space-size=1

function runNearStackLimit(f) {
  function t() {
    try {
      return t();
    } catch (e) {
      return f();
    }
  }
  %PrepareFunctionForOptimization(t);
  %OptimizeFunctionOnNextCall(t);
  return t();
}

function foo(a) {}
function bar(a, b) {}

for (let i = 0; i < 150; i++) {
  runNearStackLimit(() => {
    return foo(bar(3, 4) === false);
  });
}
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Flags: --allow-natives-syntax --turboprop --gc-interval=1000

function runNearStackLimit(f) {
  function t() {
    try {
      return t();
    } catch (e) {
      return f();
    }
  }
  %PrepareFunctionForOptimization(t);
  %OptimizeFunctionOnNextCall(t);
  return t();
}

function foo() {
  runNearStackLimit(() => {});
}

(function () {
  var a = 42;
  var b = 153;
  try {
    Object.defineProperty({});
  } catch (e) {}
  foo();
  foo();
})();

runNearStackLimit(() => {});