Commit c652a9eb authored by Anisha Rohra, committed by Commit Bot

PPC/s390: Cleanup of deoptimization structures, removal of lazy deopt patching.

Port aa7bf1cf

Original Commit Message:

  This CL:
  - removes the trampoline pc from the deoptimization input
  data and deoptimization state. This is no longer needed given
  that we added this information to the safepoint table in
  https://chromium-review.googlesource.com/c/v8/v8/+/596027.
  This should also fix the regression mentioned in
  https://bugs.chromium.org/p/chromium/issues/detail?id=752873
  - searches for the exception handler in the safepoint table.
  - removes the code used for patching, which is no longer needed.
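The first two points describe the same mechanism: the pc-keyed safepoint table, rather than the deoptimization input data, now carries the lazy-deopt trampoline and exception-handler information, so the deoptimizer looks it up instead of patching call sites. Below is a minimal, self-contained sketch of that lookup idea; SafepointEntry, SafepointTable::Find and LazyDeoptTarget are illustrative names only and do not match V8's actual classes or signatures.

// Hedged, self-contained sketch of the idea only; real V8 types differ.
#include <cstdint>
#include <utility>
#include <vector>

// One record per call site in optimized code. Besides the usual stack map,
// it carries the lazy-deopt trampoline pc and the exception handler offset,
// so nothing has to be patched into the code later.
struct SafepointEntry {
  uint32_t pc_offset;        // return address, relative to code start
  int trampoline_pc;         // lazy-deopt continuation, -1 if none
  int exception_handler_pc;  // handler offset, -1 if none
};

class SafepointTable {
 public:
  explicit SafepointTable(std::vector<SafepointEntry> entries)
      : entries_(std::move(entries)) {}

  // Lookup keyed by the return pc recorded at the call site; the real table
  // is sorted and searched more efficiently, which this sketch skips.
  const SafepointEntry* Find(uint32_t pc_offset) const {
    for (const SafepointEntry& e : entries_) {
      if (e.pc_offset == pc_offset) return &e;
    }
    return nullptr;
  }

 private:
  std::vector<SafepointEntry> entries_;
};

// Lazy deoptimization: instead of overwriting the call site with a call to a
// deopt entry (the patching removed by this CL), execution is redirected to
// the trampoline recorded for that safepoint.
inline int LazyDeoptTarget(const SafepointTable& table, uint32_t return_pc) {
  const SafepointEntry* entry = table.Find(return_pc);
  return entry != nullptr ? entry->trampoline_pc : -1;
}

In this scheme a lazy deopt at return address pc simply continues at LazyDeoptTarget(table, pc), which is why the padding and patching code deleted in the diff below is no longer required.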

R=bjaideep@ca.ibm.com, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: I27672961ec9b20bbbff7a2e994080065d01c85c0
Reviewed-on: https://chromium-review.googlesource.com/627197
Reviewed-by: Jaideep Bajwa <bjaideep@ca.ibm.com>
Commit-Queue: Jaideep Bajwa <bjaideep@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#47523}
parent 1385f95f
@@ -943,7 +943,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
tasm());
EnsureSpaceForLazyDeopt();
if (HasRegisterInput(instr, 0)) {
__ addi(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -988,7 +987,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallJSFunction: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
tasm());
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -2554,28 +2552,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
}
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block trampoline pool emission for duration of padding.
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
tasm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
__ nop();
padding_size -= v8::internal::Assembler::kInstrSize;
}
}
}
#undef __
} // namespace compiler
@@ -1153,7 +1153,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (HasRegisterInput(instr, 0)) {
__ AddP(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1194,7 +1193,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -2901,24 +2899,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
}
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
}
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % 2);
while (padding_size > 0) {
__ nop();
padding_size -= 2;
}
}
}
#undef __
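For context on the two EnsureSpaceForLazyDeopt deletions above: the helper only existed to guarantee that Deoptimizer::patch_size() bytes of nops followed each call that might lazily deoptimize, so a call to a deopt entry could later be patched over them. A rough, self-contained sketch of that padding pattern follows, using a toy assembler rather than V8's TurboAssembler; all names here are illustrative assumptions.

#include <cstdint>
#include <vector>

// Toy "assembler" that only tracks a byte offset and emitted bytes.
class ToyAssembler {
 public:
  int pc_offset() const { return static_cast<int>(buffer_.size()); }
  void nop() { buffer_.push_back(0x00); }  // 1-byte nop, for the sketch only

 private:
  std::vector<uint8_t> buffer_;
};

// Pad with nops until `patch_size` bytes separate the previous lazy-deopt
// site from the current pc, so a deopt call could later overwrite the gap.
// This is the pattern the deleted EnsureSpaceForLazyDeopt implemented.
inline void EnsureSpaceForLazyDeopt(ToyAssembler* assembler,
                                    int last_lazy_deopt_pc, int patch_size) {
  while (assembler->pc_offset() < last_lazy_deopt_pc + patch_size) {
    assembler->nop();
  }
}

With the trampoline recorded in the safepoint table, no call is ever patched in, so neither the padding nor patch_size() is needed, which is what the deoptimizer diffs below remove.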
@@ -12,78 +12,6 @@ namespace internal {
const int Deoptimizer::table_entry_size_ = 8;
int Deoptimizer::patch_size() {
#if V8_TARGET_ARCH_PPC64
const int kCallInstructionSizeInWords = 7;
#else
const int kCallInstructionSizeInWords = 4;
#endif
return kCallInstructionSizeInWords * Assembler::kInstrSize;
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Empty because there is no need for relocation information for the code
// patching in Deoptimizer::PatchCodeForDeoptimization below.
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
osr_patcher.masm()->bkpt(0);
}
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
deopt_entry, kRelocInfo_NONEPTR);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -13,72 +13,6 @@ namespace internal {
// LAY + LGHI/LHI + BRCL
const int Deoptimizer::table_entry_size_ = 16;
int Deoptimizer::patch_size() {
#if V8_TARGET_ARCH_S390X
const int kCallInstructionSize = 16;
#else
const int kCallInstructionSize = 10;
#endif
return kCallInstructionSize;
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Empty because there is no need for relocation information for the code
// patching in Deoptimizer::PatchCodeForDeoptimization below.
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 2);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 2);
osr_patcher.masm()->bkpt(0);
}
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
deopt_entry, kRelocInfo_NONEPTR);
DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(isolate, call_address, call_size_in_bytes);
patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be