Commit 6848a341 authored by Hao Xu, committed by V8 LUCI CQ

[ptr-compr][x64] Support pointer decompressing by addressing mode, pt.1

The compiler generates the following instructions for a compressed pointer:

  [ DecompressTaggedPointer
  movl r8,[r8+0x13]
  REX.W addq r8,r14
  ]
  addl [r8+0x7],0xe6

This CL optimizes pointer decompression by using the complex
addressing modes available on x64:

  movl r8,[r8+0x13]
  addl [r14+r8*1+0x7],0xe6
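
For background, the decompression being folded away is just an add of the
cage base (r14 holds kPtrComprCageBaseRegister on x64), which the base+index
addressing mode [r14+r8*1+disp] performs as part of the memory access itself.
A minimal C++ model of this, assuming V8's usual scheme where a compressed
tagged value is the low 32 bits of the full pointer (the helper names here
are illustrative, not V8 API):

  #include <cstdint>

  // Illustrative model only: compression keeps the low 32 bits of the full
  // pointer; decompression adds the cage base back.
  static inline uint32_t CompressTagged(uintptr_t full_ptr) {
    return static_cast<uint32_t>(full_ptr);
  }

  static inline uintptr_t DecompressTagged(uintptr_t cage_base,
                                           uint32_t compressed) {
    // This add is the `REX.W addq r8,r14` that the CL folds into the
    // addressing mode of the following memory access.
    return cage_base + compressed;
  }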

Bug: v8:13056, v8:7703
Change-Id: I755cdac407bab4ff2e78d4a6a164f13385f7c361
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3765067
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Hao A Xu <hao.a.xu@intel.com>
Cr-Commit-Position: refs/heads/main@{#81967}
parent 9ea588d5
@@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {

@@ -474,6 +475,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  if (skip_interrupt_label) __ b(ge, skip_interrupt_label);
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  __ add(lhs, lhs, Operand(rhs));
}
...
@@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {

@@ -544,6 +545,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  if (skip_interrupt_label) __ B(ge, skip_interrupt_label);
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  if (SmiValuesAre31Bits()) {
    __ Add(lhs.W(), lhs.W(), Immediate(rhs));
...
@@ -166,6 +166,26 @@ class BaselineAssembler {
                                    int32_t index);
  inline void LoadPrototype(Register prototype, Register object);
  // Loads a compressed pointer, or loads from a compressed pointer. Because
  // x64 supports complex addressing modes, pointer decompression can be done
  // as part of the memory operand: [%compressed_base + %r1 + K].
#if V8_TARGET_ARCH_X64
inline void LoadTaggedPointerField(TaggedRegister output, Register source,
int offset);
inline void LoadTaggedPointerField(TaggedRegister output,
TaggedRegister source, int offset);
inline void LoadTaggedPointerField(Register output, TaggedRegister source,
int offset);
inline void LoadTaggedAnyField(Register output, TaggedRegister source,
int offset);
inline void LoadTaggedAnyField(TaggedRegister output, TaggedRegister source,
int offset);
inline void LoadFixedArrayElement(Register output, TaggedRegister array,
int32_t index);
inline void LoadFixedArrayElement(TaggedRegister output, TaggedRegister array,
int32_t index);
#endif
  // Falls through and sets scratch_and_result to 0 on failure, jumps to
  // on_result on success.
  inline void TryLoadOptimizedOsrCode(Register scratch_and_result,

@@ -180,6 +200,14 @@ class BaselineAssembler {
  inline void AddToInterruptBudgetAndJumpIfNotExceeded(
      Register weight, Label* skip_interrupt_label);
inline void LdaContextSlot(Register context, uint32_t index, uint32_t depth);
inline void StaContextSlot(Register context, Register value, uint32_t index,
uint32_t depth);
inline void LdaModuleVariable(Register context, int cell_index,
uint32_t depth);
inline void StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth);
  inline void AddSmi(Register lhs, Smi rhs);
  inline void SmiUntag(Register value);
  inline void SmiUntag(Register output, Register value);
...
@@ -727,12 +727,9 @@ void BaselineCompiler::VisitLdaContextSlot() {
  BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
  Register context = scratch_scope.AcquireScratch();
  LoadRegister(context, 0);
-  int depth = Uint(2);
-  for (; depth > 0; --depth) {
-    __ LoadTaggedPointerField(context, context, Context::kPreviousOffset);
-  }
-  __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                        Context::OffsetOfElementAt(Index(1)));
+  uint32_t index = Index(1);
+  uint32_t depth = Uint(2);
+  __ LdaContextSlot(context, index, depth);
}

void BaselineCompiler::VisitLdaImmutableContextSlot() { VisitLdaContextSlot(); }

@@ -755,13 +752,9 @@ void BaselineCompiler::VisitStaContextSlot() {
  DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister));
  __ Move(value, kInterpreterAccumulatorRegister);
  LoadRegister(context, 0);
-  int depth = Uint(2);
-  for (; depth > 0; --depth) {
-    __ LoadTaggedPointerField(context, context, Context::kPreviousOffset);
-  }
-  __ StoreTaggedFieldWithWriteBarrier(
-      context, Context::OffsetOfElementAt(iterator().GetIndexOperand(1)),
-      value);
+  uint32_t index = Index(1);
+  uint32_t depth = Uint(2);
+  __ StaContextSlot(context, value, index, depth);
}

void BaselineCompiler::VisitStaCurrentContextSlot() {

@@ -871,26 +864,9 @@ void BaselineCompiler::VisitLdaModuleVariable() {
  BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
  Register scratch = scratch_scope.AcquireScratch();
  __ LoadContext(scratch);
-  int depth = Uint(1);
-  for (; depth > 0; --depth) {
-    __ LoadTaggedPointerField(scratch, scratch, Context::kPreviousOffset);
-  }
-  __ LoadTaggedPointerField(scratch, scratch, Context::kExtensionOffset);
  int cell_index = Int(0);
-  if (cell_index > 0) {
-    __ LoadTaggedPointerField(scratch, scratch,
-                              SourceTextModule::kRegularExportsOffset);
-    // The actual array index is (cell_index - 1).
-    cell_index -= 1;
-  } else {
-    __ LoadTaggedPointerField(scratch, scratch,
-                              SourceTextModule::kRegularImportsOffset);
-    // The actual array index is (-cell_index - 1).
-    cell_index = -cell_index - 1;
-  }
-  __ LoadFixedArrayElement(scratch, scratch, cell_index);
-  __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, scratch,
-                        Cell::kValueOffset);
+  int depth = Uint(1);
+  __ LdaModuleVariable(scratch, cell_index, depth);
}

void BaselineCompiler::VisitStaModuleVariable() {

@@ -908,17 +884,7 @@ void BaselineCompiler::VisitStaModuleVariable() {
  __ Move(value, kInterpreterAccumulatorRegister);
  __ LoadContext(scratch);
  int depth = Uint(1);
-  for (; depth > 0; --depth) {
-    __ LoadTaggedPointerField(scratch, scratch, Context::kPreviousOffset);
-  }
-  __ LoadTaggedPointerField(scratch, scratch, Context::kExtensionOffset);
-  __ LoadTaggedPointerField(scratch, scratch,
-                            SourceTextModule::kRegularExportsOffset);
-  // The actual array index is (cell_index - 1).
-  cell_index -= 1;
-  __ LoadFixedArrayElement(scratch, scratch, cell_index);
-  __ StoreTaggedFieldWithWriteBarrier(scratch, Cell::kValueOffset, value);
+  __ StaModuleVariable(scratch, value, cell_index, depth);
}

void BaselineCompiler::VisitSetNamedProperty() {
...
@@ -9,6 +9,7 @@
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/interface-descriptors.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {

@@ -434,6 +435,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  if (rhs.value() == 0) return;
  __ add(lhs, Immediate(rhs));
...
@@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/loong64/assembler-loong64-inl.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {

@@ -442,6 +443,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  __ Add_d(lhs, lhs, Operand(rhs));
}
...
@@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/mips/assembler-mips-inl.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {

@@ -454,6 +455,24 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  __ Addu(lhs, lhs, Operand(rhs));
}
...
@@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {

@@ -452,6 +453,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  __ Daddu(lhs, lhs, Operand(rhs));
}
...
@@ -6,8 +6,9 @@
#define V8_BASELINE_PPC_BASELINE_ASSEMBLER_PPC_INL_H_

#include "src/baseline/baseline-assembler.h"
-#include "src/codegen/ppc/assembler-ppc-inl.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/ppc/assembler-ppc-inl.h"
+#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {

@@ -614,6 +615,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  if (skip_interrupt_label) __ bge(skip_interrupt_label, cr0);
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  if (rhs.value() == 0) return;
  __ LoadSmiLiteral(r0, rhs);
...
@@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {
namespace baseline {

@@ -449,6 +450,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  ASM_CODE_COMMENT(masm_);
  if (SmiValuesAre31Bits()) {
...
@@ -6,8 +6,9 @@
#define V8_BASELINE_S390_BASELINE_ASSEMBLER_S390_INL_H_

#include "src/baseline/baseline-assembler.h"
-#include "src/codegen/s390/assembler-s390-inl.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/s390/assembler-s390-inl.h"
+#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {

@@ -612,6 +613,61 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  if (skip_interrupt_label) __ b(ge, skip_interrupt_label);
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
if (cell_index > 0) {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(context, context, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
for (; depth > 0; --depth) {
LoadTaggedPointerField(context, context, Context::kPreviousOffset);
}
LoadTaggedPointerField(context, context, Context::kExtensionOffset);
LoadTaggedPointerField(context, context,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, context, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  if (rhs.value() == 0) return;
  __ LoadSmiLiteral(r0, rhs);
...
@@ -9,6 +9,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/x64/register-x64.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {

@@ -373,6 +374,47 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
  __ StoreTaggedField(FieldOperand(target, offset), value);
}
void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
Register source, int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
TaggedRegister source,
int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedPointerField(Register output,
TaggedRegister source,
int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output,
TaggedRegister source, int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(TaggedRegister output,
TaggedRegister source, int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadFixedArrayElement(Register output,
TaggedRegister array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::LoadFixedArrayElement(TaggedRegister output,
TaggedRegister array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,

@@ -404,9 +446,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
-  __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
+  // Decompresses pointer by complex addressing mode when necessary.
+  TaggedRegister tagged(feedback_cell);
+  LoadTaggedPointerField(tagged, feedback_cell,
+                         JSFunction::kFeedbackCellOffset);
+  __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset),
          Immediate(weight));
  if (skip_interrupt_label) {
    DCHECK_LT(weight, 0);

@@ -420,13 +464,114 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
-  __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
-          weight);
+  // Decompresses pointer by complex addressing mode when necessary.
+  TaggedRegister tagged(feedback_cell);
+  LoadTaggedPointerField(tagged, feedback_cell,
+                         JSFunction::kFeedbackCellOffset);
+  __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), weight);
  if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
}
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
uint32_t depth) {
  // [context] comes from the interpreter frame, so it is already decompressed
  // when pointer compression is enabled. To make use of the complex addressing
  // mode, any intermediate context pointer is loaded in compressed form.
if (depth == 0) {
LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
Context::OffsetOfElementAt(index));
} else {
TaggedRegister tagged(context);
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
}
LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
Context::OffsetOfElementAt(index));
}
}
void BaselineAssembler::StaContextSlot(Register context, Register value,
uint32_t index, uint32_t depth) {
  // [context] comes from the interpreter frame, so it is already decompressed
  // when pointer compression is enabled. To make use of the complex addressing
  // mode, any intermediate context pointer is loaded in compressed form.
if (depth > 0) {
TaggedRegister tagged(context);
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
}
if (COMPRESS_POINTERS_BOOL) {
// Decompress tagged pointer.
__ addq(tagged.reg(), kPtrComprCageBaseRegister);
}
}
StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
value);
}
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
uint32_t depth) {
  // [context] comes from the interpreter frame, so it is already decompressed.
  // To make use of the complex addressing mode when pointer compression is
  // enabled, any intermediate context pointer is loaded in compressed form.
TaggedRegister tagged(context);
if (depth == 0) {
LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
} else {
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
}
LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
}
if (cell_index > 0) {
LoadTaggedPointerField(tagged, tagged,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
} else {
LoadTaggedPointerField(tagged, tagged,
SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
cell_index = -cell_index - 1;
}
LoadFixedArrayElement(tagged, tagged, cell_index);
LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
Cell::kValueOffset);
}
void BaselineAssembler::StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth) {
  // [context] comes from the interpreter frame, so it is already decompressed.
  // To make use of the complex addressing mode when pointer compression is
  // enabled, any intermediate context pointer is loaded in compressed form.
TaggedRegister tagged(context);
if (depth == 0) {
LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
} else {
LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
--depth;
for (; depth > 0; --depth) {
LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
}
LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
}
LoadTaggedPointerField(tagged, tagged,
SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
cell_index -= 1;
LoadFixedArrayElement(context, tagged, cell_index);
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  if (rhs.value() == 0) return;
  if (SmiValuesAre31Bits()) {
...
@@ -203,9 +203,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  //  -- sp[4*kSystemPointerSize]: context
  // -----------------------------------
-  __ LoadTaggedPointerField(
-      rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset));
+  const TaggedRegister shared_function_info(rbx);
+  __ LoadTaggedPointerField(
+      shared_function_info,
+      FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movl(rbx,
+          FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset));
  __ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
  __ JumpIfIsInRange(
      rbx, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),

@@ -1171,12 +1174,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  // Get the bytecode array from the function object and load it into
  // kInterpreterBytecodeArrayRegister.
-  __ LoadTaggedPointerField(
-      kScratchRegister,
-      FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
-      kInterpreterBytecodeArrayRegister,
-      FieldOperand(kScratchRegister, SharedFunctionInfo::kFunctionDataOffset));
+  const TaggedRegister shared_function_info(kScratchRegister);
+  __ LoadTaggedPointerField(
+      shared_function_info,
+      FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedPointerField(
+      kInterpreterBytecodeArrayRegister,
+      FieldOperand(shared_function_info,
+                   SharedFunctionInfo::kFunctionDataOffset));

  Label is_baseline;
  GetSharedFunctionInfoBytecodeOrBaseline(

@@ -1190,10 +1195,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ j(not_equal, &compile_lazy);

  // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(feedback_vector,
-                            FieldOperand(feedback_vector, Cell::kValueOffset));
+  TaggedRegister feedback_cell(feedback_vector);
+  __ LoadTaggedPointerField(
+      feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedPointerField(feedback_vector,
+                            FieldOperand(feedback_cell, Cell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code

@@ -1356,11 +1362,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ bind(&is_baseline);
  {
    // Load the feedback vector from the closure.
-    __ LoadTaggedPointerField(
-        feedback_vector,
-        FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ LoadTaggedPointerField(
-        feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+    TaggedRegister feedback_cell(feedback_vector);
+    __ LoadTaggedPointerField(
+        feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+    __ LoadTaggedPointerField(feedback_vector,
+                              FieldOperand(feedback_cell, Cell::kValueOffset));

    Label install_baseline_code;
    // Check if feedback vector is valid. If not, call prepare for baseline to

@@ -1554,10 +1560,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  // get the custom trampoline, otherwise grab the entry address of the global
  // trampoline.
  __ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
-      rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
-      rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
+  const TaggedRegister shared_function_info(rbx);
+  __ LoadTaggedPointerField(
+      shared_function_info,
+      FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedPointerField(
+      rbx, FieldOperand(shared_function_info,
+                        SharedFunctionInfo::kFunctionDataOffset));
  __ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
  __ j(not_equal, &builtin_trampoline, Label::kNear);

@@ -1688,10 +1697,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  Register closure = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(feedback_vector,
-                            FieldOperand(feedback_vector, Cell::kValueOffset));
+  TaggedRegister feedback_cell(feedback_vector);
+  __ LoadTaggedPointerField(
+      feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedPointerField(feedback_vector,
+                            FieldOperand(feedback_cell, Cell::kValueOffset));
  if (FLAG_debug_code) {
    __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
    __ Assert(equal, AbortReason::kExpectedFeedbackVector);

@@ -2607,9 +2617,11 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
  __ LoadRoot(rbx, RootIndex::kUndefinedValue);

  // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
-  __ LoadTaggedPointerField(
-      rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ testl(FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
+  const TaggedRegister shared_function_info(rcx);
+  __ LoadTaggedPointerField(
+      shared_function_info,
+      FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ testl(FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset),
           Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
  __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
          RelocInfo::CODE_TARGET, not_zero);

@@ -2777,12 +2789,15 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
  }

  // Load deoptimization data from the code object.
-  __ LoadTaggedPointerField(
-      rbx, FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
+  const TaggedRegister deopt_data(rbx);
+  __ LoadTaggedPointerField(
+      deopt_data,
+      FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));

  // Load the OSR entrypoint offset from the deoptimization data.
-  __ SmiUntagField(
-      rbx, FieldOperand(rbx, FixedArray::OffsetOfElementAt(
-               DeoptimizationData::kOsrPcOffsetIndex)));
+  __ SmiUntagField(
+      rbx,
+      FieldOperand(deopt_data, FixedArray::OffsetOfElementAt(
+                                   DeoptimizationData::kOsrPcOffsetIndex)));

  // Compute the target address = code_obj + header_size + osr_offset

@@ -5114,11 +5129,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  // Get the Code object from the shared function info.
  Register code_obj = rbx;
-  __ LoadTaggedPointerField(
-      code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
-      code_obj,
-      FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+  TaggedRegister shared_function_info(code_obj);
+  __ LoadTaggedPointerField(
+      shared_function_info,
+      FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedPointerField(
+      code_obj, FieldOperand(shared_function_info,
+                             SharedFunctionInfo::kFunctionDataOffset));

  // Check if we have baseline code. For OSR entry it is safe to assume we
  // always have baseline code.

@@ -5150,10 +5167,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  // Load the feedback vector.
  Register feedback_vector = r11;
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(feedback_vector,
-                            FieldOperand(feedback_vector, Cell::kValueOffset));
+  TaggedRegister feedback_cell(feedback_vector);
+  __ LoadTaggedPointerField(
+      feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedPointerField(feedback_vector,
+                            FieldOperand(feedback_cell, Cell::kValueOffset));

  Label install_baseline_code;
  // Check if feedback vector is valid. If not, call prepare for baseline to
...
@@ -214,6 +214,15 @@ void TurboAssembler::LoadTaggedPointerField(Register destination,
  }
}
void TurboAssembler::LoadTaggedPointerField(TaggedRegister destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
movl(destination.reg(), field_operand);
} else {
mov_tagged(destination.reg(), field_operand);
}
}
#ifdef V8_MAP_PACKING
void TurboAssembler::UnpackMapWord(Register r) {
  // Clear the top two bytes (which may include metadata). Must be in sync with

@@ -242,6 +251,15 @@ void TurboAssembler::LoadAnyTaggedField(Register destination,
  }
}
void TurboAssembler::LoadAnyTaggedField(TaggedRegister destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
movl(destination.reg(), field_operand);
} else {
mov_tagged(destination.reg(), field_operand);
}
}
void TurboAssembler::PushTaggedPointerField(Operand field_operand,
                                            Register scratch) {
  if (COMPRESS_POINTERS_BOOL) {
...
@@ -572,6 +572,11 @@ class V8_EXPORT_PRIVATE TurboAssembler
  // compression is enabled.
  void LoadTaggedPointerField(Register destination, Operand field_operand);
// Loads a field containing a HeapObject but does not decompress it when
// pointer compression is enabled.
void LoadTaggedPointerField(TaggedRegister destination,
Operand field_operand);
  // Loads a field containing a Smi and decompresses it if pointer compression
  // is enabled.
  void LoadTaggedSignedField(Register destination, Operand field_operand);

@@ -579,6 +584,10 @@ class V8_EXPORT_PRIVATE TurboAssembler
  // Loads a field containing any tagged value and decompresses it if necessary.
  void LoadAnyTaggedField(Register destination, Operand field_operand);
// Loads a field containing any tagged value but does not decompress it when
// pointer compression is enabled.
void LoadAnyTaggedField(TaggedRegister destination, Operand field_operand);
  // Loads a field containing a HeapObject, decompresses it if necessary and
  // pushes full pointer to the stack. When pointer compression is enabled,
  // uses |scratch| to decompress the value.

@@ -936,6 +945,17 @@ inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}
// Generate an Operand for loading a field from an object. Object pointer is a
// compressed pointer when pointer compression is enabled.
inline Operand FieldOperand(TaggedRegister object, int offset) {
if (COMPRESS_POINTERS_BOOL) {
return Operand(kPtrComprCageBaseRegister, object.reg(),
ScaleFactor::times_1, offset - kHeapObjectTag);
} else {
return Operand(object.reg(), offset - kHeapObjectTag);
}
}
// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
                            int offset) {
...
@@ -73,6 +73,17 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
  explicit constexpr Register(int code) : RegisterBase(code) {}
};
// A register that stores a tagged value; the value is kept in compressed form
// when pointer compression is enabled.
class TaggedRegister {
public:
explicit TaggedRegister(Register reg) : reg_(reg) {}
Register reg() { return reg_; }
private:
Register reg_;
};
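
A usage sketch of TaggedRegister, mirroring the builtins-x64 changes above
(not additional API, just the pattern this CL establishes): the wrapper marks
a register as holding a still-compressed value, and FieldOperand(TaggedRegister,
offset) then folds the decompression into the addressing mode of the next
access.

  // The first load leaves a compressed pointer in rbx; the second access
  // decompresses it for free via [kPtrComprCageBaseRegister + rbx*1 + disp].
  const TaggedRegister shared_function_info(rbx);
  __ LoadTaggedPointerField(
      shared_function_info,
      FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
  __ movl(rbx,
          FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset));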
ASSERT_TRIVIALLY_COPYABLE(Register);
static_assert(sizeof(Register) <= sizeof(int),
              "Register can efficiently be passed by value");
...