Commit 302f187b authored by Jakob Gruber, committed by Commit Bot

[nojit] Remaining cleanups related to CodeStub removal

This:

- documents removal of Code's stub key field.
- removes SerializedCodeData's CodeStubKeys field.
- removes masm's custom self-reference marker mechanism.

Bug: v8:7777
Change-Id: Ie5c51bc895e508acdeb3994cf5558a2cf4c21540
Reviewed-on: https://chromium-review.googlesource.com/c/1367744
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58145}
parent fdcaa3d4
......@@ -38,17 +38,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate,
const AssemblerOptions& options, void* buffer,
int size, CodeObjectRequired create_code_object)
: TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
// marker in order to disambiguate between self-references during nested
// code generation (e.g.: codegen of the current object triggers stub
// compilation through CodeStub::GetCode()).
code_object_ = Handle<HeapObject>::New(
*isolate->factory()->NewSelfReferenceMarker(), isolate);
}
}
: TurboAssembler(isolate, options, buffer, size, create_code_object) {}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
......
......@@ -34,17 +34,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate,
const AssemblerOptions& options, void* buffer,
int size, CodeObjectRequired create_code_object)
: TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
// marker in order to disambiguate between self-references during nested
// code generation (e.g.: codegen of the current object triggers stub
// compilation through CodeStub::GetCode()).
code_object_ = Handle<HeapObject>::New(
*isolate->factory()->NewSelfReferenceMarker(), isolate);
}
}
: TurboAssembler(isolate, options, buffer, size, create_code_object) {}
CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); }
......
......@@ -69,11 +69,6 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
DCHECK(self_reference->IsOddball());
DCHECK(Oddball::cast(*self_reference)->kind() ==
Oddball::kSelfReferenceMarker);
// During indirection generation, we always create a distinct marker for each
// macro assembler. The canonical marker is only used when not generating a
// snapshot.
DCHECK(*self_reference != ReadOnlyRoots(isolate_).self_reference_marker());
#endif
uint32_t key;
......
......@@ -36,17 +36,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate,
const AssemblerOptions& options, void* buffer,
int size, CodeObjectRequired create_code_object)
: TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
// marker in order to disambiguate between self-references during nested
// code generation (e.g.: codegen of the current object triggers stub
// compilation through CodeStub::GetCode()).
code_object_ = Handle<HeapObject>::New(
*isolate->factory()->NewSelfReferenceMarker(), isolate);
}
}
: TurboAssembler(isolate, options, buffer, size, create_code_object) {}
void TurboAssembler::InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
......
......@@ -35,17 +35,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate,
const AssemblerOptions& options, void* buffer,
int size, CodeObjectRequired create_code_object)
: TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
// marker in order to disambiguate between self-references during nested
// code generation (e.g.: codegen of the current object triggers stub
// compilation through CodeStub::GetCode()).
code_object_ = Handle<HeapObject>::New(
*isolate->factory()->NewSelfReferenceMarker(), isolate);
}
}
: TurboAssembler(isolate, options, buffer, size, create_code_object) {}
static inline bool IsZero(const Operand& rt) {
if (rt.is_reg()) {
......
......@@ -35,17 +35,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate,
const AssemblerOptions& options, void* buffer,
int size, CodeObjectRequired create_code_object)
: TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
// marker in order to disambiguate between self-references during nested
// code generation (e.g.: codegen of the current object triggers stub
// compilation through CodeStub::GetCode()).
code_object_ = Handle<HeapObject>::New(
*isolate->factory()->NewSelfReferenceMarker(), isolate);
}
}
: TurboAssembler(isolate, options, buffer, size, create_code_object) {}
static inline bool IsZero(const Operand& rt) {
if (rt.is_reg()) {
......
......@@ -535,7 +535,6 @@ void Code::set_deopt_already_counted(bool flag) {
code_data_container()->set_kind_specific_flags(updated);
}
bool Code::is_stub() const { return kind() == STUB; }
bool Code::is_optimized_code() const { return kind() == OPTIMIZED_FUNCTION; }
bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; }
......
......@@ -109,7 +109,6 @@ class Code : public HeapObjectPtr {
// [kind]: Access to specific code kind.
inline Kind kind() const;
inline bool is_stub() const;
inline bool is_optimized_code() const;
inline bool is_wasm_code() const;
......@@ -376,6 +375,36 @@ class Code : public HeapObjectPtr {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_FIELDS)
#undef CODE_FIELDS
// This documents the amount of free space we have in each Code object header
// due to padding for code alignment.
#if V8_TARGET_ARCH_ARM64
static constexpr int kHeaderPaddingSize = 4;
STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kHeaderPaddingSize = 4;
STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_X64
static constexpr int kHeaderPaddingSize = 4;
STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_ARM
static constexpr int kHeaderPaddingSize = 24;
STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_IA32
static constexpr int kHeaderPaddingSize = 24;
STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_MIPS
static constexpr int kHeaderPaddingSize = 24;
STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#elif V8_TARGET_ARCH_PPC
// No static assert possible since padding size depends on the
// FLAG_enable_embedded_constant_pool runtime flag.
#elif V8_TARGET_ARCH_S390
static constexpr int kHeaderPaddingSize = 24;
STATIC_ASSERT(kHeaderSize - kHeaderPaddingStart == kHeaderPaddingSize);
#else
#error Unknown architecture.
#endif
inline int GetUnwindingInfoSizeOffset() const;
class BodyDescriptor;
......
......@@ -36,17 +36,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate,
const AssemblerOptions& options, void* buffer,
int size, CodeObjectRequired create_code_object)
: TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
// marker in order to disambiguate between self-references during nested
// code generation (e.g.: codegen of the current object triggers stub
// compilation through CodeStub::GetCode()).
code_object_ = Handle<HeapObject>::New(
*isolate->factory()->NewSelfReferenceMarker(), isolate);
}
}
: TurboAssembler(isolate, options, buffer, size, create_code_object) {}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
......
......@@ -37,17 +37,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate,
const AssemblerOptions& options, void* buffer,
int size, CodeObjectRequired create_code_object)
: TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
// marker in order to disambiguate between self-references during nested
// code generation (e.g.: codegen of the current object triggers stub
// compilation through CodeStub::GetCode()).
code_object_ = Handle<HeapObject>::New(
*isolate->factory()->NewSelfReferenceMarker(), isolate);
}
}
: TurboAssembler(isolate, options, buffer, size, create_code_object) {}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
......
......@@ -334,7 +334,6 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
SetHeaderValue(kFlagHashOffset, FlagList::Hash());
SetHeaderValue(kNumReservationsOffset,
static_cast<uint32_t>(reservations.size()));
SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
SetHeaderValue(kPayloadLengthOffset, static_cast<uint32_t>(payload->size()));
// Zero out any padding in the header.
......@@ -375,8 +374,7 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
uint32_t max_payload_length =
this->size_ -
POINTER_SIZE_ALIGN(kHeaderSize +
GetHeaderValue(kNumReservationsOffset) * kInt32Size +
GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size);
GetHeaderValue(kNumReservationsOffset) * kInt32Size);
if (payload_length > max_payload_length) return LENGTH_MISMATCH;
if (!Checksum(ChecksummedContent()).Check(c1, c2)) return CHECKSUM_MISMATCH;
return CHECK_SUCCESS;
......@@ -414,8 +412,7 @@ std::vector<SerializedData::Reservation> SerializedCodeData::Reservations()
Vector<const byte> SerializedCodeData::Payload() const {
int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
int payload_offset = kHeaderSize + reservations_size + code_stubs_size;
int payload_offset = kHeaderSize + reservations_size;
int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
const byte* payload = data_ + padded_payload_offset;
DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
......@@ -424,14 +421,6 @@ Vector<const byte> SerializedCodeData::Payload() const {
return Vector<const byte>(payload, length);
}
Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
// TODO(jgruber): Remove.
int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
const byte* start = data_ + kHeaderSize + reservations_size;
return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
GetHeaderValue(kNumCodeStubKeysOffset));
}
SerializedCodeData::SerializedCodeData(ScriptData* data)
: SerializedData(const_cast<byte*>(data->data()), data->length()) {}
......
......@@ -101,10 +101,9 @@ class SerializedCodeData : public SerializedData {
// [3] cpu features
// [4] flag hash
// [5] number of reservation size entries
// [6] number of code stub keys
// [7] payload length
// [8] payload checksum part A
// [9] payload checksum part B
// [6] payload length
// [7] payload checksum part A
// [8] payload checksum part B
// ... reservations
// ... code stub keys
// ... serialized payload
......@@ -113,10 +112,8 @@ class SerializedCodeData : public SerializedData {
static const uint32_t kCpuFeaturesOffset = kSourceHashOffset + kUInt32Size;
static const uint32_t kFlagHashOffset = kCpuFeaturesOffset + kUInt32Size;
static const uint32_t kNumReservationsOffset = kFlagHashOffset + kUInt32Size;
static const uint32_t kNumCodeStubKeysOffset =
kNumReservationsOffset + kUInt32Size;
static const uint32_t kPayloadLengthOffset =
kNumCodeStubKeysOffset + kUInt32Size;
kNumReservationsOffset + kUInt32Size;
static const uint32_t kChecksumPartAOffset =
kPayloadLengthOffset + kUInt32Size;
static const uint32_t kChecksumPartBOffset =
......@@ -141,8 +138,6 @@ class SerializedCodeData : public SerializedData {
std::vector<Reservation> Reservations() const;
Vector<const byte> Payload() const;
Vector<const uint32_t> CodeStubKeys() const;
static uint32_t SourceHash(Handle<String> source,
ScriptOriginOptions origin_options);
......
......@@ -72,19 +72,7 @@ StackArgumentsAccessor::StackArgumentsAccessor(
MacroAssembler::MacroAssembler(Isolate* isolate,
const AssemblerOptions& options, void* buffer,
int size, CodeObjectRequired create_code_object)
: TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
// marker in order to disambiguate between self-references during nested
// code generation (e.g.: codegen of the current object triggers stub
// compilation through CodeStub::GetCode()).
// TODO(jgruber): We can likely remove this now that code stubs are gone.
code_object_ = Handle<HeapObject>::New(
*isolate->factory()->NewSelfReferenceMarker(), isolate);
}
}
: TurboAssembler(isolate, options, buffer, size, create_code_object) {}
void MacroAssembler::Load(Register destination, ExternalReference source) {
if (root_array_available_ && options().enable_root_array_delta_access) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment