Commit 51f11f0f authored by Igor Sheludko, committed by Commit Bot

[snapshot][cleanup] Reassign bytecode values

... and update the list of unused bytecodes.

This CL also drops kInternalReferenceEncoded as all necessary details
are already encoded in RelocInfo.

Bug: v8:8794, v8:8562
Change-Id: Ia8aec1f4bcf9802fe15322bd6bb273218d8959fa
Reviewed-on: https://chromium-review.googlesource.com/c/1460459
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59471}
parent ed5230a9
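For context on the kInternalReferenceEncoded removal before the diff: the relocation entry being iterated already records whether an internal reference is "encoded", so a second snapshot bytecode duplicated that bit. A minimal model of that redundancy (illustrative C++ only, not the V8 sources; all names here are stand-ins):

```cpp
#include <cassert>

// Illustrative model only. Both the snapshot bytecode and the relocation
// mode used to say whether an internal reference is "encoded"; keeping
// just the reloc mode loses no information.
enum RelocMode { INTERNAL_REFERENCE, INTERNAL_REFERENCE_ENCODED };

struct RelocInfoModel {
  RelocMode rmode;  // what RelocInfo::rmode() returns in the real code
};

// After this CL the deserializer patches with rinfo->rmode() directly,
// so one bytecode (kInternalReference) covers both modes.
RelocMode PatchMode(const RelocInfoModel& rinfo) { return rinfo.rmode; }

int main() {
  RelocInfoModel r{INTERNAL_REFERENCE_ENCODED};
  assert(PatchMode(r) == INTERNAL_REFERENCE_ENCODED);
}
```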
@@ -459,22 +459,15 @@ void Deserializer::VisitExternalReference(Code host, RelocInfo* rinfo) {
void Deserializer::VisitInternalReference(Code host, RelocInfo* rinfo) {
byte data = source_.Get();
CHECK(data == kInternalReference || data == kInternalReferenceEncoded);
CHECK_EQ(data, kInternalReference);
// Internal reference address is not encoded via skip, but by offset
// from code entry.
int pc_offset = source_.GetInt();
// Internal reference target is encoded as an offset from code entry.
int target_offset = source_.GetInt();
DCHECK(0 <= pc_offset && pc_offset <= host->raw_instruction_size());
DCHECK(0 <= target_offset && target_offset <= host->raw_instruction_size());
Address pc = host->entry() + pc_offset;
// TODO(ishell): don't encode pc_offset as it can be taken from the rinfo.
DCHECK_EQ(pc, rinfo->pc());
DCHECK_LT(static_cast<unsigned>(target_offset),
static_cast<unsigned>(host->raw_instruction_size()));
Address target = host->entry() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
pc, target,
data == kInternalReference ? RelocInfo::INTERNAL_REFERENCE
: RelocInfo::INTERNAL_REFERENCE_ENCODED);
rinfo->pc(), target, rinfo->rmode());
}
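The hunk above drops the serialized pc_offset and keeps a single offset from the code entry. A hedged sketch of that position-independent encoding (plain C++, not V8 code):

```cpp
#include <cassert>
#include <cstdint>

// Hedged sketch of the offset-from-entry scheme used above: the
// serializer stores target - entry; the deserializer adds the freshly
// allocated code object's entry back, so the value survives relocation.
uintptr_t EncodeInternalRef(uintptr_t entry, uintptr_t target) {
  return target - entry;
}
uintptr_t DecodeInternalRef(uintptr_t new_entry, uintptr_t offset) {
  return new_entry + offset;
}

int main() {
  uintptr_t old_entry = 0x1000, target = 0x1230;  // at serialization time
  uintptr_t offset = EncodeInternalRef(old_entry, target);
  uintptr_t new_entry = 0x8000;  // code landed elsewhere after deserialization
  // The decoded target keeps the same distance from the code entry.
  assert(DecodeInternalRef(new_entry, offset) - new_entry == target - old_entry);
}
```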
void Deserializer::VisitOffHeapTarget(Code host, RelocInfo* rinfo) {
@@ -535,32 +528,31 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
while (current < limit) {
byte data = source_.Get();
switch (data) {
#define CASE_STATEMENT(where, space_number) \
case where + space_number: \
STATIC_ASSERT((where & ~kWhereMask) == 0); \
#define CASE_STATEMENT(bytecode, space_number) \
case bytecode + space_number: \
STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
#define CASE_BODY(where, space_number_if_any) \
current = ReadDataCase<where, space_number_if_any>( \
#define CASE_BODY(bytecode, space_number_if_any) \
current = ReadDataCase<bytecode, space_number_if_any>( \
isolate, current, current_object_address, data, write_barrier_needed); \
break;
// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with fall-through cases
// and one body.
#define ALL_SPACES(where) \
CASE_STATEMENT(where, NEW_SPACE) \
CASE_BODY(where, NEW_SPACE) \
CASE_STATEMENT(where, OLD_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(where, CODE_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(where, MAP_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(where, LO_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(where, RO_SPACE) \
CASE_BODY(where, kAnyOldSpace)
#define ALL_SPACES(bytecode) \
CASE_STATEMENT(bytecode, NEW_SPACE) \
CASE_BODY(bytecode, NEW_SPACE) \
CASE_STATEMENT(bytecode, OLD_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(bytecode, CODE_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(bytecode, MAP_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(bytecode, LO_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(bytecode, RO_SPACE) \
CASE_BODY(bytecode, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
case byte_code: \
@@ -574,9 +566,9 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
FOUR_CASES(byte_code + 8) \
FOUR_CASES(byte_code + 12)
#define SINGLE_CASE(where, space) \
CASE_STATEMENT(where, space) \
CASE_BODY(where, space)
#define SINGLE_CASE(bytecode, space) \
CASE_STATEMENT(bytecode, space) \
CASE_BODY(bytecode, space)
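The macros above generate one switch case per space for a given bytecode. A hedged sketch of the underlying packing (space numbering here is illustrative; V8's AllocationSpace enum defines the real values):

```cpp
#include <cassert>

// Hedged sketch: bytecodes such as kNewObject/kBackref occupy a range of
// byte values whose low bits (kSpaceMask == 7) select the allocation
// space. "case bytecode + space_number:" thus emits one case per space,
// and the shared body recovers the space with "data & kSpaceMask".
enum { kSpaceMask = 7, kNewObject = 0x00, kBackref = 0x08 };
enum { NEW_SPACE = 0, OLD_SPACE = 1, CODE_SPACE = 2 };  // illustrative numbering

int main() {
  int data = kBackref + CODE_SPACE;           // byte as written by the serializer
  assert((data & ~kSpaceMask) == kBackref);   // which bytecode family
  assert((data & kSpaceMask) == CODE_SPACE);  // which space
}
```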
// Deserialize a new object and write a pointer to it to the current
// object.
@@ -610,7 +602,6 @@ bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
break;
}
case kInternalReferenceEncoded:
case kInternalReference:
case kOffHeapTarget: {
// These bytecodes are expected only during RelocInfo iteration.
@@ -801,7 +792,7 @@ Address Deserializer::ReadExternalReferenceCase() {
return external_reference_table_->address(reference_id);
}
template <SerializerDeserializer::Where where, int space_number_if_any>
template <SerializerDeserializer::Bytecode bytecode, int space_number_if_any>
UnalignedSlot Deserializer::ReadDataCase(Isolate* isolate,
UnalignedSlot current,
Address current_object_address,
@@ -815,31 +806,31 @@ UnalignedSlot Deserializer::ReadDataCase(Isolate* isolate,
? HeapObjectReferenceType::WEAK
: HeapObjectReferenceType::STRONG;
if (where == kNewObject) {
if (bytecode == kNewObject) {
heap_object = ReadObject(space_number);
emit_write_barrier = (space_number == NEW_SPACE);
} else if (where == kBackref) {
} else if (bytecode == kBackref) {
heap_object = GetBackReferencedObject(space_number);
emit_write_barrier = (space_number == NEW_SPACE);
heap_object = GetBackReferencedObject(data & kSpaceMask);
} else if (where == kRootArray) {
} else if (bytecode == kRootArray) {
int id = source_.GetInt();
RootIndex root_index = static_cast<RootIndex>(id);
heap_object = HeapObject::cast(isolate->root(root_index));
emit_write_barrier = Heap::InYoungGeneration(heap_object);
hot_objects_.Add(heap_object);
} else if (where == kReadOnlyObjectCache) {
} else if (bytecode == kReadOnlyObjectCache) {
int cache_index = source_.GetInt();
heap_object =
HeapObject::cast(isolate->read_only_object_cache()->at(cache_index));
DCHECK(!Heap::InYoungGeneration(heap_object));
emit_write_barrier = false;
} else if (where == kPartialSnapshotCache) {
} else if (bytecode == kPartialSnapshotCache) {
int cache_index = source_.GetInt();
heap_object =
HeapObject::cast(isolate->partial_snapshot_cache()->at(cache_index));
emit_write_barrier = Heap::InYoungGeneration(heap_object);
} else {
DCHECK_EQ(where, kAttachedReference);
DCHECK_EQ(bytecode, kAttachedReference);
int index = source_.GetInt();
heap_object = *attached_objects_[index];
emit_write_barrier = Heap::InYoungGeneration(heap_object);
......
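ReadDataCase above is templatized on the bytecode (the header below calls this out for efficiency), so the if/else chain on `bytecode` is resolved at compile time and each generated switch case keeps only its own branch. A minimal sketch of the technique (not V8 code):

```cpp
#include <cassert>

// Minimal sketch: with the bytecode as a template parameter, the
// comparisons below are compile-time constants, so each instantiation
// compiles down to a single branch instead of a runtime dispatch chain.
enum Bytecode { kNewObject, kBackref, kRootArray };

template <Bytecode bytecode>
int ReadDataCaseModel(int payload) {
  if (bytecode == kNewObject) return payload + 1;
  else if (bytecode == kBackref) return payload + 2;
  else return payload + 3;  // dead code in the other instantiations
}

int main() {
  assert(ReadDataCaseModel<kBackref>(0) == 2);  // only the kBackref branch runs
}
```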
@@ -126,7 +126,7 @@ class Deserializer : public SerializerDeserializer {
// A helper function for ReadData, templatized on the bytecode for efficiency.
// Returns the new value of {current}.
template <Where where, int space_number_if_any>
template <Bytecode bytecode, int space_number_if_any>
inline UnalignedSlot ReadDataCase(Isolate* isolate, UnalignedSlot current,
Address current_object_address, byte data,
bool write_barrier_needed);
......
@@ -123,130 +123,142 @@ class SerializerDeserializer : public RootVisitor {
void RestoreExternalReferenceRedirectors(
const std::vector<CallHandlerInfo>& call_handler_infos);
// TODO(ishell): reassign bytecodes and update this list
// once HowToCode and WhereToPoint are gone.
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
V(0x0e) \
V(0x2e) \
V(0x3e) \
V(0x3f) \
V(0x4e) \
V(0x58) \
V(0x59) \
V(0x5a) \
V(0x5b) \
V(0x5c) \
V(0x5d) \
V(0x5e) \
V(0x5f) \
V(0x67) \
V(0x6e) \
V(0x76) \
V(0x79) \
V(0x7a) \
V(0x7b) \
V(0x7c)
// ---------- byte code range 0x00..0x3f ----------
// Byte codes in this range represent Where.
// Where the pointed-to object can be found:
// clang-format off
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
V(0x06) V(0x07) V(0x0e) V(0x0f) \
/* Free range 0x26..0x2f */ \
V(0x26) V(0x27) \
V(0x28) V(0x29) V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
/* Free range 0x30..0x3f */ \
V(0x30) V(0x31) V(0x32) V(0x33) V(0x34) V(0x35) V(0x36) V(0x37) \
V(0x38) V(0x39) V(0x3a) V(0x3b) V(0x3c) V(0x3d) V(0x3e) V(0x3f) \
/* Free range 0x97..0x9f */ \
V(0x98) V(0x99) V(0x9a) V(0x9b) V(0x9c) V(0x9d) V(0x9e) V(0x9f) \
/* Free range 0xa0..0xaf */ \
V(0xa0) V(0xa1) V(0xa2) V(0xa3) V(0xa4) V(0xa5) V(0xa6) V(0xa7) \
V(0xa8) V(0xa9) V(0xaa) V(0xab) V(0xac) V(0xad) V(0xae) V(0xaf) \
/* Free range 0xb0..0xbf */ \
V(0xb0) V(0xb1) V(0xb2) V(0xb3) V(0xb4) V(0xb5) V(0xb6) V(0xb7) \
V(0xb8) V(0xb9) V(0xba) V(0xbb) V(0xbc) V(0xbd) V(0xbe) V(0xbf) \
/* Free range 0xc0..0xcf */ \
V(0xc0) V(0xc1) V(0xc2) V(0xc3) V(0xc4) V(0xc5) V(0xc6) V(0xc7) \
V(0xc8) V(0xc9) V(0xca) V(0xcb) V(0xcc) V(0xcd) V(0xce) V(0xcf) \
/* Free range 0xd0..0xdf */ \
V(0xd0) V(0xd1) V(0xd2) V(0xd3) V(0xd4) V(0xd5) V(0xd6) V(0xd7) \
V(0xd8) V(0xd9) V(0xda) V(0xdb) V(0xdc) V(0xdd) V(0xde) V(0xdf) \
/* Free range 0xe0..0xef */ \
V(0xe0) V(0xe1) V(0xe2) V(0xe3) V(0xe4) V(0xe5) V(0xe6) V(0xe7) \
V(0xe8) V(0xe9) V(0xea) V(0xeb) V(0xec) V(0xed) V(0xee) V(0xef) \
/* Free range 0xf0..0xff */ \
V(0xf0) V(0xf1) V(0xf2) V(0xf3) V(0xf4) V(0xf5) V(0xf6) V(0xf7) \
V(0xf8) V(0xf9) V(0xfa) V(0xfb) V(0xfc) V(0xfd) V(0xfe) V(0xff)
// clang-format on
// The static assert below will trigger when the number of preallocated spaces
// changed. If that happens, update the bytecode ranges in the comments below.
// changed. If that happens, update the kNewObject and kBackref bytecode
// ranges in the comments below.
STATIC_ASSERT(6 == kNumberOfSpaces);
enum Where {
// 0x00..0x05 Allocate new object, in specified space.
kNewObject = 0x00,
// 0x08..0x0d Reference to previous object from space.
kBackref = 0x08,
// 0x06 Object in the partial snapshot cache.
kPartialSnapshotCache = 0x06,
// 0x07 External reference referenced by id.
kExternalReference = 0x07,
// 0x16 Root array item.
kRootArray = 0x16,
// 0x17 Object provided in the attached list.
kAttachedReference = 0x17,
// 0x18 Object in the read-only object cache.
kReadOnlyObjectCache = 0x18,
// 0x0f Misc, see below (incl. 0x2f, 0x4f, 0x6f).
// 0x18..0x1f Misc, see below (incl. 0x38..0x3f, 0x58..0x5f, 0x78..0x7f).
};
static const int kWhereMask = 0x1f;
static const int kSpaceMask = 7;
STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
// ---------- Misc ----------
// Do nothing, used for padding.
static const int kNop = 0x2f;
// Move to next reserved chunk.
static const int kNextChunk = 0x4f;
// Deferring object content.
static const int kDeferred = 0x6f;
// Alignment prefixes 0x19..0x1b
static const int kAlignmentPrefix = 0x19;
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches.
static const int kSynchronize = 0x1c;
// Repeats of variable length.
static const int kVariableRepeat = 0x1d;
// Raw data of variable length.
// Used for embedder-allocated backing stores for TypedArrays.
static const int kOffHeapBackingStore = 0x1e;
// Used for embedder-provided serialization data for embedder fields.
static const int kEmbedderFieldsData = 0x1f;
static const int kVariableRawCode = 0x39;
static const int kVariableRawData = 0x3a;
static const int kInternalReference = 0x3b;
static const int kInternalReferenceEncoded = 0x3c;
// Used to encode external references provided through the API.
static const int kApiReference = 0x3d;
// In-place weak references
static const int kClearedWeakReference = 0x7d;
static const int kWeakPrefix = 0x7e;
// Encodes an off-heap instruction stream target.
static const int kOffHeapTarget = 0x7f;
// ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
static const int kNumberOfRootArrayConstants = 0x20;
// 0x80..0x9f
static const int kRootArrayConstants = 0x80;
static const int kRootArrayConstantsMask = 0x1f;
// 32 common raw data lengths.
static const int kNumberOfFixedRawData = 0x20;
// 0xc0..0xdf
static const int kFixedRawData = 0xc0;
static const int kOnePointerRawData = kFixedRawData;
static const int kFixedRawDataStart = kFixedRawData - 1;
// 16 repeats lengths.
static const int kNumberOfFixedRepeat = 0x10;
// 0xe0..0xef
static const int kFixedRepeat = 0xe0;
// 8 hot (recently seen or back-referenced) objects with optional skip.
static const int kNumberOfHotObjects = 8;
STATIC_ASSERT(kNumberOfHotObjects == HotObjectsList::kSize);
// 0xf0..0xf7
static const int kHotObject = 0xf0;
static const int kHotObjectMask = 0x07;
// ---------- special values ----------
enum Bytecode {
//
// ---------- byte code range 0x00..0x0f ----------
//
// 0x00..0x05 Allocate new object, in specified space.
kNewObject = 0x00,
// 0x08..0x0d Reference to previous object from specified space.
kBackref = 0x08,
//
// ---------- byte code range 0x10..0x25 ----------
//
// Object in the partial snapshot cache.
kPartialSnapshotCache = 0x10,
// Root array item.
kRootArray,
// Object provided in the attached list.
kAttachedReference,
// Object in the read-only object cache.
kReadOnlyObjectCache,
// Do nothing, used for padding.
kNop,
// Move to next reserved chunk.
kNextChunk,
// Deferring object content.
kDeferred,
// 3 alignment prefixes 0x17..0x19
kAlignmentPrefix = 0x17,
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches.
kSynchronize = 0x1a,
// Repeats of variable length.
kVariableRepeat,
// Used for embedder-allocated backing stores for TypedArrays.
kOffHeapBackingStore,
// Used for embedder-provided serialization data for embedder fields.
kEmbedderFieldsData,
// Raw data of variable length.
kVariableRawCode,
kVariableRawData,
// Used to encode external references provided through the API.
kApiReference,
// External reference referenced by id.
kExternalReference,
// Internal reference of a code object in the code stream.
kInternalReference,
// In-place weak references.
kClearedWeakReference,
kWeakPrefix,
// Encodes an off-heap instruction stream target.
kOffHeapTarget,
//
// ---------- byte code range 0x40..0x7f ----------
//
// 0x40..0x5f
kRootArrayConstants = 0x40,
// 0x60..0x7f
kFixedRawData = 0x60,
kOnePointerRawData = kFixedRawData,
kFixedRawDataStart = kFixedRawData - 1,
//
// ---------- byte code range 0x80..0x9f ----------
//
// 0x80..0x8f
kFixedRepeat = 0x80,
// 0x90..0x97
kHotObject = 0x90,
};
//
// Some other constants.
//
static const int kAnyOldSpace = -1;
// Sentinel after a new object to indicate that double alignment is needed.
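For orientation, a hedged sketch of decoding the reassigned byte layout (the helper names are hypothetical; the ranges come from the enum comments above):

```cpp
#include <cassert>

// Hypothetical helpers illustrating the new layout: 0x40..0x5f root array
// constants, 0x60..0x7f fixed raw data, 0x80..0x8f fixed repeats,
// 0x90..0x97 hot objects. An entry's index is its offset into the range.
enum { kRootArrayConstants = 0x40, kFixedRawData = 0x60,
       kFixedRepeat = 0x80, kHotObject = 0x90 };

bool IsRootArrayConstant(int b) { return b >= 0x40 && b <= 0x5f; }
int RootArrayConstantIndex(int b) { return b - kRootArrayConstants; }

int main() {
  assert(IsRootArrayConstant(0x41));
  assert(RootArrayConstantIndex(0x41) == 1);  // second root array constant
  assert(!IsRootArrayConstant(kFixedRepeat));
}
```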
@@ -268,8 +280,8 @@ class SerializerDeserializer : public RootVisitor {
// Decodes repeat count from a fixed repeat bytecode.
static int DecodeFixedRepeatCount(int bytecode) {
DCHECK(
IsInRange(bytecode, kFixedRepeat, kFixedRepeat + kNumberOfFixedRepeat));
DCHECK(IsInRange(bytecode, kFixedRepeat + 0,
kFixedRepeat + kNumberOfFixedRepeat));
return bytecode - kFixedRepeat + kFirstEncodableRepeatCount;
}
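The decode above is the inverse of folding the repeat count into the opcode byte. A hedged round-trip sketch, assuming kFirstEncodableRepeatCount is 2 (the real constant is defined elsewhere in this class):

```cpp
#include <cassert>

// Hedged sketch of the fixed-repeat encoding. kFirstEncodableRepeatCount
// is assumed to be 2 here; whatever its real value, encode and decode
// below stay inverses of each other.
enum { kFixedRepeat = 0x80, kNumberOfFixedRepeat = 0x10 };
const int kFirstEncodableRepeatCount = 2;  // assumption for this sketch

int EncodeFixedRepeatCount(int count) {
  return kFixedRepeat + count - kFirstEncodableRepeatCount;
}
int DecodeFixedRepeatCount(int bytecode) {
  return bytecode - kFixedRepeat + kFirstEncodableRepeatCount;
}

int main() {
  for (int c = kFirstEncodableRepeatCount;
       c < kFirstEncodableRepeatCount + kNumberOfFixedRepeat; ++c) {
    assert(DecodeFixedRepeatCount(EncodeFixedRepeatCount(c)) == c);
  }
}
```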
......
@@ -737,24 +737,11 @@ void Serializer::ObjectSerializer::VisitExternalReference(Code host,
void Serializer::ObjectSerializer::VisitInternalReference(Code host,
RelocInfo* rinfo) {
// We do not use skip from last patched pc to find the pc to patch, since
// target_address_address may not return addresses in ascending order when
// used for internal references. External references may be stored at the
// end of the code in the constant pool, whereas internal references are
// inline. That would cause the skip to be negative. Instead, we store the
// offset from code entry.
Address entry = Code::cast(object_)->entry();
DCHECK_GE(rinfo->target_internal_reference_address(), entry);
uintptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
DCHECK_LE(pc_offset, Code::cast(object_)->raw_instruction_size());
DCHECK_GE(rinfo->target_internal_reference(), entry);
uintptr_t target_offset = rinfo->target_internal_reference() - entry;
DCHECK_LE(target_offset, Code::cast(object_)->raw_instruction_size());
sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
? kInternalReference
: kInternalReferenceEncoded,
"InternalRef");
sink_->PutInt(pc_offset, "internal ref address");
sink_->Put(kInternalReference, "InternalRef");
sink_->PutInt(target_offset, "internal ref value");
}
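Serializer and deserializer stay symmetric: the Put/PutInt pair here matches the Get/GetInt pair in the deserializer's VisitInternalReference above. A hedged model of that stream contract (PutInt is a varint in the real code; kInternalReference's value of 0x22 follows from the enum above):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Hedged model (not V8 code) of the snapshot byte stream: what one side
// Put()s, the other side must Get() back in the same order.
struct Stream {
  std::vector<int> data;
  std::size_t pos = 0;
  void Put(int b) { data.push_back(b); }
  void PutInt(int v) { data.push_back(v); }  // real V8 writes a varint
  int Get() { return data[pos++]; }
  int GetInt() { return data[pos++]; }
};

int main() {
  const int kInternalReference = 0x22;  // value implied by the enum above
  Stream sink;
  sink.Put(kInternalReference);  // serializer: "InternalRef"
  sink.PutInt(0x230);            // serializer: "internal ref value"
  assert(sink.Get() == kInternalReference);  // deserializer: CHECK_EQ(data, ...)
  assert(sink.GetInt() == 0x230);            // deserializer: target_offset
}
```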
......