Commit 0d0a3416 authored by Jakob Gruber, committed by Commit Bot

[code] Prepare to move metadata out of the instructions area

No major functional changes in this CL, mostly it moves code around
to make follow-up CLs less messy.

 - Document Code layout.
 - New concepts: 'body' and 'metadata' areas of Code objects. The
   metadata area contains metadata tables, the body area includes
   both instructions and metadata (this is currently the 'instructions'
   area). Add accessors for these new areas.
 - An interesting detail: embedded builtins will have non-adjacent
   instruction and metadata areas, thus a concept of 'body' doesn't
   make sense there.
 - Also add raw_instruction_X_future accessors; these are used where
   we are actually interested in the instructions range, not the entire
   body. In a follow-up, current raw_instruction_X accessors will be
   replaced by raw_body_X, and raw_instruction_X_future by
   raw_instruction_X.

Bug: v8:11036
Change-Id: I1d85146b652e0c097c3602d4db1862d5d3898a7e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2491023
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70701}
parent 729235c2
......@@ -257,8 +257,8 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
flush_icache = true;
}
if (flush_icache) {
FlushInstructionCache(code.raw_instruction_start(),
code.raw_instruction_size());
FlushInstructionCache(code.raw_instruction_start_future(),
code.raw_instruction_size_future());
}
}
}
......
......@@ -519,8 +519,9 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
CHECK_IMPLIES(info()->IsNativeContextIndependent(),
code->IsNativeContextIndependent(isolate()));
// Counts both compiled code and metadata.
isolate()->counters()->total_compiled_code_size()->Increment(
code->raw_instruction_size());
code->raw_body_size());
LOG_CODE_EVENT(isolate(),
CodeLinePosInfoRecordEvent(code->raw_instruction_start(),
......
......@@ -959,7 +959,8 @@ void Code::CodeVerify(Isolate* isolate) {
CHECK_LE(safepoint_table_offset(), handler_table_offset());
CHECK_LE(handler_table_offset(), constant_pool_offset());
CHECK_LE(constant_pool_offset(), code_comments_offset());
CHECK_LE(code_comments_offset(), InstructionSize());
CHECK_LE(code_comments_offset(), unwinding_info_offset());
CHECK_LE(unwinding_info_offset(), BodySize());
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(raw_instruction_start(), kCodeAlignment));
// TODO(delphick): Refactor Factory::CodeBuilder::BuildInternal, so that the
......@@ -969,8 +970,7 @@ void Code::CodeVerify(Isolate* isolate) {
// CHECK_EQ(ReadOnlyHeap::Contains(*this), !IsExecutable());
relocation_info().ObjectVerify(isolate);
CHECK(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ||
Code::SizeFor(body_size()) <=
MemoryChunkLayout::MaxRegularCodeObjectSize() ||
CodeSize() <= MemoryChunkLayout::MaxRegularCodeObjectSize() ||
isolate->heap()->InSpace(*this, CODE_LO_SPACE));
Address last_gc_pc = kNullAddress;
......
......@@ -194,14 +194,12 @@ void CodeStatistics::CollectCommentStatistics(Isolate* isolate,
EnterComment(isolate, comment_txt, flat_delta);
}
// Collects code comment statistics
// Collects code comment statistics.
void CodeStatistics::CollectCodeCommentStatistics(HeapObject obj,
Isolate* isolate) {
// Bytecode objects do not contain RelocInfo. Only process code objects
// for code comment statistics.
if (!obj.IsCode()) {
return;
}
if (!obj.IsCode()) return;
Code code = Code::cast(obj);
CodeCommentsIterator cit(code.code_comments(), code.code_comments_size());
......@@ -214,8 +212,11 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject obj,
cit.Next();
}
DCHECK(0 <= prev_pc_offset && prev_pc_offset <= code.raw_instruction_size());
delta += static_cast<int>(code.raw_instruction_size() - prev_pc_offset);
// TODO(jgruber,v8:11036): Revisit this when separating instruction- and
// metadata areas. The logic will become a bit more complex since these areas
// will no longer be adjacent in some cases.
DCHECK(0 <= prev_pc_offset && prev_pc_offset <= code.raw_body_size());
delta += static_cast<int>(code.raw_body_size() - prev_pc_offset);
EnterComment(isolate, "NoComment", delta);
}
#endif
......
......@@ -67,18 +67,6 @@
namespace v8 {
namespace internal {
namespace {
int ComputeCodeObjectSize(const CodeDesc& desc) {
// TODO(jgruber,v8:11036): Distinguish instruction and metadata areas.
int object_size = Code::SizeFor(
Code::AlignedBodySizeFor(desc.instr_size + desc.unwinding_info_size));
DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
return object_size;
}
} // namespace
Factory::CodeBuilder::CodeBuilder(Isolate* isolate, const CodeDesc& desc,
CodeKind kind)
: isolate_(isolate),
......@@ -131,9 +119,12 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
isolate_->heap()->SetBasicBlockProfilingData(new_list);
}
// TODO(jgruber,v8:11036): Distinguish instruction and metadata areas.
const int body_size = code_desc_.instr_size + code_desc_.unwinding_info_size;
const int object_size = Code::SizeFor(body_size);
Handle<Code> code;
{
int object_size = ComputeCodeObjectSize(code_desc_);
Heap* heap = isolate_->heap();
CodePageCollectionMemoryModificationScope code_allocation(heap);
......@@ -167,8 +158,7 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
constexpr bool kIsNotOffHeapTrampoline = false;
// TODO(jgruber,v8:11036): Distinguish instruction and metadata areas.
code->set_raw_instruction_size(code_desc_.instr_size +
code_desc_.unwinding_info_size);
code->set_raw_instruction_size(body_size);
code->set_relocation_info(*reloc_info);
code->initialize_flags(kind_, is_turbofanned_, stack_slots_,
kIsNotOffHeapTrampoline);
......
......@@ -70,13 +70,6 @@ int AbstractCode::SizeIncludingMetadata() {
return GetBytecodeArray().SizeIncludingMetadata();
}
}
int AbstractCode::ExecutableSize() {
if (IsCode()) {
return GetCode().ExecutableSize();
} else {
return GetBytecodeArray().BytecodeArraySize();
}
}
Address AbstractCode::raw_instruction_start() {
if (IsCode()) {
......@@ -199,19 +192,16 @@ void Code::WipeOutHeader() {
}
void Code::clear_padding() {
// Clear the padding between the header and `raw_instruction_start`.
// Clear the padding between the header and `raw_body_start`.
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
FIELD_SIZE(kOptionalPaddingOffset));
}
// Clear the padding after `raw_instruction_end`.
// TODO(jgruber,v8:11036): Distinguish instruction and metadata areas.
DCHECK_EQ(unwinding_info_offset() + unwinding_info_size(), InstructionSize());
// Clear the padding after `raw_body_end`.
size_t trailing_padding_size =
CodeSize() - Code::kHeaderSize - raw_instruction_size();
memset(reinterpret_cast<void*>(raw_instruction_end()), 0,
trailing_padding_size);
CodeSize() - Code::kHeaderSize - raw_body_size();
memset(reinterpret_cast<void*>(raw_body_end()), 0, trailing_padding_size);
}
ByteArray Code::SourcePositionTable() const {
......@@ -230,18 +220,34 @@ void Code::set_next_code_link(Object value) {
code_data_container(kAcquireLoad).set_next_code_link(value);
}
int Code::InstructionSize() const {
if (is_off_heap_trampoline()) return OffHeapInstructionSize();
Address Code::raw_body_start() const { return raw_instruction_start(); }
Address Code::raw_body_end() const { return raw_instruction_end(); }
int Code::raw_body_size() const {
// TODO(jgruber,v8:11036): Distinguish instruction and metadata areas.
DCHECK_EQ(unwinding_info_offset() + unwinding_info_size(), InstructionSize());
return raw_instruction_size();
}
int Code::BodySize() const {
// TODO(jgruber,v8:11036): Distinguish instruction and metadata areas.
DCHECK_EQ(unwinding_info_offset() + unwinding_info_size(), InstructionSize());
return InstructionSize();
}
int Code::InstructionSize() const {
return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionSize()
: raw_instruction_size();
}
Address Code::raw_instruction_start() const {
return FIELD_ADDR(*this, kHeaderSize);
}
Address Code::InstructionStart() const {
if (is_off_heap_trampoline()) return OffHeapInstructionStart();
return raw_instruction_start();
return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionStart()
: raw_instruction_start();
}
Address Code::raw_instruction_end() const {
......@@ -249,14 +255,31 @@ Address Code::raw_instruction_end() const {
}
Address Code::InstructionEnd() const {
if (is_off_heap_trampoline()) return OffHeapInstructionEnd();
return raw_instruction_end();
return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionEnd()
: raw_instruction_end();
}
int Code::body_size() const {
// TODO(jgruber,v8:11036): Distinguish instruction and metadata areas.
DCHECK_EQ(unwinding_info_offset() + unwinding_info_size(), InstructionSize());
return AlignedBodySizeFor(raw_instruction_size());
Address Code::raw_instruction_start_future() const {
return raw_instruction_start();
}
Address Code::raw_instruction_end_future() const {
return raw_metadata_start();
}
int Code::raw_instruction_size_future() const {
return raw_instruction_size() - raw_metadata_size();
}
Address Code::raw_metadata_start() const {
return raw_instruction_start() + safepoint_table_offset();
}
Address Code::raw_metadata_end() const { return raw_instruction_end(); }
int Code::raw_metadata_size() const {
DCHECK_LE(raw_metadata_start(), raw_metadata_end());
return static_cast<int>(raw_metadata_end() - raw_metadata_start());
}
int Code::SizeIncludingMetadata() const {
......@@ -296,17 +319,6 @@ bool Code::contains(Address inner_pointer) {
return (address() <= inner_pointer) && (inner_pointer < address() + Size());
}
int Code::ExecutableSize() const {
// Check that the assumptions about the layout of the code object holds.
// TODO(jgruber,v8:11036): It's unclear what this function should return.
// Currently, it counts the header, instructions, and metadata tables as
// 'executable'. See also ExecutableInstructionSize which counts only
// instructions.
DCHECK_EQ(static_cast<int>(raw_instruction_start() - address()),
Code::kHeaderSize);
return raw_instruction_size() + Code::kHeaderSize;
}
// static
void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
DCHECK_EQ(dest.length(), desc.reloc_size);
......@@ -315,7 +327,7 @@ void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
static_cast<size_t>(desc.reloc_size));
}
int Code::CodeSize() const { return SizeFor(body_size()); }
int Code::CodeSize() const { return SizeFor(raw_body_size()); }
CodeKind Code::kind() const {
STATIC_ASSERT(FIELD_SIZE(kFlagsOffset) == kInt32Size);
......
This diff is collapsed.
......@@ -172,7 +172,7 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
DisallowHeapAllocation no_gc;
Address old_pc = PointerAuthentication::AuthenticatePC(return_address, 0);
DCHECK_LE(re_code.raw_instruction_start(), old_pc);
DCHECK_LE(old_pc, re_code.raw_instruction_end());
DCHECK_LE(old_pc, re_code.raw_instruction_end_future());
StackLimitCheck check(isolate);
bool js_has_overflowed = check.JsHasOverflowed();
......
......@@ -660,8 +660,11 @@ void Deserializer::RelocInfoVisitor::VisitInternalReference(Code host,
// Internal reference target is encoded as an offset from code entry.
int target_offset = source().GetInt();
// TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
// consider using raw_instruction_size() instead of raw_body_size() in the
// future.
DCHECK_LT(static_cast<unsigned>(target_offset),
static_cast<unsigned>(host.raw_instruction_size()));
static_cast<unsigned>(host.raw_body_size()));
Address target = host.entry() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
rinfo->pc(), target, rinfo->rmode());
......
......@@ -224,7 +224,7 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
Builtins::name(i));
}
uint32_t length = static_cast<uint32_t>(code.raw_instruction_size());
uint32_t length = static_cast<uint32_t>(code.raw_body_size());
DCHECK_EQ(0, raw_code_size % kCodeAlignment);
metadata[i].instructions_offset = raw_code_size;
......@@ -268,10 +268,9 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
Code code = builtins->builtin(i);
uint32_t offset = metadata[i].instructions_offset;
uint8_t* dst = raw_code_start + offset;
DCHECK_LE(RawCodeOffset() + offset + code.raw_instruction_size(),
blob_code_size);
std::memcpy(dst, reinterpret_cast<uint8_t*>(code.raw_instruction_start()),
code.raw_instruction_size());
DCHECK_LE(RawCodeOffset() + offset + code.raw_body_size(), blob_code_size);
std::memcpy(dst, reinterpret_cast<uint8_t*>(code.raw_body_start()),
code.raw_body_size());
}
EmbeddedData d(blob_code, blob_code_size, blob_metadata, blob_metadata_size);
......
......@@ -975,7 +975,10 @@ void Serializer::ObjectSerializer::VisitInternalReference(Code host,
Address entry = Handle<Code>::cast(object_)->entry();
DCHECK_GE(rinfo->target_internal_reference(), entry);
uintptr_t target_offset = rinfo->target_internal_reference() - entry;
DCHECK_LE(target_offset, Handle<Code>::cast(object_)->raw_instruction_size());
// TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
// consider using raw_instruction_size() instead of raw_body_size() in the
// future.
DCHECK_LE(target_offset, Handle<Code>::cast(object_)->raw_body_size());
sink_->Put(kInternalReference, "InternalRef");
sink_->PutInt(target_offset, "internal ref value");
}
......
......@@ -1151,7 +1151,7 @@ void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
namespace {
void RecordStats(const Code code, Counters* counters) {
counters->wasm_generated_code_size()->Increment(code.body_size());
counters->wasm_generated_code_size()->Increment(code.raw_body_size());
counters->wasm_reloc_size()->Increment(code.relocation_info().length());
}
......
......@@ -247,10 +247,11 @@ TEST(TickEvents) {
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame3_code, "ddd");
EnqueueTickSampleEvent(processor, frame1_code->raw_instruction_start());
EnqueueTickSampleEvent(
processor,
frame2_code->raw_instruction_start() + frame2_code->ExecutableSize() / 2,
frame1_code->raw_instruction_start() + frame1_code->ExecutableSize() / 2);
EnqueueTickSampleEvent(processor,
frame2_code->raw_instruction_start() +
frame2_code->raw_instruction_size() / 2,
frame1_code->raw_instruction_start() +
frame1_code->raw_instruction_size() / 2);
EnqueueTickSampleEvent(processor, frame3_code->raw_instruction_end() - 1,
frame2_code->raw_instruction_end() - 1,
frame1_code->raw_instruction_end() - 1);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment