Commit 4299cb69 authored by Clemens Backes, committed by Commit Bot

[wasm] Introduce declared_function_index helper

We often only need to store information about declared (i.e.
non-imported) functions in a wasm module. Thus we subtract the number of
imported functions from a function index. We do this in several places,
with different amounts of checking.

This CL extracts this logic to a new {declared_function_index} helper
and uses it wherever we do this translation. This more or less
establishes the concept of "declared function index" and hopefully
prevents errors in the future.

R=jkummerow@chromium.org

Bug: chromium:1045767
Change-Id: I7e957401495a2a8cb5d2c51031f9c69fe46195d8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2020763
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65994}
parent dd11f979
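
For orientation, here is a minimal, self-contained sketch of the index translation this CL centralizes. The trimmed WasmModule struct, the plain asserts (standing in for V8's DCHECK macros), and the main driver are illustrative stand-ins rather than the real V8 definitions; the actual helper, added to the module header, appears in the final hunk below.

#include <cassert>

// Trimmed stand-in for v8::internal::wasm::WasmModule (illustration only).
struct WasmModule {
  int num_imported_functions = 0;
  int num_declared_functions = 0;
};

// Mirrors the helper introduced by this CL: maps a module-wide function
// index to an index relative to the first declared (non-imported) function.
inline int declared_function_index(const WasmModule* module, int func_index) {
  assert(module->num_imported_functions <= func_index);
  int declared_idx = func_index - module->num_imported_functions;
  assert(declared_idx < module->num_declared_functions);
  return declared_idx;
}

int main() {
  WasmModule module;
  module.num_imported_functions = 3;  // imports occupy indices 0..2
  module.num_declared_functions = 5;  // declared functions are indices 3..7
  // Module-wide index 4 is the second declared function: declared index 1.
  assert(declared_function_index(&module, 4) == 1);
}
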
@@ -5549,7 +5549,7 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
   if (is_asmjs_module(module)) {
     std::pair<int, int> offsets =
         module->asm_js_offset_information->GetFunctionOffsets(
-            function_data->function_index() - module->num_imported_functions);
+            declared_function_index(module, function_data->function_index()));
     Handle<String> source(
         String::cast(Script::cast(shared_info->script()).source()), isolate);
     return isolate->factory()->NewSubString(source, offsets.first,
...
@@ -642,7 +642,7 @@ ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint,
 const WasmCompilationHint* GetCompilationHint(const WasmModule* module,
                                               uint32_t func_index) {
   DCHECK_LE(module->num_imported_functions, func_index);
-  uint32_t hint_index = func_index - module->num_imported_functions;
+  uint32_t hint_index = declared_function_index(module, func_index);
   const std::vector<WasmCompilationHint>& compilation_hints =
       module->compilation_hints;
   if (hint_index < compilation_hints.size()) {
@@ -2560,7 +2560,7 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
     // compiled code. Any lazily compiled function does not contribute to the
     // compilation progress but may publish code to the code manager.
     int slot_index =
-        code->index() - native_module_->module()->num_imported_functions;
+        declared_function_index(native_module_->module(), code->index());
     uint8_t function_progress = compilation_progress_[slot_index];
     ExecutionTier required_baseline_tier =
         RequiredBaselineTierField::decode(function_progress);
...
@@ -917,7 +917,7 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
   }

   // Add jump table entry for jump to the lazy compile stub.
-  uint32_t slot_index = func_index - module_->num_imported_functions;
+  uint32_t slot_index = declared_function_index(module(), func_index);
   DCHECK_NULL(code_table_[slot_index]);
   Address lazy_compile_target =
       lazy_compile_table_->instruction_start() +
@@ -1050,7 +1050,7 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
   // Unless tier down to Liftoff: update code table but avoid to fall back to
   // less optimized code. We use the new code if it was compiled with a higher
   // tier.
-  uint32_t slot_idx = code->index() - module_->num_imported_functions;
+  uint32_t slot_idx = declared_function_index(module(), code->index());
   WasmCode* prior_code = code_table_[slot_idx];
   bool update_code_table =
       tier_down_ ? !prior_code || code->tier() == ExecutionTier::kLiftoff
@@ -1122,18 +1122,14 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
 WasmCode* NativeModule::GetCode(uint32_t index) const {
   base::MutexGuard guard(&allocation_mutex_);
-  DCHECK_LT(index, num_functions());
-  DCHECK_LE(module_->num_imported_functions, index);
-  WasmCode* code = code_table_[index - module_->num_imported_functions];
+  WasmCode* code = code_table_[declared_function_index(module(), index)];
   if (code) WasmCodeRefScope::AddRef(code);
   return code;
 }

 bool NativeModule::HasCode(uint32_t index) const {
   base::MutexGuard guard(&allocation_mutex_);
-  DCHECK_LT(index, num_functions());
-  DCHECK_LE(module_->num_imported_functions, index);
-  return code_table_[index - module_->num_imported_functions] != nullptr;
+  return code_table_[declared_function_index(module(), index)] != nullptr;
 }

 void NativeModule::SetWasmSourceMap(
@@ -1346,8 +1342,7 @@ WasmCode* NativeModule::Lookup(Address pc) const {
 }

 uint32_t NativeModule::GetJumpTableOffset(uint32_t func_index) const {
-  uint32_t slot_idx = func_index - module_->num_imported_functions;
-  DCHECK_GT(module_->num_declared_functions, slot_idx);
+  uint32_t slot_idx = declared_function_index(module(), func_index);
   return JumpTableAssembler::JumpSlotIndexToOffset(slot_idx);
 }
...
@@ -618,7 +618,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
     DCHECK_LT(func_index, num_functions());
     DCHECK_LE(module_->num_imported_functions, func_index);
     if (!interpreter_redirections_) return false;
-    uint32_t bitset_idx = func_index - module_->num_imported_functions;
+    uint32_t bitset_idx = declared_function_index(module(), func_index);
     uint8_t byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
     return byte & (1 << (bitset_idx % kBitsPerByte));
   }
@@ -632,7 +632,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
           new uint8_t[RoundUp<kBitsPerByte>(module_->num_declared_functions) /
                       kBitsPerByte]{});
     }
-    uint32_t bitset_idx = func_index - module_->num_imported_functions;
+    uint32_t bitset_idx = declared_function_index(module(), func_index);
     uint8_t& byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
     byte |= 1 << (bitset_idx % kBitsPerByte);
   }
...
@@ -117,14 +117,15 @@ AsmJsOffsetInformation::AsmJsOffsetInformation(
 AsmJsOffsetInformation::~AsmJsOffsetInformation() = default;

-int AsmJsOffsetInformation::GetSourcePosition(int func_index, int byte_offset,
+int AsmJsOffsetInformation::GetSourcePosition(int declared_func_index,
+                                              int byte_offset,
                                               bool is_at_number_conversion) {
   EnsureDecodedOffsets();
-  DCHECK_LE(0, func_index);
-  DCHECK_GT(decoded_offsets_->functions.size(), func_index);
+  DCHECK_LE(0, declared_func_index);
+  DCHECK_GT(decoded_offsets_->functions.size(), declared_func_index);

   std::vector<AsmJsOffsetEntry>& function_offsets =
-      decoded_offsets_->functions[func_index].entries;
+      decoded_offsets_->functions[declared_func_index].entries;

   auto byte_offset_less = [](const AsmJsOffsetEntry& a,
                              const AsmJsOffsetEntry& b) {
@@ -141,13 +142,14 @@ int AsmJsOffsetInformation::GetSourcePosition(int func_index, int byte_offset,
                              : it->source_position_call;
 }

-std::pair<int, int> AsmJsOffsetInformation::GetFunctionOffsets(int func_index) {
+std::pair<int, int> AsmJsOffsetInformation::GetFunctionOffsets(
+    int declared_func_index) {
   EnsureDecodedOffsets();
-  DCHECK_LE(0, func_index);
-  DCHECK_GT(decoded_offsets_->functions.size(), func_index);
+  DCHECK_LE(0, declared_func_index);
+  DCHECK_GT(decoded_offsets_->functions.size(), declared_func_index);

   AsmJsOffsetFunctionEntries& function_info =
-      decoded_offsets_->functions[func_index];
+      decoded_offsets_->functions[declared_func_index];
   return {function_info.start_offset, function_info.end_offset};
 }
@@ -616,11 +618,8 @@ int GetSourcePosition(const WasmModule* module, uint32_t func_index,
   }

   // asm.js modules have an additional offset table that must be searched.
-  // Note: {AsmJsOffsetInformation::GetSourcePosition} expects the function
-  // index relative to the first non-imported function.
-  DCHECK_LE(module->num_imported_functions, func_index);
   return module->asm_js_offset_information->GetSourcePosition(
-      func_index - module->num_imported_functions, byte_offset,
+      declared_function_index(module, func_index), byte_offset,
       is_at_number_conversion);
 }
...
@@ -370,6 +370,15 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
 int GetSourcePosition(const WasmModule*, uint32_t func_index,
                       uint32_t byte_offset, bool is_at_number_conversion);

+// Translate function index to the index relative to the first declared (i.e.
+// non-imported) function.
+inline int declared_function_index(const WasmModule* module, int func_index) {
+  DCHECK_LE(module->num_imported_functions, func_index);
+  int declared_idx = func_index - module->num_imported_functions;
+  DCHECK_GT(module->num_declared_functions, declared_idx);
+  return declared_idx;
+}
+
 // TruncatedUserString makes it easy to output names up to a certain length, and
 // output a truncation followed by '...' if they exceed a limit.
 // Use like this:
...