Commit af1ccea7 authored by Victor Gomes, committed by V8 LUCI CQ

[heap] Support registering code on the background thread

We use a mutex to avoid data races when reading from or writing to
the code object registry.

Functions called only by the sweeper run during safepoints and
do not need to be protected by the mutex.

Bug: v8:12054
Change-Id: Ie85bf0422622eee7f2836ecae132397a6aa4ed59
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3234721
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Auto-Submit: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77490}
parent eaf40c20
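The scheme the message describes, reduced to a minimal self-contained sketch (illustrative only: the class and member names are simplified, and std::mutex stands in for V8's base::Mutex). Registration can now happen on a background compile thread and therefore takes the lock, while the sweeper-only paths run during a safepoint and stay unguarded:

// Illustrative sketch only; not V8 code.
#include <cstdint>
#include <mutex>
#include <vector>

class RegistrySketch {
 public:
  // Called from the main thread or a background (baseline compiler) thread,
  // so the vector is only touched while holding the mutex.
  void RegisterNewlyAllocatedCodeObject(uintptr_t code) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (is_sorted_) {
      is_sorted_ = registry_.empty() || registry_.back() < code;
    }
    registry_.push_back(code);
  }

  // Called only by the sweeper, which runs during a safepoint: no other
  // thread can be registering or querying then, so no lock is taken.
  void Clear() {
    registry_.clear();
    is_sorted_ = true;
  }

 private:
  std::vector<uintptr_t> registry_;
  bool is_sorted_ = true;
  std::mutex mutex_;
};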
@@ -43,6 +43,10 @@ class BaselineCompilerTask {
     compiler.GenerateCode();
     maybe_code_ = local_isolate->heap()->NewPersistentMaybeHandle(
         compiler.Build(local_isolate));
+    Handle<Code> code;
+    if (maybe_code_.ToHandle(&code)) {
+      local_isolate->heap()->RegisterCodeObject(code);
+    }
   }
 
   // Executed in the main thread.
@@ -52,7 +56,6 @@ class BaselineCompilerTask {
     if (FLAG_print_code) {
       code->Print();
     }
-    isolate->heap()->RegisterCodeObject(code);
     shared_function_info_->set_baseline_code(*code, kReleaseStore);
     if (V8_LIKELY(FLAG_use_osr)) {
       // Arm back edges for OSR
@@ -12,6 +12,7 @@ namespace v8 {
 namespace internal {
 
 void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
+  base::MutexGuard guard(&code_object_registry_mutex_);
   if (is_sorted_) {
     is_sorted_ =
         (code_object_registry_.empty() || code_object_registry_.back() < code);
@@ -20,22 +21,29 @@ void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
 }
 
 void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
+  // This function is not protected by the mutex, and should only be called
+  // by the sweeper.
   DCHECK(is_sorted_);
   DCHECK(code_object_registry_.empty() || code_object_registry_.back() < code);
   code_object_registry_.push_back(code);
 }
 
 void CodeObjectRegistry::Clear() {
+  // This function is not protected by the mutex, and should only be called
+  // by the sweeper.
   code_object_registry_.clear();
   is_sorted_ = true;
 }
 
 void CodeObjectRegistry::Finalize() {
+  // This function is not protected by the mutex, and should only be called
+  // by the sweeper.
   DCHECK(is_sorted_);
   code_object_registry_.shrink_to_fit();
 }
 
 bool CodeObjectRegistry::Contains(Address object) const {
+  base::MutexGuard guard(&code_object_registry_mutex_);
   if (!is_sorted_) {
     std::sort(code_object_registry_.begin(), code_object_registry_.end());
     is_sorted_ = true;
@@ -46,6 +54,7 @@ bool CodeObjectRegistry::Contains(Address object) const {
 
 Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
     Address address) const {
+  base::MutexGuard guard(&code_object_registry_mutex_);
   if (!is_sorted_) {
     std::sort(code_object_registry_.begin(), code_object_registry_.end());
     is_sorted_ = true;
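Note that Contains and GetCodeObjectStartFromInnerAddress are const but may lazily sort the registry, which is a write; that is why they take the mutex as well, and why the members are declared mutable. A minimal sketch of that pattern (illustrative only: simplified names, std::mutex instead of base::Mutex, uintptr_t addresses):

// Illustrative sketch only; not V8 code.
#include <algorithm>
#include <cstdint>
#include <mutex>
#include <vector>

class LazySortedRegistry {
 public:
  void Add(uintptr_t value) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (is_sorted_) is_sorted_ = values_.empty() || values_.back() < value;
    values_.push_back(value);
  }

  // Returns the largest registered value <= address, i.e. the start of the
  // object containing that inner address (0 if there is none). The first
  // lookup after unsorted insertions sorts the vector, so it must be
  // serialized with Add() by the same mutex.
  uintptr_t StartFromInnerAddress(uintptr_t address) const {
    std::lock_guard<std::mutex> guard(mutex_);
    if (!is_sorted_) {
      std::sort(values_.begin(), values_.end());
      is_sorted_ = true;
    }
    auto it = std::upper_bound(values_.begin(), values_.end(), address);
    return it == values_.begin() ? 0 : *(it - 1);
  }

 private:
  mutable std::vector<uintptr_t> values_;
  mutable bool is_sorted_ = true;
  mutable std::mutex mutex_;
};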
@@ -33,6 +33,7 @@ class V8_EXPORT_PRIVATE CodeObjectRegistry {
   // that it can be lazily sorted during GetCodeObjectStartFromInnerAddress.
   mutable std::vector<Address> code_object_registry_;
   mutable bool is_sorted_ = true;
+  mutable base::Mutex code_object_registry_mutex_;
 };
 
 }  // namespace internal
@@ -100,6 +100,10 @@ class V8_EXPORT_PRIVATE LocalHeap {
     return code_space_allocator_.get();
   }
 
+  void RegisterCodeObject(Handle<Code> code) {
+    heap()->RegisterCodeObject(code);
+  }
+
   // Mark/Unmark linear allocation areas black. Used for black allocation.
   void MarkLinearAllocationAreaBlack();
   void UnmarkLinearAllocationArea();
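With the registry guarded, LocalHeap::RegisterCodeObject can simply forward to the shared heap registry from a background thread. A small stand-alone demo of the resulting usage pattern (illustrative only: plain std::thread and std::mutex with made-up addresses, not V8 APIs), with a background compile thread and the main thread registering concurrently:

// Illustrative demo only: two threads registering code addresses into a
// shared vector; the mutex serializes the pushes, mirroring the CL's scheme.
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

int main() {
  std::vector<uintptr_t> registry;
  std::mutex registry_mutex;

  auto register_code = [&](uintptr_t base) {
    for (uintptr_t i = 0; i < 1000; ++i) {
      std::lock_guard<std::mutex> guard(registry_mutex);
      registry.push_back(base + i * 64);  // pretend 64-byte code objects
    }
  };

  // "Background compiler" thread and the main thread register concurrently.
  std::thread background(register_code, 0x100000);
  register_code(0x200000);
  background.join();

  std::printf("registered %zu code objects\n", registry.size());
  return 0;
}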