Commit 6d86857c authored by Clemens Hammacher, committed by Commit Bot

[wasm] Remove AddressRange, use base::AddressRegion

R=ahaas@chromium.org

Bug: v8:8015
Change-Id: Ic449b76ab3957bb989bbb1fc9cc1fb4782db7acf
Reviewed-on: https://chromium-review.googlesource.com/1240119
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56240}
parent 980a9602
......@@ -5,14 +5,14 @@
#ifndef V8_BASE_ADDRESS_REGION_H_
#define V8_BASE_ADDRESS_REGION_H_
#include <type_traits>
#include <iostream>
#include "src/base/macros.h"
namespace v8 {
namespace base {
// Helper class representing an address region of certian size.
// Helper class representing an address region of certain size.
class AddressRegion {
public:
typedef uintptr_t Address;
......@@ -41,14 +41,28 @@ class AddressRegion {
return (offset < size_) && (offset <= size_ - size);
}
bool contains(const AddressRegion& region) const {
bool contains(AddressRegion region) const {
return contains(region.address_, region.size_);
}
// Two regions compare equal iff both their start address and size match.
bool operator==(AddressRegion rhs) const {
  return (address_ == rhs.address_) && (size_ == rhs.size_);
}
// Inequality is defined as the exact negation of equality.
bool operator!=(AddressRegion rhs) const {
  return !(*this == rhs);
}
private:
Address address_ = 0;
size_t size_ = 0;
};
ASSERT_TRIVIALLY_COPYABLE(AddressRegion);
// Pretty-print an AddressRegion as "[<start pointer>+<size>]".
// The start address is streamed as a pointer value (hex).
inline std::ostream& operator<<(std::ostream& os, AddressRegion region) {
  void* start = reinterpret_cast<void*>(region.begin());
  os << "[" << start << "+" << region.size() << "]";
  return os;
}
} // namespace base
} // namespace v8
......
......@@ -44,53 +44,56 @@ struct WasmCodeUniquePtrComparator {
} // namespace
void DisjointAllocationPool::Merge(AddressRange range) {
auto dest_it = ranges_.begin();
auto dest_end = ranges_.end();
void DisjointAllocationPool::Merge(base::AddressRegion region) {
auto dest_it = regions_.begin();
auto dest_end = regions_.end();
// Skip over dest ranges strictly before {range}.
while (dest_it != dest_end && dest_it->end < range.start) ++dest_it;
// Skip over dest regions strictly before {region}.
while (dest_it != dest_end && dest_it->end() < region.begin()) ++dest_it;
// After last dest range: insert and done.
// After last dest region: insert and done.
if (dest_it == dest_end) {
ranges_.push_back(range);
regions_.push_back(region);
return;
}
// Adjacent (from below) to dest: merge and done.
if (dest_it->start == range.end) {
dest_it->start = range.start;
if (dest_it->begin() == region.end()) {
base::AddressRegion merged_region{region.begin(),
region.size() + dest_it->size()};
DCHECK_EQ(merged_region.end(), dest_it->end());
*dest_it = merged_region;
return;
}
// Before dest: insert and done.
if (dest_it->start > range.end) {
ranges_.insert(dest_it, range);
if (dest_it->begin() > region.end()) {
regions_.insert(dest_it, region);
return;
}
// Src is adjacent from above. Merge and check whether the merged range is now
// adjacent to the next range.
DCHECK_EQ(dest_it->end, range.start);
dest_it->end = range.end;
// Src is adjacent from above. Merge and check whether the merged region is
// now adjacent to the next region.
DCHECK_EQ(dest_it->end(), region.begin());
dest_it->set_size(dest_it->size() + region.size());
DCHECK_EQ(dest_it->end(), region.end());
auto next_dest = dest_it;
++next_dest;
if (next_dest != dest_end && dest_it->end == next_dest->start) {
dest_it->end = next_dest->end;
ranges_.erase(next_dest);
if (next_dest != dest_end && dest_it->end() == next_dest->begin()) {
dest_it->set_size(dest_it->size() + next_dest->size());
DCHECK_EQ(dest_it->end(), next_dest->end());
regions_.erase(next_dest);
}
}
AddressRange DisjointAllocationPool::Allocate(size_t size) {
for (auto it = ranges_.begin(), end = ranges_.end(); it != end; ++it) {
size_t range_size = it->size();
if (size > range_size) continue;
AddressRange ret{it->start, it->start + size};
if (size == range_size) {
ranges_.erase(it);
base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
for (auto it = regions_.begin(), end = regions_.end(); it != end; ++it) {
if (size > it->size()) continue;
base::AddressRegion ret{it->begin(), size};
if (size == it->size()) {
regions_.erase(it);
} else {
it->start += size;
DCHECK_LT(it->start, it->end);
*it = base::AddressRegion{it->begin() + size, it->size() - size};
}
return ret;
}
......@@ -325,7 +328,7 @@ NativeModule::NativeModule(Isolate* isolate, const WasmFeatures& enabled,
: enabled_features_(enabled),
module_(std::move(module)),
compilation_state_(NewCompilationState(isolate, env)),
free_code_space_({code_space.address(), code_space.end()}),
free_code_space_(code_space.region()),
wasm_code_manager_(code_manager),
can_request_more_memory_(can_request_more),
use_trap_handler_(env.use_trap_handler) {
......@@ -653,10 +656,11 @@ void NativeModule::InstallCode(WasmCode* code) {
}
Address NativeModule::AllocateForCode(size_t size) {
DCHECK_LT(0, size);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
// This happens under a lock assumed by the caller.
size = RoundUp(size, kCodeAlignment);
AddressRange mem = free_code_space_.Allocate(size);
base::AddressRegion mem = free_code_space_.Allocate(size);
if (mem.is_empty()) {
if (!can_request_more_memory_) return kNullAddress;
......@@ -668,13 +672,14 @@ Address NativeModule::AllocateForCode(size_t size) {
if (!new_mem.IsReserved()) return kNullAddress;
wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
free_code_space_.Merge({new_mem.address(), new_mem.end()});
free_code_space_.Merge(new_mem.region());
owned_code_space_.emplace_back(std::move(new_mem));
mem = free_code_space_.Allocate(size);
DCHECK(!mem.is_empty());
}
Address commit_start = RoundUp(mem.start, page_allocator->AllocatePageSize());
Address commit_end = RoundUp(mem.end, page_allocator->AllocatePageSize());
Address commit_start =
RoundUp(mem.begin(), page_allocator->AllocatePageSize());
Address commit_end = RoundUp(mem.end(), page_allocator->AllocatePageSize());
// {commit_start} will be either mem.begin() or the start of the next page.
// {commit_end} will be the start of the page after the one in which
// the allocation ends.
......@@ -685,7 +690,7 @@ Address NativeModule::AllocateForCode(size_t size) {
// The end needs to be committed all through the end of the page.
if (commit_start < commit_end) {
#if V8_OS_WIN
// On Windows, we cannot commit a range that straddles different
// On Windows, we cannot commit a region that straddles different
// reservations of virtual memory. Because we bump-allocate, and because, if
// we need more memory, we append that memory at the end of the
// owned_code_space_ list, we traverse that list in reverse order to find
......@@ -712,10 +717,10 @@ Address NativeModule::AllocateForCode(size_t size) {
committed_code_space_.fetch_add(commit_size);
#endif
}
DCHECK(IsAligned(mem.start, kCodeAlignment));
DCHECK(IsAligned(mem.begin(), kCodeAlignment));
allocated_code_space_.Merge(mem);
TRACE_HEAP("Code alloc for %p: %" PRIuPTR ",+%zu\n", this, mem.start, size);
return mem.start;
TRACE_HEAP("Code alloc for %p: %" PRIuPTR ",+%zu\n", this, mem.begin(), size);
return mem.begin();
}
WasmCode* NativeModule::Lookup(Address pc) const {
......@@ -976,18 +981,18 @@ bool NativeModule::SetExecutable(bool executable) {
return true;
}
#endif
for (auto& range : allocated_code_space_.ranges()) {
for (auto& region : allocated_code_space_.regions()) {
// allocated_code_space_ is fine-grained, so we need to
// page-align it.
size_t range_size =
RoundUp(range.size(), page_allocator->AllocatePageSize());
if (!SetPermissions(page_allocator, range.start, range_size,
size_t region_size =
RoundUp(region.size(), page_allocator->AllocatePageSize());
if (!SetPermissions(page_allocator, region.begin(), region_size,
permission)) {
return false;
}
TRACE_HEAP("Set %p:%p to executable:%d\n",
reinterpret_cast<void*>(range.start),
reinterpret_cast<void*>(range.end), executable);
reinterpret_cast<void*>(region.begin()),
reinterpret_cast<void*>(region.end()), executable);
}
}
is_executable_ = executable;
......@@ -1035,12 +1040,12 @@ NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
auto iter = lookup_map_.upper_bound(pc);
if (iter == lookup_map_.begin()) return nullptr;
--iter;
Address range_start = iter->first;
Address range_end = iter->second.first;
Address region_start = iter->first;
Address region_end = iter->second.first;
NativeModule* candidate = iter->second.second;
DCHECK_NOT_NULL(candidate);
return range_start <= pc && pc < range_end ? candidate : nullptr;
return region_start <= pc && pc < region_end ? candidate : nullptr;
}
WasmCode* WasmCodeManager::LookupCode(Address pc) const {
......
......@@ -33,47 +33,34 @@ class WasmCodeManager;
class WasmMemoryTracker;
struct WasmModule;
struct AddressRange {
Address start;
Address end;
AddressRange(Address s, Address e) : start(s), end(e) {
DCHECK_LE(start, end);
DCHECK_IMPLIES(start == kNullAddress, end == kNullAddress);
}
AddressRange() : AddressRange(kNullAddress, kNullAddress) {}
size_t size() const { return static_cast<size_t>(end - start); }
bool is_empty() const { return start == end; }
operator bool() const { return start == kNullAddress; }
};
// Sorted, disjoint and non-overlapping memory ranges. A range is of the
// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). Adjacent regions are never kept separate: a pair
// [start, end), [end, other_end) is always merged into [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
public:
DisjointAllocationPool() = default;
explicit DisjointAllocationPool(AddressRange range) : ranges_({range}) {}
explicit DisjointAllocationPool(base::AddressRegion region)
: regions_({region}) {}
DisjointAllocationPool(DisjointAllocationPool&& other) = default;
DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;
// Merge the parameter range into this object while preserving ordering of the
// ranges. The assumption is that the passed parameter is not intersecting
// this object - for example, it was obtained from a previous Allocate.
void Merge(AddressRange);
// Merge the parameter region into this object while preserving ordering of
// the regions. The assumption is that the passed parameter is not
// intersecting this object - for example, it was obtained from a previous
// Allocate.
void Merge(base::AddressRegion);
// Allocate a contiguous range of size {size}. Return an empty pool on
// Allocate a contiguous region of size {size}. Return an empty pool on
// failure.
AddressRange Allocate(size_t size);
base::AddressRegion Allocate(size_t size);
bool IsEmpty() const { return ranges_.empty(); }
const std::list<AddressRange>& ranges() const { return ranges_; }
bool IsEmpty() const { return regions_.empty(); }
const std::list<base::AddressRegion>& regions() const { return regions_; }
private:
std::list<AddressRange> ranges_;
std::list<base::AddressRegion> regions_;
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool)
};
......
......@@ -17,35 +17,34 @@ namespace wasm_heap_unittest {
class DisjointAllocationPoolTest : public ::testing::Test {
public:
Address A(size_t n) { return static_cast<Address>(n); }
void CheckPool(const DisjointAllocationPool& mem,
std::initializer_list<AddressRange> expected_ranges);
void CheckRange(AddressRange range1, AddressRange range2);
DisjointAllocationPool Make(std::initializer_list<AddressRange> ranges);
std::initializer_list<base::AddressRegion> expected_regions);
void CheckRange(base::AddressRegion region1, base::AddressRegion region2);
DisjointAllocationPool Make(
std::initializer_list<base::AddressRegion> regions);
};
void DisjointAllocationPoolTest::CheckPool(
const DisjointAllocationPool& mem,
std::initializer_list<AddressRange> expected_ranges) {
const auto& ranges = mem.ranges();
CHECK_EQ(ranges.size(), expected_ranges.size());
auto iter = expected_ranges.begin();
for (auto it = ranges.begin(), e = ranges.end(); it != e; ++it, ++iter) {
std::initializer_list<base::AddressRegion> expected_regions) {
const auto& regions = mem.regions();
CHECK_EQ(regions.size(), expected_regions.size());
auto iter = expected_regions.begin();
for (auto it = regions.begin(), e = regions.end(); it != e; ++it, ++iter) {
CHECK_EQ(*it, *iter);
}
}
void DisjointAllocationPoolTest::CheckRange(AddressRange range1,
AddressRange range2) {
CHECK_EQ(range1.start, range2.start);
CHECK_EQ(range1.end, range2.end);
void DisjointAllocationPoolTest::CheckRange(base::AddressRegion region1,
base::AddressRegion region2) {
CHECK_EQ(region1, region2);
}
DisjointAllocationPool DisjointAllocationPoolTest::Make(
std::initializer_list<AddressRange> ranges) {
std::initializer_list<base::AddressRegion> regions) {
DisjointAllocationPool ret;
for (auto& range : ranges) {
ret.Merge(range);
for (auto& region : regions) {
ret.Merge(region);
}
return ret;
}
......@@ -54,89 +53,89 @@ TEST_F(DisjointAllocationPoolTest, ConstructEmpty) {
DisjointAllocationPool a;
CHECK(a.IsEmpty());
CheckPool(a, {});
a.Merge({1, 5});
CheckPool(a, {{1, 5}});
a.Merge({1, 4});
CheckPool(a, {{1, 4}});
}
TEST_F(DisjointAllocationPoolTest, ConstructWithRange) {
DisjointAllocationPool a({1, 5});
DisjointAllocationPool a({1, 4});
CHECK(!a.IsEmpty());
CheckPool(a, {{1, 5}});
CheckPool(a, {{1, 4}});
}
TEST_F(DisjointAllocationPoolTest, SimpleExtract) {
DisjointAllocationPool a = Make({{1, 5}});
AddressRange b = a.Allocate(2);
CheckPool(a, {{3, 5}});
CheckRange(b, {1, 3});
DisjointAllocationPool a = Make({{1, 4}});
base::AddressRegion b = a.Allocate(2);
CheckPool(a, {{3, 2}});
CheckRange(b, {1, 2});
a.Merge(b);
CheckPool(a, {{1, 5}});
CHECK_EQ(a.ranges().size(), 1);
CHECK_EQ(a.ranges().front().start, A(1));
CHECK_EQ(a.ranges().front().end, A(5));
CheckPool(a, {{1, 4}});
CHECK_EQ(a.regions().size(), 1);
CHECK_EQ(a.regions().front().begin(), 1);
CHECK_EQ(a.regions().front().end(), 5);
}
TEST_F(DisjointAllocationPoolTest, ExtractAll) {
DisjointAllocationPool a({A(1), A(5)});
AddressRange b = a.Allocate(4);
CheckRange(b, {1, 5});
DisjointAllocationPool a({1, 4});
base::AddressRegion b = a.Allocate(4);
CheckRange(b, {1, 4});
CHECK(a.IsEmpty());
a.Merge(b);
CheckPool(a, {{1, 5}});
CheckPool(a, {{1, 4}});
}
TEST_F(DisjointAllocationPoolTest, FailToExtract) {
DisjointAllocationPool a = Make({{1, 5}});
AddressRange b = a.Allocate(5);
CheckPool(a, {{1, 5}});
DisjointAllocationPool a = Make({{1, 4}});
base::AddressRegion b = a.Allocate(5);
CheckPool(a, {{1, 4}});
CHECK(b.is_empty());
}
TEST_F(DisjointAllocationPoolTest, FailToExtractExact) {
DisjointAllocationPool a = Make({{1, 5}, {10, 14}});
AddressRange b = a.Allocate(5);
CheckPool(a, {{1, 5}, {10, 14}});
DisjointAllocationPool a = Make({{1, 4}, {10, 4}});
base::AddressRegion b = a.Allocate(5);
CheckPool(a, {{1, 4}, {10, 4}});
CHECK(b.is_empty());
}
TEST_F(DisjointAllocationPoolTest, ExtractExact) {
DisjointAllocationPool a = Make({{1, 5}, {10, 15}});
AddressRange b = a.Allocate(5);
CheckPool(a, {{1, 5}});
CheckRange(b, {10, 15});
DisjointAllocationPool a = Make({{1, 4}, {10, 5}});
base::AddressRegion b = a.Allocate(5);
CheckPool(a, {{1, 4}});
CheckRange(b, {10, 5});
}
TEST_F(DisjointAllocationPoolTest, Merging) {
DisjointAllocationPool a = Make({{10, 15}, {20, 25}});
a.Merge({15, 20});
CheckPool(a, {{10, 25}});
DisjointAllocationPool a = Make({{10, 5}, {20, 5}});
a.Merge({15, 5});
CheckPool(a, {{10, 15}});
}
TEST_F(DisjointAllocationPoolTest, MergingMore) {
DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
a.Merge({15, 20});
a.Merge({25, 30});
CheckPool(a, {{10, 35}});
DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
a.Merge({15, 5});
a.Merge({25, 5});
CheckPool(a, {{10, 25}});
}
TEST_F(DisjointAllocationPoolTest, MergingSkip) {
DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
a.Merge({25, 30});
CheckPool(a, {{10, 15}, {20, 35}});
DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
a.Merge({25, 5});
CheckPool(a, {{10, 5}, {20, 15}});
}
TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrc) {
DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
a.Merge({25, 30});
a.Merge({35, 40});
CheckPool(a, {{10, 15}, {20, 40}});
DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
a.Merge({25, 5});
a.Merge({35, 5});
CheckPool(a, {{10, 5}, {20, 20}});
}
TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
a.Merge({25, 30});
a.Merge({36, 40});
CheckPool(a, {{10, 15}, {20, 35}, {36, 40}});
DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
a.Merge({25, 5});
a.Merge({36, 4});
CheckPool(a, {{10, 5}, {20, 15}, {36, 4}});
}
enum ModuleStyle : int { Fixed = 0, Growable = 1 };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment