Commit d5d116af authored by Clemens Hammacher, committed by Commit Bot

[wasm] [cleanup] Refactor and clean up {DisjointAllocationPool}

Since we never extract pools from a {DisjointAllocationPool}, the
{Allocate} method can just return an {AddressRange}, and also {Merge}
just needs to merge a single {AddressRange}.

Drive-by: Make {AddressRange} a proper struct, for DCHECKs and better
accessors.

R=mstarzinger@chromium.org

Bug: v8:7754
Change-Id: I19fd02b2c6d8eb5316a5e994835b89be9cfa792b
Reviewed-on: https://chromium-review.googlesource.com/1090723
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53610}
parent 699a91f2
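
At a glance, the interface change (condensed from the header diff below; declarations only, not the complete class):

    // Before: ranges were std::pair<Address, Address>, and allocation
    // returned a whole pool that had to be merged back wholesale.
    using AddressRange = std::pair<Address, Address>;
    DisjointAllocationPool Allocate(size_t size);      // contiguous (kContiguous)
    DisjointAllocationPool AllocatePool(size_t size);  // possibly several ranges (kAny)
    void Merge(DisjointAllocationPool&&);

    // After: a proper struct, and single ranges in and out.
    struct AddressRange { Address start; Address end; /* ... */ };
    AddressRange Allocate(size_t size);
    void Merge(AddressRange);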
@@ -91,91 +91,57 @@ void RelocateCode(WasmCode* code, const WasmCode* orig,
 }  // namespace

-DisjointAllocationPool::DisjointAllocationPool(Address start, Address end) {
-  ranges_.push_back({start, end});
-}
-
-void DisjointAllocationPool::Merge(DisjointAllocationPool&& other) {
+void DisjointAllocationPool::Merge(AddressRange range) {
   auto dest_it = ranges_.begin();
   auto dest_end = ranges_.end();

-  for (auto src_it = other.ranges_.begin(), src_end = other.ranges_.end();
-       src_it != src_end;) {
-    if (dest_it == dest_end) {
-      // everything else coming from src will be inserted
-      // at the back of ranges_ from now on.
-      ranges_.push_back(*src_it);
-      ++src_it;
-      continue;
-    }
-    // Before or adjacent to dest. Insert or merge, and advance
-    // just src.
-    if (dest_it->first >= src_it->second) {
-      if (dest_it->first == src_it->second) {
-        dest_it->first = src_it->first;
-      } else {
-        ranges_.insert(dest_it, {src_it->first, src_it->second});
-      }
-      ++src_it;
-      continue;
-    }
-    // Src is strictly after dest. Skip over this dest.
-    if (dest_it->second < src_it->first) {
-      ++dest_it;
-      continue;
-    }
-    // Src is adjacent from above. Merge and advance
-    // just src, because the next src, if any, is bound to be
-    // strictly above the newly-formed range.
-    DCHECK_EQ(dest_it->second, src_it->first);
-    dest_it->second = src_it->second;
-    ++src_it;
-    // Now that we merged, maybe this new range is adjacent to
-    // the next. Since we assume src to have come from the
-    // same original memory pool, it follows that the next src
-    // must be above or adjacent to the new bubble.
-    auto next_dest = dest_it;
-    ++next_dest;
-    if (next_dest != dest_end && dest_it->second == next_dest->first) {
-      dest_it->second = next_dest->second;
-      ranges_.erase(next_dest);
-    }
-    // src_it points now at the next, if any, src
-    DCHECK_IMPLIES(src_it != src_end, src_it->first >= dest_it->second);
-  }
+  // Skip over dest ranges strictly before {range}.
+  while (dest_it != dest_end && dest_it->end < range.start) ++dest_it;
+
+  // After last dest range: insert and done.
+  if (dest_it == dest_end) {
+    ranges_.push_back(range);
+    return;
+  }
+
+  // Adjacent (from below) to dest: merge and done.
+  if (dest_it->start == range.end) {
+    dest_it->start = range.start;
+    return;
+  }
+
+  // Before dest: insert and done.
+  if (dest_it->start > range.end) {
+    ranges_.insert(dest_it, range);
+    return;
+  }
+
+  // Src is adjacent from above. Merge and check whether the merged range is
+  // now adjacent to the next range.
+  DCHECK_EQ(dest_it->end, range.start);
+  dest_it->end = range.end;
+  auto next_dest = dest_it;
+  ++next_dest;
+  if (next_dest != dest_end && dest_it->end == next_dest->start) {
+    dest_it->end = next_dest->end;
+    ranges_.erase(next_dest);
+  }
 }
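
To make the four cases above concrete, here is how a pool holding {[10,15), [20,25)} responds to a few Merge calls (values borrowed from the unit tests below; the annotations are illustrative, not part of the change):

    DisjointAllocationPool pool = ...;  // ranges: [10,15) [20,25)
    pool.Merge({30, 35});  // after last range: appended -> [10,15) [20,25) [30,35)
    pool.Merge({8, 10});   // adjacent from below: start moves -> [8,15) [20,25) [30,35)
    pool.Merge({1, 5});    // strictly before: inserted -> [1,5) [8,15) [20,25) [30,35)
    pool.Merge({15, 20});  // adjacent from above; the merged range now touches
                           // [20,25), so both collapse -> [1,5) [8,25) [30,35)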
-DisjointAllocationPool DisjointAllocationPool::Extract(size_t size,
-                                                       ExtractionMode mode) {
-  DisjointAllocationPool ret;
-  for (auto it = ranges_.begin(), end = ranges_.end(); it != end;) {
-    auto current = it;
-    ++it;
-    DCHECK_LT(current->first, current->second);
-    size_t current_size = static_cast<size_t>(current->second - current->first);
-    if (size == current_size) {
-      ret.ranges_.push_back(*current);
-      ranges_.erase(current);
-      return ret;
-    }
-    if (size < current_size) {
-      ret.ranges_.push_back({current->first, current->first + size});
-      current->first += size;
-      DCHECK(current->first < current->second);
-      return ret;
-    }
-    if (mode != kContiguous) {
-      size -= current_size;
-      ret.ranges_.push_back(*current);
-      ranges_.erase(current);
-    }
-  }
-  if (size > 0) {
-    Merge(std::move(ret));
-    return {};
-  }
-  return ret;
+AddressRange DisjointAllocationPool::Allocate(size_t size) {
+  for (auto it = ranges_.begin(), end = ranges_.end(); it != end; ++it) {
+    size_t range_size = it->size();
+    if (size > range_size) continue;
+    AddressRange ret{it->start, it->start + size};
+    if (size == range_size) {
+      ranges_.erase(it);
+    } else {
+      it->start += size;
+      DCHECK_LT(it->start, it->end);
+    }
+    return ret;
+  }
+  return {};
 }

 Address WasmCode::constant_pool() const {
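
The new Allocate is a first-fit scan: the first range at least {size} bytes large is either consumed whole or shrunk from below. A sketch of the semantics (mirroring the ExtractExact test further down):

    DisjointAllocationPool pool = ...;  // ranges: [1,5) [10,15)
    AddressRange r = pool.Allocate(5);  // [1,5) holds only 4 bytes, so the
                                        // second range is taken: r == [10,15)
    // The matching range was consumed whole; the pool keeps only [1,5).
    // A request of 6 bytes would fail and return a default (empty) range.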
@@ -367,7 +333,7 @@ NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
       num_imported_functions_(num_imports),
       compilation_state_(NewCompilationState(
           reinterpret_cast<Isolate*>(code_manager->isolate_), env)),
-      free_code_space_(code_space->address(), code_space->end()),
+      free_code_space_({code_space->address(), code_space->end()}),
       wasm_code_manager_(code_manager),
       can_request_more_memory_(can_request_more),
       use_trap_handler_(env.use_trap_handler) {
@@ -675,10 +641,10 @@ Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
 }

 Address NativeModule::AllocateForCode(size_t size) {
-  // this happens under a lock assumed by the caller.
+  // This happens under a lock assumed by the caller.
   size = RoundUp(size, kCodeAlignment);
-  DisjointAllocationPool mem = free_code_space_.Allocate(size);
-  if (mem.IsEmpty()) {
+  AddressRange mem = free_code_space_.Allocate(size);
+  if (mem.is_empty()) {
     if (!can_request_more_memory_) return kNullAddress;

     Address hint = owned_code_space_.empty() ? kNullAddress
@@ -689,18 +655,15 @@ Address NativeModule::AllocateForCode(size_t size) {
     wasm_code_manager_->TryAllocate(size, &new_mem,
                                     reinterpret_cast<void*>(hint));
     if (!new_mem.IsReserved()) return kNullAddress;
-    DisjointAllocationPool mem_pool(new_mem.address(), new_mem.end());
     wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
-    free_code_space_.Merge(std::move(mem_pool));
+    free_code_space_.Merge({new_mem.address(), new_mem.end()});
     mem = free_code_space_.Allocate(size);
-    if (mem.IsEmpty()) return kNullAddress;
+    if (mem.is_empty()) return kNullAddress;
   }
-  Address ret = mem.ranges().front().first;
-  Address end = ret + size;
-  Address commit_start = RoundUp(ret, AllocatePageSize());
-  Address commit_end = RoundUp(end, AllocatePageSize());
-  // {commit_start} will be either ret or the start of the next page.
+  Address commit_start = RoundUp(mem.start, AllocatePageSize());
+  Address commit_end = RoundUp(mem.end, AllocatePageSize());
+  // {commit_start} will be either mem.start or the start of the next page.
   // {commit_end} will be the start of the page after the one in which
   // the allocation ends.
   // We start from an aligned start, and we know we allocated vmem in
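
A quick arithmetic check of the rounding above (illustrative numbers, assuming AllocatePageSize() == 0x1000):

    // mem = [0x10100, 0x11200)
    // commit_start = RoundUp(0x10100, 0x1000) == 0x11000  (start of next page)
    // commit_end   = RoundUp(0x11200, 0x1000) == 0x12000  (page after the end)
    // Only whole pages in [commit_start, commit_end) still need committing;
    // the partial page below commit_start is assumed to have been committed
    // by an earlier allocation from the same reservation.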
@@ -737,11 +700,11 @@ Address NativeModule::AllocateForCode(size_t size) {
     committed_code_space_ += commit_size;
 #endif
   }
-  DCHECK(IsAligned(ret, kCodeAlignment));
+  DCHECK(IsAligned(mem.start, kCodeAlignment));
   allocated_code_space_.Merge(std::move(mem));
   TRACE_HEAP("ID: %zu. Code alloc: %p,+%zu\n", instance_id,
-             reinterpret_cast<void*>(ret), size);
-  return ret;
+             reinterpret_cast<void*>(mem.start), size);
+  return mem.start;
 }

 WasmCode* NativeModule::Lookup(Address pc) {
@@ -1002,14 +965,13 @@ bool NativeModule::SetExecutable(bool executable) {
     for (auto& range : allocated_code_space_.ranges()) {
       // allocated_code_space_ is fine-grained, so we need to
       // page-align it.
-      size_t range_size = RoundUp(
-          static_cast<size_t>(range.second - range.first), AllocatePageSize());
-      if (!SetPermissions(range.first, range_size, permission)) {
+      size_t range_size = RoundUp(range.size(), AllocatePageSize());
+      if (!SetPermissions(range.start, range_size, permission)) {
         return false;
       }
       TRACE_HEAP("Set %p:%p to executable:%d\n",
-                 reinterpret_cast<void*>(range.first),
-                 reinterpret_cast<void*>(range.second), executable);
+                 reinterpret_cast<void*>(range.start),
+                 reinterpret_cast<void*>(range.end), executable);
     }
   }
   is_executable_ = executable;
......
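
Net effect on NativeModule: the two pools now trade plain ranges. Stripped to its core (a sketch of the flow above, not the verbatim code):

    AddressRange mem = free_code_space_.Allocate(size);  // carve from the free pool
    if (mem.is_empty()) return kNullAddress;             // reservation exhausted
    // ... commit pages spanning [mem.start, mem.end) as needed ...
    allocated_code_space_.Merge(mem);                    // record as allocated
    return mem.start;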
@@ -36,47 +36,46 @@ struct WasmModule;
   FOREACH_WASM_TRAPREASON(VTRAP) \
   V(WasmStackGuard)

+struct AddressRange {
+  Address start;
+  Address end;
+
+  AddressRange(Address s, Address e) : start(s), end(e) {
+    DCHECK_LE(start, end);
+    DCHECK_IMPLIES(start == kNullAddress, end == kNullAddress);
+  }
+  AddressRange() : AddressRange(kNullAddress, kNullAddress) {}
+
+  size_t size() const { return static_cast<size_t>(end - start); }
+  bool is_empty() const { return start == end; }
+  operator bool() const { return start != kNullAddress; }
+};
+
 // Sorted, disjoint and non-overlapping memory ranges. A range is of the
 // form [start, end). So there's no [start, end), [end, other_end),
 // because that should have been reduced to [start, other_end).
-using AddressRange = std::pair<Address, Address>;
 class V8_EXPORT_PRIVATE DisjointAllocationPool final {
  public:
-  enum ExtractionMode : bool { kAny = false, kContiguous = true };
-
-  DisjointAllocationPool() {}
+  DisjointAllocationPool() = default;

-  explicit DisjointAllocationPool(Address, Address);
+  explicit DisjointAllocationPool(AddressRange range) : ranges_({range}) {}

   DisjointAllocationPool(DisjointAllocationPool&& other) = default;
   DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;

-  // Merge the ranges of the parameter into this object. Ordering is
-  // preserved. The assumption is that the passed parameter is
-  // not intersecting this object - for example, it was obtained
-  // from a previous Allocate{Pool}.
-  void Merge(DisjointAllocationPool&&);
+  // Merge the parameter range into this object while preserving ordering of
+  // the ranges. The assumption is that the passed parameter is not
+  // intersecting this object - for example, it was obtained from a previous
+  // Allocate.
+  void Merge(AddressRange);

   // Allocate a contiguous range of size {size}. Return an empty pool on
   // failure.
-  DisjointAllocationPool Allocate(size_t size) {
-    return Extract(size, kContiguous);
-  }
-
-  // Allocate a sub-pool of size {size}. Return an empty pool on failure.
-  DisjointAllocationPool AllocatePool(size_t size) {
-    return Extract(size, kAny);
-  }
+  AddressRange Allocate(size_t size);

   bool IsEmpty() const { return ranges_.empty(); }
   const std::list<AddressRange>& ranges() const { return ranges_; }

  private:
-  // Extract out a total of {size}. By default, the return may
-  // be more than one range. If kContiguous is passed, the return
-  // will be one range. If the operation fails, this object is
-  // unchanged, and the return {IsEmpty()}
-  DisjointAllocationPool Extract(size_t size, ExtractionMode mode);
-
   std::list<AddressRange> ranges_;

   DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool)
......
@@ -18,6 +18,8 @@ class DisjointAllocationPoolTest : public ::testing::Test {
   Address A(size_t n) { return static_cast<Address>(n); }
   void CheckLooksLike(const DisjointAllocationPool& mem,
                       std::vector<std::pair<size_t, size_t>> expectation);
+  void CheckLooksLike(AddressRange range,
+                      std::pair<size_t, size_t> expectation);
   DisjointAllocationPool Make(std::vector<std::pair<size_t, size_t>> model);
 };

@@ -28,127 +30,111 @@ void DisjointAllocationPoolTest::CheckLooksLike(
   CHECK_EQ(ranges.size(), expectation.size());
   auto iter = expectation.begin();
   for (auto it = ranges.begin(), e = ranges.end(); it != e; ++it, ++iter) {
-    CHECK_EQ(it->first, A(iter->first));
-    CHECK_EQ(it->second, A(iter->second));
+    CheckLooksLike(*it, *iter);
   }
 }

+void DisjointAllocationPoolTest::CheckLooksLike(
+    AddressRange range, std::pair<size_t, size_t> expectation) {
+  CHECK_EQ(range.start, A(expectation.first));
+  CHECK_EQ(range.end, A(expectation.second));
+}
+
 DisjointAllocationPool DisjointAllocationPoolTest::Make(
     std::vector<std::pair<size_t, size_t>> model) {
   DisjointAllocationPool ret;
   for (auto& pair : model) {
-    ret.Merge(DisjointAllocationPool(A(pair.first), A(pair.second)));
+    ret.Merge({A(pair.first), A(pair.second)});
   }
   return ret;
 }

-TEST_F(DisjointAllocationPoolTest, Construct) {
+TEST_F(DisjointAllocationPoolTest, ConstructEmpty) {
   DisjointAllocationPool a;
   CHECK(a.IsEmpty());
-  CHECK_EQ(a.ranges().size(), 0);
-  DisjointAllocationPool b = Make({{1, 5}});
-  CHECK(!b.IsEmpty());
-  CHECK_EQ(b.ranges().size(), 1);
-  a.Merge(std::move(b));
+  CheckLooksLike(a, {});
+  a.Merge({1, 5});
   CheckLooksLike(a, {{1, 5}});
-  DisjointAllocationPool c;
-  a.Merge(std::move(c));
-  DisjointAllocationPool e, f;
-  e.Merge(std::move(f));
-  CHECK(e.IsEmpty());
 }

+TEST_F(DisjointAllocationPoolTest, ConstructWithRange) {
+  DisjointAllocationPool a({1, 5});
+  CHECK(!a.IsEmpty());
+  CheckLooksLike(a, {{1, 5}});
+}
+
 TEST_F(DisjointAllocationPoolTest, SimpleExtract) {
   DisjointAllocationPool a = Make({{1, 5}});
-  DisjointAllocationPool b = a.AllocatePool(2);
+  AddressRange b = a.Allocate(2);
   CheckLooksLike(a, {{3, 5}});
-  CheckLooksLike(b, {{1, 3}});
-  a.Merge(std::move(b));
+  CheckLooksLike(b, {1, 3});
+  a.Merge(b);
   CheckLooksLike(a, {{1, 5}});
   CHECK_EQ(a.ranges().size(), 1);
-  CHECK_EQ(a.ranges().front().first, A(1));
-  CHECK_EQ(a.ranges().front().second, A(5));
+  CHECK_EQ(a.ranges().front().start, A(1));
+  CHECK_EQ(a.ranges().front().end, A(5));
 }

 TEST_F(DisjointAllocationPoolTest, ExtractAll) {
-  DisjointAllocationPool a(A(1), A(5));
-  DisjointAllocationPool b = a.AllocatePool(4);
-  CheckLooksLike(b, {{1, 5}});
+  DisjointAllocationPool a({A(1), A(5)});
+  AddressRange b = a.Allocate(4);
+  CheckLooksLike(b, {1, 5});
   CHECK(a.IsEmpty());
-  a.Merge(std::move(b));
+  a.Merge(b);
   CheckLooksLike(a, {{1, 5}});
 }

-TEST_F(DisjointAllocationPoolTest, ExtractAccross) {
-  DisjointAllocationPool a = Make({{1, 5}, {10, 20}});
-  DisjointAllocationPool b = a.AllocatePool(5);
-  CheckLooksLike(a, {{11, 20}});
-  CheckLooksLike(b, {{1, 5}, {10, 11}});
-  a.Merge(std::move(b));
-  CheckLooksLike(a, {{1, 5}, {10, 20}});
-}
-
-TEST_F(DisjointAllocationPoolTest, ReassembleOutOfOrder) {
-  DisjointAllocationPool a = Make({{1, 5}, {10, 15}});
-  DisjointAllocationPool b = Make({{7, 8}, {20, 22}});
-  a.Merge(std::move(b));
-  CheckLooksLike(a, {{1, 5}, {7, 8}, {10, 15}, {20, 22}});
-
-  DisjointAllocationPool c = Make({{1, 5}, {10, 15}});
-  DisjointAllocationPool d = Make({{7, 8}, {20, 22}});
-  d.Merge(std::move(c));
-  CheckLooksLike(d, {{1, 5}, {7, 8}, {10, 15}, {20, 22}});
-}
-
 TEST_F(DisjointAllocationPoolTest, FailToExtract) {
   DisjointAllocationPool a = Make({{1, 5}});
-  DisjointAllocationPool b = a.AllocatePool(5);
+  AddressRange b = a.Allocate(5);
   CheckLooksLike(a, {{1, 5}});
-  CHECK(b.IsEmpty());
+  CHECK(b.is_empty());
 }

 TEST_F(DisjointAllocationPoolTest, FailToExtractExact) {
   DisjointAllocationPool a = Make({{1, 5}, {10, 14}});
-  DisjointAllocationPool b = a.Allocate(5);
+  AddressRange b = a.Allocate(5);
   CheckLooksLike(a, {{1, 5}, {10, 14}});
-  CHECK(b.IsEmpty());
+  CHECK(b.is_empty());
 }

 TEST_F(DisjointAllocationPoolTest, ExtractExact) {
   DisjointAllocationPool a = Make({{1, 5}, {10, 15}});
-  DisjointAllocationPool b = a.Allocate(5);
+  AddressRange b = a.Allocate(5);
   CheckLooksLike(a, {{1, 5}});
-  CheckLooksLike(b, {{10, 15}});
+  CheckLooksLike(b, {10, 15});
 }

 TEST_F(DisjointAllocationPoolTest, Merging) {
   DisjointAllocationPool a = Make({{10, 15}, {20, 25}});
-  a.Merge(Make({{15, 20}}));
+  a.Merge({15, 20});
   CheckLooksLike(a, {{10, 25}});
 }

 TEST_F(DisjointAllocationPoolTest, MergingMore) {
   DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
-  a.Merge(Make({{15, 20}, {25, 30}}));
+  a.Merge({15, 20});
+  a.Merge({25, 30});
   CheckLooksLike(a, {{10, 35}});
 }

 TEST_F(DisjointAllocationPoolTest, MergingSkip) {
   DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
-  a.Merge(Make({{25, 30}}));
+  a.Merge({25, 30});
   CheckLooksLike(a, {{10, 15}, {20, 35}});
 }

 TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrc) {
   DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
-  a.Merge(Make({{25, 30}, {35, 40}}));
+  a.Merge({25, 30});
+  a.Merge({35, 40});
   CheckLooksLike(a, {{10, 15}, {20, 40}});
 }

 TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
   DisjointAllocationPool a = Make({{10, 15}, {20, 25}, {30, 35}});
-  a.Merge(Make({{25, 30}, {36, 40}}));
+  a.Merge({25, 30});
+  a.Merge({36, 40});
   CheckLooksLike(a, {{10, 15}, {20, 35}, {36, 40}});
 }
......