Commit b3a1adc6 authored by yangguo, committed by Commit bot

[heap] Fix skip list for deserialized code objects.

R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/1816463002

Cr-Commit-Position: refs/heads/master@{#34895}
parent 1e2d0e11
@@ -1158,7 +1158,9 @@ bool Heap::ReserveSpace(Reservation* reservations) {
         if (space == NEW_SPACE) {
           allocation = new_space()->AllocateRawUnaligned(size);
         } else {
-          allocation = paged_space(space)->AllocateRawUnaligned(size);
+          // The deserializer will update the skip list.
+          allocation = paged_space(space)->AllocateRawUnaligned(
+              size, PagedSpace::IGNORE_SKIP_LIST);
         }
         HeapObject* free_space = nullptr;
         if (allocation.To(&free_space)) {
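The reservation made here is a placeholder that the deserializer later carves into many individual objects, so registering it in the skip list would only record the placeholder's start. A minimal sketch of that carve-up pattern, with hypothetical stand-ins (ObjectSpec, WriteObject, SkipListUpdate) for the real machinery:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-ins (assumptions for illustration, not V8 API).
struct ObjectSpec { size_t size; };
void WriteObject(char* addr, const ObjectSpec& spec);   // assumed helper
void SkipListUpdate(uintptr_t addr, size_t size);       // assumed helper

// Reserve one big block without touching the skip list, then register each
// real object as it is written. This mirrors the division of labor between
// Heap::ReserveSpace (IGNORE_SKIP_LIST) and Deserializer::Allocate below.
void DeserializeInto(char* block, const std::vector<ObjectSpec>& objects,
                     bool is_code_space) {
  char* cursor = block;
  for (const ObjectSpec& spec : objects) {
    WriteObject(cursor, spec);
    if (is_code_space) {
      SkipListUpdate(reinterpret_cast<uintptr_t>(cursor), spec.size);
    }
    cursor += spec.size;
  }
}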
@@ -498,7 +498,8 @@ HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
 // Raw allocation.
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(
+    int size_in_bytes, UpdateSkipList update_skip_list) {
   HeapObject* object = AllocateLinearly(size_in_bytes);
   if (object == NULL) {
@@ -509,7 +510,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
   }
   if (object != NULL) {
-    if (identity() == CODE_SPACE) {
+    if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
     MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
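Taken together, the two hunks above thread the new flag through the fast path: allocation itself is unchanged, and only the skip-list registration becomes conditional. A sketch of the two call forms this enables, assuming a PagedSpace* code_space and an int size are in scope:

// Default: the allocator registers the new object in the skip list itself.
AllocationResult a = code_space->AllocateRawUnaligned(size);

// Opt-out: the caller takes over and must call SkipList::Update(addr, size)
// itself once the real object has been written into the raw memory.
AllocationResult b =
    code_space->AllocateRawUnaligned(size, PagedSpace::IGNORE_SKIP_LIST);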
@@ -1200,7 +1200,14 @@ class SkipList {
   void AddObject(Address addr, int size) {
     int start_region = RegionNumber(addr);
     int end_region = RegionNumber(addr + size - kPointerSize);
     for (int idx = start_region; idx <= end_region; idx++) {
-      if (starts_[idx] > addr) starts_[idx] = addr;
+      if (starts_[idx] > addr) {
+        starts_[idx] = addr;
+      } else {
+        // In the first region, there may already be an object closer to the
+        // start of the region. Do not change the start in that case. If this
+        // is not the first region, you probably added overlapping objects.
+        DCHECK_EQ(start_region, idx);
+      }
     }
   }
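For context on the structure being hardened here: the skip list records, for each fixed-size region of a page, the lowest address at which an object starts, so page iteration can begin at a region's first object instead of scanning from the page start. A standalone sketch of the idea with a worked example; the page and region sizes are illustrative assumptions, not V8's actual constants:

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kPageSize = size_t{1} << 19;  // assumed: 512 KB pages
constexpr size_t kRegionSizeLog2 = 13;         // assumed: 8 KB regions
constexpr size_t kRegions = kPageSize >> kRegionSizeLog2;
constexpr size_t kPointerSize = sizeof(void*);

struct SkipListSketch {
  uintptr_t starts_[kRegions];

  SkipListSketch() {
    for (size_t i = 0; i < kRegions; i++) starts_[i] = UINTPTR_MAX;
  }

  static size_t RegionNumber(uintptr_t addr) {
    return (addr & (kPageSize - 1)) >> kRegionSizeLog2;
  }

  // Every region the object touches remembers the lowest object start
  // within it. Only the object's first region may already hold an earlier
  // start; a smaller value anywhere else would mean overlapping objects,
  // which is exactly what the new DCHECK in the patch catches.
  void Update(uintptr_t addr, size_t size) {
    size_t start_region = RegionNumber(addr);
    size_t end_region = RegionNumber(addr + size - kPointerSize);
    for (size_t idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) {
        starts_[idx] = addr;
      } else {
        assert(idx == start_region);  // mirrors DCHECK_EQ(start_region, idx)
      }
    }
  }
};

int main() {
  SkipListSketch list;
  list.Update(0x0000, 0x3000);  // spans regions 0..1: both record 0x0000
  list.Update(0x3000, 0x1000);  // fits in region 1: keeps earlier 0x0000
  list.Update(0x4000, 0x5000);  // spans regions 2..4: all record 0x4000
  assert(list.starts_[1] == 0x0000);
  assert(list.starts_[3] == 0x4000);
  return 0;
}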
@@ -2038,10 +2045,13 @@ class PagedSpace : public Space {
     return allocation_info_.limit_address();
   }
 
+  enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
+
   // Allocate the requested number of bytes in the space if possible, return a
-  // failure object if not.
+  // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
+  // to be manually updated later.
   MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
-      int size_in_bytes);
+      int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
 
   MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
       int size_in_bytes);
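Defaulting the new parameter to UPDATE_SKIP_LIST keeps every existing call site compiling with unchanged behavior; only the deserializer path opts out explicitly. The same evolution pattern in miniature (a generic sketch, not V8 code):

enum FeatureMode { kEnabled, kDisabled };

// Adding a trailing defaulted parameter is a source-compatible way to
// introduce an opt-out: old callers keep the old behavior implicitly.
int Compute(int input, FeatureMode mode = kEnabled);

void Callers() {
  Compute(42);             // pre-existing call sites: semantics unchanged
  Compute(42, kDisabled);  // the one special site opts out explicitly
}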
@@ -425,6 +425,7 @@ Address Deserializer::Allocate(int space_index, int size) {
     int chunk_index = current_chunk_[space_index];
     CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
 #endif
+    if (space_index == CODE_SPACE) SkipList::Update(address, size);
     return address;
   }
 }
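This is the other half of the contract set up in Heap::ReserveSpace: each code object carved out of the reserved chunk now registers itself, where previously the skip list reflected only the bulk reservation. Why that matters, sketched with the SkipListSketch type from above (simplified; real page iteration walks forward object by object):

// Iteration over a code page can begin at the recorded start of the region
// containing `addr` instead of at the page start. If deserialized objects
// never updated the list, this starting point would reflect the original
// bulk reservation rather than the objects actually carved out of it.
uintptr_t FirstObjectAtOrBefore(const SkipListSketch& list, uintptr_t addr) {
  uintptr_t start = list.starts_[SkipListSketch::RegionNumber(addr)];
  // ...real code would walk objects forward from `start` to the one that
  // contains `addr`; returning the recorded start suffices for the sketch.
  return start;
}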