Commit cd4275aa authored by mstarzinger, committed by Commit bot

Fix CodeFlusher::ProcessOptimizedCodeMaps stale fields.

This makes sure that all pointer fields in an entry in the optimized
code map are considered weak, not just the code object. If just one
field of an entry dies, then the whole entry dies.
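The invariant is easy to state in isolation. Below is a minimal, self-contained
sketch of it, assuming a simplified heap with one mark bit per object; the
names Entry, IsMarked, and CompactCodeMap are hypothetical stand-ins, not V8's
actual API:

    // Sketch only: an optimized-code-map entry survives the GC pass only if
    // *every* heap-pointer field in it is still marked live.
    #include <cstdint>
    #include <vector>

    struct HeapObject {
      bool marked;  // Stand-in for the mark bit the collector would consult.
    };

    // One entry: [ context, code, literals, ast-id ]. The ast-id is a tagged
    // small integer, so it has no mark bit and never "dies".
    struct Entry {
      HeapObject* context;
      HeapObject* code;
      HeapObject* literals;
      int32_t osr_ast_id;
    };

    static bool IsMarked(const HeapObject* obj) { return obj->marked; }

    // Keep an entry only when all three pointer fields are marked; a single
    // dead field kills the whole entry, as described above.
    std::vector<Entry> CompactCodeMap(const std::vector<Entry>& code_map) {
      std::vector<Entry> survivors;
      for (const Entry& e : code_map) {
        if (!IsMarked(e.context)) continue;   // stale context: drop entry
        if (!IsMarked(e.code)) continue;      // stale code: drop entry
        if (!IsMarked(e.literals)) continue;  // stale literals: drop entry
        survivors.push_back(e);               // all fields live: entry stays
      }
      return survivors;
    }

    int main() {
      HeapObject live{true}, dead{false};
      std::vector<Entry> map = {{&live, &live, &live, 0},   // survives
                                {&live, &dead, &live, 0}};  // dropped
      return CompactCodeMap(map).size() == 1 ? 0 : 1;
    }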

R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/1217813013

Cr-Commit-Position: refs/heads/master@{#29417}
parent 549749d3
@@ -950,25 +950,40 @@ void CodeFlusher::ProcessOptimizedCodeMaps() {
     int old_length = code_map->length();
     for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
          i += SharedFunctionInfo::kEntryLength) {
+      // Each entry contains [ context, code, literals, ast-id ] as fields.
+      STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
+      Context* context =
+          Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
       Code* code =
           Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+      FixedArray* literals = FixedArray::cast(
+          code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
+      Smi* ast_id =
+          Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
+      if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
       if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
       DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
-      // Move every slot in the entry.
-      for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
-        int dst_index = new_length++;
-        Object** slot = code_map->RawFieldOfElementAt(dst_index);
-        Object* object = code_map->get(i + j);
-        code_map->set(dst_index, object);
-        if (j == SharedFunctionInfo::kOsrAstIdOffset) {
-          DCHECK(object->IsSmi());
-        } else {
-          DCHECK(
-              Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
-          isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
-                                                                 *slot);
-        }
-      }
+      if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
+      // Move every slot in the entry and record slots when needed.
+      code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
+      code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
+      code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
+      code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
+      Object** code_slot = code_map->RawFieldOfElementAt(
+          new_length + SharedFunctionInfo::kCachedCodeOffset);
+      isolate_->heap()->mark_compact_collector()->RecordSlot(
+          code_slot, code_slot, *code_slot);
+      Object** context_slot = code_map->RawFieldOfElementAt(
+          new_length + SharedFunctionInfo::kContextOffset);
+      isolate_->heap()->mark_compact_collector()->RecordSlot(
+          context_slot, context_slot, *context_slot);
+      Object** literals_slot = code_map->RawFieldOfElementAt(
+          new_length + SharedFunctionInfo::kLiteralsOffset);
+      isolate_->heap()->mark_compact_collector()->RecordSlot(
+          literals_slot, literals_slot, *literals_slot);
+      new_length += SharedFunctionInfo::kEntryLength;
     }
     // Process context-independent entry in the optimized code map.