Commit 16f96fde authored by mstarzinger, committed by Commit bot

Make some foo.h headers usable without foo-inl.h header.

This CL is a pure refactoring that makes an empty compilation unit
including just "foo.h" but not "foo-inl.h" compile without warnings or
errors. This is needed to further reduce the header dependency tangle.

This realizes the above state for "spaces.h" and "mark-compact.h".

R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/1288413002

Cr-Commit-Position: refs/heads/master@{#30171}
parent c47d9d07
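
The mechanics of the refactoring, for reference: function bodies that used to live in the regular header move into the corresponding -inl.h header, and the regular header keeps only an `inline` declaration. A minimal sketch of the idiom follows; the file and identifier names are hypothetical and only illustrate the pattern, they are not taken from the V8 sources.

```cpp
// foo.h -- the header only *declares* the inline member function, so a
// translation unit that includes just "foo.h" compiles cleanly even
// though the definition lives elsewhere.
#ifndef FOO_H_
#define FOO_H_

class DescriptorCache {
 public:
  // Declared inline here, defined out of line in foo-inl.h.
  inline int Lookup(int key) const;

 private:
  int table_[16] = {};
};

#endif  // FOO_H_

// foo-inl.h -- included only by translation units that actually call
// Lookup(); it supplies the definition for the inline declaration above.
#ifndef FOO_INL_H_
#define FOO_INL_H_

#include "foo.h"

int DescriptorCache::Lookup(int key) const { return table_[key & 15]; }

#endif  // FOO_INL_H_
```

A compilation unit that only includes foo.h compiles because the inline function is never used there; any unit that actually calls Lookup() must also include foo-inl.h to obtain the definition.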
@@ -17,6 +17,7 @@
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/list-inl.h"
#include "src/log.h"
#include "src/msan.h"
#include "src/objects.h"
@@ -632,6 +633,27 @@ void ExternalStringTable::ShrinkNewStrings(int position) {
}
int DescriptorLookupCache::Lookup(Map* source, Name* name) {
if (!name->IsUniqueName()) return kAbsent;
int index = Hash(source, name);
Key& key = keys_[index];
if ((key.source == source) && (key.name == name)) return results_[index];
return kAbsent;
}
void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
DCHECK(result != kAbsent);
if (name->IsUniqueName()) {
int index = Hash(source, name);
Key& key = keys_[index];
key.source = source;
key.name = name;
results_[index] = result;
}
}
void Heap::ClearInstanceofCache() {
set_instanceof_cache_function(Smi::FromInt(0));
}
......
@@ -2629,25 +2629,10 @@ class DescriptorLookupCache {
public:
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
int Lookup(Map* source, Name* name) {
if (!name->IsUniqueName()) return kAbsent;
int index = Hash(source, name);
Key& key = keys_[index];
if ((key.source == source) && (key.name == name)) return results_[index];
return kAbsent;
}
inline int Lookup(Map* source, Name* name);
// Update an element in the cache.
void Update(Map* source, Name* name, int result) {
DCHECK(result != kAbsent);
if (name->IsUniqueName()) {
int index = Hash(source, name);
Key& key = keys_[index];
key.source = source;
key.name = name;
results_[index] = result;
}
}
inline void Update(Map* source, Name* name, int result);
// Clear the cache.
void Clear();
......
@@ -61,7 +61,95 @@ void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
}
}
}
void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
if (GetNextCandidate(shared_info) == NULL) {
SetNextCandidate(shared_info, shared_function_info_candidates_head_);
shared_function_info_candidates_head_ = shared_info;
}
}
void CodeFlusher::AddCandidate(JSFunction* function) {
DCHECK(function->code() == function->shared()->code());
if (GetNextCandidate(function)->IsUndefined()) {
SetNextCandidate(function, jsfunction_candidates_head_);
jsfunction_candidates_head_ = function;
}
}
void CodeFlusher::AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
optimized_code_map_holder_head_ = code_map_holder;
}
}
JSFunction** CodeFlusher::GetNextCandidateSlot(JSFunction* candidate) {
return reinterpret_cast<JSFunction**>(
HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
}
JSFunction* CodeFlusher::GetNextCandidate(JSFunction* candidate) {
Object* next_candidate = candidate->next_function_link();
return reinterpret_cast<JSFunction*>(next_candidate);
}
void CodeFlusher::SetNextCandidate(JSFunction* candidate,
JSFunction* next_candidate) {
candidate->set_next_function_link(next_candidate, UPDATE_WEAK_WRITE_BARRIER);
}
void CodeFlusher::ClearNextCandidate(JSFunction* candidate, Object* undefined) {
DCHECK(undefined->IsUndefined());
candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
}
SharedFunctionInfo* CodeFlusher::GetNextCandidate(
SharedFunctionInfo* candidate) {
Object* next_candidate = candidate->code()->gc_metadata();
return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
}
void CodeFlusher::SetNextCandidate(SharedFunctionInfo* candidate,
SharedFunctionInfo* next_candidate) {
candidate->code()->set_gc_metadata(next_candidate);
}
void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
}
} // namespace v8::internal
SharedFunctionInfo* CodeFlusher::GetNextCodeMap(SharedFunctionInfo* holder) {
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
return reinterpret_cast<SharedFunctionInfo*>(next_map);
}
void CodeFlusher::SetNextCodeMap(SharedFunctionInfo* holder,
SharedFunctionInfo* next_holder) {
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
}
void CodeFlusher::ClearNextCodeMap(SharedFunctionInfo* holder) {
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MARK_COMPACT_INL_H_
@@ -459,27 +459,9 @@ class CodeFlusher {
shared_function_info_candidates_head_(NULL),
optimized_code_map_holder_head_(NULL) {}
void AddCandidate(SharedFunctionInfo* shared_info) {
if (GetNextCandidate(shared_info) == NULL) {
SetNextCandidate(shared_info, shared_function_info_candidates_head_);
shared_function_info_candidates_head_ = shared_info;
}
}
void AddCandidate(JSFunction* function) {
DCHECK(function->code() == function->shared()->code());
if (GetNextCandidate(function)->IsUndefined()) {
SetNextCandidate(function, jsfunction_candidates_head_);
jsfunction_candidates_head_ = function;
}
}
void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
optimized_code_map_holder_head_ = code_map_holder;
}
}
inline void AddCandidate(SharedFunctionInfo* shared_info);
inline void AddCandidate(JSFunction* function);
inline void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictCandidate(SharedFunctionInfo* shared_info);
@@ -507,57 +489,23 @@ class CodeFlusher {
void EvictJSFunctionCandidates();
void EvictSharedFunctionInfoCandidates();
static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
return reinterpret_cast<JSFunction**>(
HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
}
static JSFunction* GetNextCandidate(JSFunction* candidate) {
Object* next_candidate = candidate->next_function_link();
return reinterpret_cast<JSFunction*>(next_candidate);
}
static void SetNextCandidate(JSFunction* candidate,
JSFunction* next_candidate) {
candidate->set_next_function_link(next_candidate,
UPDATE_WEAK_WRITE_BARRIER);
}
static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
DCHECK(undefined->IsUndefined());
candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
}
static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
Object* next_candidate = candidate->code()->gc_metadata();
return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
}
static void SetNextCandidate(SharedFunctionInfo* candidate,
SharedFunctionInfo* next_candidate) {
candidate->code()->set_gc_metadata(next_candidate);
}
static void ClearNextCandidate(SharedFunctionInfo* candidate) {
candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
}
static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
return reinterpret_cast<SharedFunctionInfo*>(next_map);
}
static void SetNextCodeMap(SharedFunctionInfo* holder,
SharedFunctionInfo* next_holder) {
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
}
static void ClearNextCodeMap(SharedFunctionInfo* holder) {
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
}
static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
static inline JSFunction* GetNextCandidate(JSFunction* candidate);
static inline void SetNextCandidate(JSFunction* candidate,
JSFunction* next_candidate);
static inline void ClearNextCandidate(JSFunction* candidate,
Object* undefined);
static inline SharedFunctionInfo* GetNextCandidate(
SharedFunctionInfo* candidate);
static inline void SetNextCandidate(SharedFunctionInfo* candidate,
SharedFunctionInfo* next_candidate);
static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
static inline SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder);
static inline void SetNextCodeMap(SharedFunctionInfo* holder,
SharedFunctionInfo* next_holder);
static inline void ClearNextCodeMap(SharedFunctionInfo* holder);
Isolate* isolate_;
JSFunction* jsfunction_candidates_head_;
......
@@ -12,6 +12,12 @@ namespace v8 {
namespace internal {
StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(Map* map) {
return GetVisitorId(map->instance_type(), map->instance_size(),
FLAG_unbox_double_fields && !map->HasFastPointerLayout());
}
StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
int instance_type, int instance_size, bool has_unboxed_fields) {
if (instance_type < FIRST_NONSTRING_TYPE) {
......
@@ -110,11 +110,7 @@ class StaticVisitorBase : public AllStatic {
bool has_unboxed_fields);
// Determine which specialized visitor should be used for given map.
static VisitorId GetVisitorId(Map* map) {
return GetVisitorId(
map->instance_type(), map->instance_size(),
FLAG_unbox_double_fields && !map->HasFastPointerLayout());
}
static VisitorId GetVisitorId(Map* map);
// For visitors that allow specialization by size calculate VisitorId based
// on size, base visitor id and generic visitor id.
......
@@ -28,7 +28,6 @@ void Bitmap::Clear(MemoryChunk* chunk) {
// -----------------------------------------------------------------------------
// PageIterator
PageIterator::PageIterator(PagedSpace* space)
: space_(space),
prev_page_(&space->anchor_),
@@ -47,9 +46,32 @@ Page* PageIterator::next() {
// -----------------------------------------------------------------------------
// NewSpacePageIterator
// SemiSpaceIterator
HeapObject* SemiSpaceIterator::Next() {
if (current_ == limit_) return NULL;
if (NewSpacePage::IsAtEnd(current_)) {
NewSpacePage* page = NewSpacePage::FromLimit(current_);
page = page->next_page();
DCHECK(!page->is_anchor());
current_ = page->area_start();
if (current_ == limit_) return NULL;
}
HeapObject* object = HeapObject::FromAddress(current_);
int size = object->Size();
current_ += size;
return object;
}
HeapObject* SemiSpaceIterator::next_object() { return Next(); }
// -----------------------------------------------------------------------------
// NewSpacePageIterator
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
: prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
@@ -81,6 +103,19 @@ NewSpacePage* NewSpacePageIterator::next() {
// -----------------------------------------------------------------------------
// HeapObjectIterator
HeapObject* HeapObjectIterator::Next() {
do {
HeapObject* next_obj = FromCurrentPage();
if (next_obj != NULL) return next_obj;
} while (AdvanceToNextPage());
return NULL;
}
HeapObject* HeapObjectIterator::next_object() { return Next(); }
HeapObject* HeapObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
@@ -137,8 +172,18 @@ void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
#endif
// --------------------------------------------------------------------------
// AllocationResult
AllocationSpace AllocationResult::RetrySpace() {
DCHECK(IsRetry());
return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}
// --------------------------------------------------------------------------
// PagedSpace
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
@@ -161,6 +206,9 @@ bool PagedSpace::Contains(Address addr) {
}
bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }
void MemoryChunk::set_scan_on_scavenge(bool scan) {
if (scan) {
if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
@@ -193,19 +241,6 @@ MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
}
void MemoryChunk::UpdateHighWaterMark(Address mark) {
if (mark == NULL) return;
// Need to subtract one from the mark because when a chunk is full the
// top points to the next address after the chunk, which effectively belongs
// to another chunk. See the comment to Page::FromAllocationTop.
MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
int new_mark = static_cast<int>(mark - chunk->address());
if (new_mark > chunk->high_water_mark_) {
chunk->high_water_mark_ = new_mark;
}
}
PointerChunkIterator::PointerChunkIterator(Heap* heap)
: state_(kOldSpaceState),
old_iterator_(heap->old_space()),
@@ -213,15 +248,43 @@ PointerChunkIterator::PointerChunkIterator(Heap* heap)
lo_iterator_(heap->lo_space()) {}
Page* Page::next_page() {
DCHECK(next_chunk()->owner() == owner());
return static_cast<Page*>(next_chunk());
}
Page* Page::prev_page() {
DCHECK(prev_chunk()->owner() == owner());
return static_cast<Page*>(prev_chunk());
MemoryChunk* PointerChunkIterator::next() {
switch (state_) {
case kOldSpaceState: {
if (old_iterator_.has_next()) {
return old_iterator_.next();
}
state_ = kMapState;
// Fall through.
}
case kMapState: {
if (map_iterator_.has_next()) {
return map_iterator_.next();
}
state_ = kLargeObjectState;
// Fall through.
}
case kLargeObjectState: {
HeapObject* heap_object;
do {
heap_object = lo_iterator_.Next();
if (heap_object == NULL) {
state_ = kFinishedState;
return NULL;
}
// Fixed arrays are the only pointer-containing objects in large
// object space.
} while (!heap_object->IsFixedArray());
MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
return answer;
}
case kFinishedState:
return NULL;
default:
break;
}
UNREACHABLE();
return NULL;
}
......
@@ -9,9 +9,10 @@
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/log.h"
#include "src/objects.h"
#include "src/utils.h"
namespace v8 {
@@ -656,7 +657,17 @@ class MemoryChunk {
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory() { return high_water_mark_; }
static inline void UpdateHighWaterMark(Address mark);
static inline void UpdateHighWaterMark(Address mark) {
if (mark == NULL) return;
// Need to subtract one from the mark because when a chunk is full the
// top points to the next address after the chunk, which effectively belongs
// to another chunk. See the comment to Page::FromAllocationTop.
MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
int new_mark = static_cast<int>(mark - chunk->address());
if (new_mark > chunk->high_water_mark_) {
chunk->high_water_mark_ = new_mark;
}
}
protected:
size_t size_;
@@ -741,8 +752,14 @@ class Page : public MemoryChunk {
}
// Returns the next page in the chain of pages owned by a space.
inline Page* next_page();
inline Page* prev_page();
inline Page* next_page() {
DCHECK(next_chunk()->owner() == owner());
return static_cast<Page*>(next_chunk());
}
inline Page* prev_page() {
DCHECK(prev_chunk()->owner() == owner());
return static_cast<Page*>(prev_chunk());
}
inline void set_next_page(Page* page);
inline void set_prev_page(Page* page);
@@ -1246,15 +1263,8 @@ class HeapObjectIterator : public ObjectIterator {
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
// Returns NULL when the iteration has ended.
inline HeapObject* Next() {
do {
HeapObject* next_obj = FromCurrentPage();
if (next_obj != NULL) return next_obj;
} while (AdvanceToNextPage());
return NULL;
}
virtual HeapObject* next_object() { return Next(); }
inline HeapObject* Next();
virtual inline HeapObject* next_object();
private:
enum PageMode { kOnePageOnly, kAllPagesInSpace };
@@ -1645,10 +1655,7 @@ class AllocationResult {
return object_;
}
AllocationSpace RetrySpace() {
DCHECK(IsRetry());
return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}
inline AllocationSpace RetrySpace();
private:
explicit AllocationResult(AllocationSpace space)
@@ -1684,7 +1691,7 @@ class PagedSpace : public Space {
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
bool Contains(HeapObject* o) { return Contains(o->address()); }
inline bool Contains(HeapObject* o);
// Unlike Contains() methods it is safe to call this one even for addresses
// of unmapped memory.
bool ContainsSafe(Address addr);
@@ -2273,25 +2280,10 @@ class SemiSpaceIterator : public ObjectIterator {
// Create an iterator over the allocated objects in the given to-space.
explicit SemiSpaceIterator(NewSpace* space);
HeapObject* Next() {
if (current_ == limit_) return NULL;
if (NewSpacePage::IsAtEnd(current_)) {
NewSpacePage* page = NewSpacePage::FromLimit(current_);
page = page->next_page();
DCHECK(!page->is_anchor());
current_ = page->area_start();
if (current_ == limit_) return NULL;
}
HeapObject* object = HeapObject::FromAddress(current_);
int size = object->Size();
current_ += size;
return object;
}
inline HeapObject* Next();
// Implementation of the ObjectIterator functions.
virtual HeapObject* next_object() { return Next(); }
virtual inline HeapObject* next_object();
private:
void Initialize(Address start, Address end);
@@ -2811,45 +2803,7 @@ class PointerChunkIterator BASE_EMBEDDED {
inline explicit PointerChunkIterator(Heap* heap);
// Return NULL when the iterator is done.
MemoryChunk* next() {
switch (state_) {
case kOldSpaceState: {
if (old_iterator_.has_next()) {
return old_iterator_.next();
}
state_ = kMapState;
// Fall through.
}
case kMapState: {
if (map_iterator_.has_next()) {
return map_iterator_.next();
}
state_ = kLargeObjectState;
// Fall through.
}
case kLargeObjectState: {
HeapObject* heap_object;
do {
heap_object = lo_iterator_.Next();
if (heap_object == NULL) {
state_ = kFinishedState;
return NULL;
}
// Fixed arrays are the only pointer-containing objects in large
// object space.
} while (!heap_object->IsFixedArray());
MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
return answer;
}
case kFinishedState:
return NULL;
default:
break;
}
UNREACHABLE();
return NULL;
}
inline MemoryChunk* next();
private:
enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
......
@@ -153,16 +153,6 @@ bool Object::IsExternal() const {
bool Object::IsAccessorInfo() const { return IsExecutableAccessorInfo(); }
bool Object::IsSmi() const {
return HAS_SMI_TAG(this);
}
bool Object::IsHeapObject() const {
return Internals::HasHeapObjectTag(this);
}
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
......
@@ -1253,6 +1253,11 @@ class Object {
};
// In objects.h to be usable without objects-inl.h inclusion.
bool Object::IsSmi() const { return HAS_SMI_TAG(this); }
bool Object::IsHeapObject() const { return Internals::HasHeapObjectTag(this); }
struct Brief {
explicit Brief(const Object* const v) : value(v) {}
const Object* value;
......