Commit 3fb62235 authored by ricow@chromium.org

Add functionality for finding code objects from a pc that points into the code object's instructions.

This allows us to find a code object using just the pc. The approach
uses a cache (PcToCodeCache) so that we don't have to repeatedly
iterate heap pages.

This change eliminates the need for cooking and uncooking of stack frames.
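
For context, the core of the change is a small direct-mapped cache sitting in front of a slow heap walk. The sketch below illustrates the idea only; the names, the hash function, and the stubbed slow path are simplifications, not the exact V8 code:

// --- illustrative sketch, not part of this commit ---
#include <cstdint>

typedef uintptr_t Address;
struct Code;  // opaque stand-in for v8::internal::Code

// Direct-mapped cache: each pc hashes to exactly one slot, so a hit
// costs one comparison and a miss falls back to the slow heap walk.
static const int kCacheSize = 256;  // must be a power of two
struct Entry { Address pc; Code* code; };
static Entry cache[kCacheSize];

// Slow path: walk heap pages to find the code object containing pc.
// Stubbed out here; the real version iterates objects on the page.
Code* SlowFindCodeForPc(Address pc) { return 0; }

Code* FindCodeForPc(Address pc) {
  uint32_t hash = static_cast<uint32_t>(pc) * 2654435761u;  // multiplicative hash
  Entry* entry = &cache[hash & (kCacheSize - 1)];
  if (entry->pc != pc) {
    // Write code before pc: a profiling signal reading the cache must
    // never see a matching pc paired with a stale code pointer.
    entry->code = SlowFindCodeForPc(pc);
    entry->pc = pc;
  }
  return entry->code;
}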


Review URL: http://codereview.chromium.org/3226014

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5369 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 937a9d46
......@@ -37,21 +37,6 @@ namespace v8 {
namespace internal {
StackFrame::Type StackFrame::ComputeType(State* state) {
ASSERT(state->fp != NULL);
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
return ARGUMENTS_ADAPTOR;
}
// The marker and function offsets overlap. If the marker isn't a
// smi then the frame is a JavaScript frame -- and the marker is
// really the function.
const int offset = StandardFrameConstants::kMarkerOffset;
Object* marker = Memory::Object_at(state->fp + offset);
if (!marker->IsSmi()) return JAVA_SCRIPT;
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute frame type and stack pointer.
......@@ -66,54 +51,6 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
v->VisitPointer(&code_slot());
// The arguments are traversed as part of the expression stack of
// the calling frame.
}
int JavaScriptFrame::GetProvidedParametersCount() const {
return ComputeParametersCount();
}
Address JavaScriptFrame::GetCallerStackPointer() const {
int arguments;
if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
// The arguments for cooked frames are traversed as if they were
// expression stack elements of the calling frame. The reason for
// this rather strange decision is that we cannot access the
// function during mark-compact GCs when the stack is cooked.
// In fact accessing heap objects (like function->shared() below)
// at all during GC is problematic.
arguments = 0;
} else {
// Compute the number of arguments by getting the number of formal
// parameters of the function. We must remember to take the
// receiver into account (+1).
JSFunction* function = JSFunction::cast(this->function());
arguments = function->shared()->formal_parameter_count() + 1;
}
const int offset = StandardFrameConstants::kCallerSPOffset;
return fp() + offset + (arguments * kPointerSize);
}
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
const int arguments = Smi::cast(GetExpression(0))->value();
const int offset = StandardFrameConstants::kCallerSPOffset;
return fp() + offset + (arguments + 1) * kPointerSize;
}
Address InternalFrame::GetCallerStackPointer() const {
// Internal frames have no arguments. The stack pointer of the
// caller is at a fixed offset from the frame pointer.
return fp() + StandardFrameConstants::kCallerSPOffset;
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
......@@ -64,9 +64,8 @@ inline bool StackHandler::includes(Address address) const {
}
inline void StackHandler::Iterate(ObjectVisitor* v) const {
// Stack handlers do not contain any pointers that need to be
// traversed.
inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
StackFrame::IteratePc(v, pc_address(), holder);
}
......@@ -81,15 +80,9 @@ inline StackHandler::State StackHandler::state() const {
}
inline Address StackHandler::pc() const {
inline Address* StackHandler::pc_address() const {
const int offset = StackHandlerConstants::kPCOffset;
return Memory::Address_at(address() + offset);
}
inline void StackHandler::set_pc(Address value) {
const int offset = StackHandlerConstants::kPCOffset;
Memory::Address_at(address() + offset) = value;
return reinterpret_cast<Address*>(address() + offset);
}
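
Replacing pc()/set_pc() with pc_address() is what allows the collector to update a handler's saved return address in place. A minimal sketch of the slot idiom, with simplified types rather than the real StackHandler:

// --- illustrative sketch, not part of this commit ---
#include <cstdint>

typedef uintptr_t Address;

// Hypothetical handler record living on the machine stack.
struct HandlerRecord {
  Address saved_pc;
  Address* pc_address() { return &saved_pc; }  // expose the slot, not the value
};

// A GC visitor can now rewrite the slot directly when code moves.
void RebasePc(Address* slot, Address old_start, Address new_start) {
  *slot = new_start + (*slot - old_start);  // preserve the instruction offset
}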
......
......@@ -36,6 +36,11 @@
namespace v8 {
namespace internal {
PcToCodeCache::PcToCodeCacheEntry
PcToCodeCache::cache_[PcToCodeCache::kPcToCodeCacheSize];
int SafeStackFrameIterator::active_count_ = 0;
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
class StackHandlerIterator BASE_EMBEDDED {
......@@ -88,7 +93,6 @@ StackFrameIterator::StackFrameIterator(bool use_top, Address fp, Address sp)
if (use_top || fp != NULL) {
Reset();
}
JavaScriptFrame_.DisableHeapAccess();
}
#undef INITIALIZE_SINGLETON
......@@ -201,7 +205,7 @@ bool StackTraceFrameIterator::IsValidFrame() {
SafeStackFrameIterator::SafeStackFrameIterator(
Address fp, Address sp, Address low_bound, Address high_bound) :
low_bound_(low_bound), high_bound_(high_bound),
maintainer_(), low_bound_(low_bound), high_bound_(high_bound),
is_valid_top_(
IsWithinBounds(low_bound, high_bound,
Top::c_entry_fp(Top::GetCurrentThread())) &&
......@@ -302,69 +306,42 @@ void SafeStackTraceFrameIterator::Advance() {
#endif
// -------------------------------------------------------------------------
void StackHandler::Cook(Code* code) {
ASSERT(code->contains(pc()));
set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
}
void StackHandler::Uncook(Code* code) {
set_pc(code->instruction_start() + OffsetFrom(pc()));
ASSERT(code->contains(pc()));
}
// -------------------------------------------------------------------------
bool StackFrame::HasHandler() const {
StackHandlerIterator it(this, top_handler());
return !it.done();
}
void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
ASSERT(!thread->stack_is_cooked());
for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
it.frame()->Cook();
void StackFrame::IteratePc(ObjectVisitor* v,
Address* pc_address,
Code* holder) {
Address pc = *pc_address;
ASSERT(holder->contains(pc));
unsigned pc_offset = pc - holder->instruction_start();
Object* code = holder;
v->VisitPointer(&code);
if (code != holder) {
holder = reinterpret_cast<Code*>(code);
pc = holder->instruction_start() + pc_offset;
*pc_address = pc;
}
thread->set_stack_is_cooked(true);
}
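
In effect, IteratePc treats the frame's saved pc as a derived pointer: the visitor may move the Code object, and the pc is then rebuilt from its preserved instruction offset. A runnable toy model of that invariant, using plain C++ in place of the V8 types:

// --- illustrative sketch, not part of this commit ---
#include <cassert>
#include <cstdint>

typedef uintptr_t Address;

struct FakeCode { Address instruction_start; };

int main() {
  FakeCode old_code = {0x1000};
  FakeCode new_code = {0x8000};  // where the "GC" moved the code
  FakeCode* holder = &old_code;
  Address pc = 0x1034;           // offset 0x34 into old_code

  // What IteratePc does, spelled out:
  Address offset = pc - holder->instruction_start;  // offset survives the move
  holder = &new_code;            // the visitor updated the code pointer
  pc = holder->instruction_start + offset;

  assert(pc == 0x8034);          // same instruction, new location
  return 0;
}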
void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
ASSERT(thread->stack_is_cooked());
for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
it.frame()->Uncook();
StackFrame::Type StackFrame::ComputeType(State* state) {
ASSERT(state->fp != NULL);
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
return ARGUMENTS_ADAPTOR;
}
thread->set_stack_is_cooked(false);
// The marker and function offsets overlap. If the marker isn't a
// smi then the frame is a JavaScript frame -- and the marker is
// really the function.
const int offset = StandardFrameConstants::kMarkerOffset;
Object* marker = Memory::Object_at(state->fp + offset);
if (!marker->IsSmi()) return JAVA_SCRIPT;
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
void StackFrame::Cook() {
Code* code = this->code();
ASSERT(code->IsCode());
for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
it.handler()->Cook(code);
}
ASSERT(code->contains(pc()));
set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
}
void StackFrame::Uncook() {
Code* code = this->code();
ASSERT(code->IsCode());
for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
it.handler()->Uncook(code);
}
set_pc(code->instruction_start() + OffsetFrom(pc()));
ASSERT(code->contains(pc()));
}
StackFrame::Type StackFrame::GetCallerState(State* state) const {
ComputeCallerState(state);
......@@ -425,6 +402,14 @@ void ExitFrame::SetCallerFp(Address caller_fp) {
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
IteratePc(v, pc_address(), code());
v->VisitPointer(&code_slot());
}
Address ExitFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPDisplacement;
}
......@@ -499,6 +484,49 @@ Code* JavaScriptFrame::unchecked_code() const {
}
int JavaScriptFrame::GetProvidedParametersCount() const {
return ComputeParametersCount();
}
Address JavaScriptFrame::GetCallerStackPointer() const {
int arguments;
if (Heap::gc_state() != Heap::NOT_IN_GC ||
SafeStackFrameIterator::is_active()) {
// If we are currently iterating over the safe stack, the
// arguments for frames are traversed as if they were
// expression stack elements of the calling frame. The reason for
// this rather strange decision is that we cannot access the
// function during mark-compact GCs when objects may have been marked.
// In fact accessing heap objects (like function->shared() below)
// at all during GC is problematic.
arguments = 0;
} else {
// Compute the number of arguments by getting the number of formal
// parameters of the function. We must remember to take the
// receiver into account (+1).
JSFunction* function = JSFunction::cast(this->function());
arguments = function->shared()->formal_parameter_count() + 1;
}
const int offset = StandardFrameConstants::kCallerSPOffset;
return fp() + offset + (arguments * kPointerSize);
}
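
The caller-SP computation is plain pointer arithmetic. A worked example with illustrative constants (the real values come from StandardFrameConstants and the target architecture):

// --- illustrative sketch, not part of this commit ---
#include <cstdint>

typedef uintptr_t Address;

const int kPointerSize = 4;                    // 32-bit target assumed
const int kCallerSPOffset = 2 * kPointerSize;  // illustrative value

// JS frame: skip the frame header, then the receiver plus the formal
// parameters the caller pushed.
Address JsCallerSP(Address fp, int formal_parameter_count) {
  int arguments = formal_parameter_count + 1;  // +1 for the receiver
  return fp + kCallerSPOffset + arguments * kPointerSize;
}

// Adaptor frame: the actual argument count is read off the frame
// (GetExpression(0) in the real code), again plus the receiver.
Address AdaptorCallerSP(Address fp, int actual_argument_count) {
  return fp + kCallerSPOffset + (actual_argument_count + 1) * kPointerSize;
}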
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
const int arguments = Smi::cast(GetExpression(0))->value();
const int offset = StandardFrameConstants::kCallerSPOffset;
return fp() + offset + (arguments + 1) * kPointerSize;
}
Address InternalFrame::GetCallerStackPointer() const {
// Internal frames have no arguments. The stack pointer of the
// caller is at a fixed offset from the frame pointer.
return fp() + StandardFrameConstants::kCallerSPOffset;
}
Code* ArgumentsAdaptorFrame::unchecked_code() const {
return Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline);
}
......@@ -694,13 +722,14 @@ void EntryFrame::Iterate(ObjectVisitor* v) const {
ASSERT(!it.done());
StackHandler* handler = it.handler();
ASSERT(handler->is_entry());
handler->Iterate(v);
// Make sure that the entry frame does not contain more than
// one stack handler.
handler->Iterate(v, code());
#ifdef DEBUG
// Make sure that the entry frame does not contain more than one
// stack handler.
it.Advance();
ASSERT(it.done());
#endif
IteratePc(v, pc_address(), code());
}
......@@ -717,7 +746,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
v->VisitPointers(base, reinterpret_cast<Object**>(address));
base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
// Traverse the pointers in the handler itself.
handler->Iterate(v);
handler->Iterate(v, code());
}
v->VisitPointers(base, limit);
}
......@@ -725,6 +754,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
IterateExpressions(v);
IteratePc(v, pc_address(), code());
// Traverse callee-saved registers, receiver, and parameters.
const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
......@@ -739,6 +769,7 @@ void InternalFrame::Iterate(ObjectVisitor* v) const {
// Internal frames only have object pointers on the expression stack
// as they never have any arguments.
IterateExpressions(v);
IteratePc(v, pc_address(), code());
}
......@@ -760,6 +791,56 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
// -------------------------------------------------------------------------
Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
Code* code = reinterpret_cast<Code*>(object);
ASSERT(code != NULL && code->contains(pc));
return code;
}
Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
// Check if the pc points into a large object chunk.
LargeObjectChunk* chunk = Heap::lo_space()->FindChunkContainingPc(pc);
if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
// Iterate through the 8K page until we reach the end or find an
// object starting after the pc.
Page* page = Page::FromAddress(pc);
HeapObjectIterator iterator(page, Heap::GcSafeSizeOfOldObjectFunction());
HeapObject* previous = NULL;
while (true) {
HeapObject* next = iterator.next();
if (next == NULL || next->address() >= pc) {
return GcSafeCastToCode(previous, pc);
}
previous = next;
}
}
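
The page walk relies on objects being laid out in address order, so the code object containing pc is the last object whose start address is at or below pc. The same scan in isolation, generalized to a sorted list of object start addresses:

// --- illustrative sketch, not part of this commit ---
#include <cstdint>
#include <vector>

typedef uintptr_t Address;

// Return the start of the object containing pc: the last start <= pc,
// or 0 if the first object already begins beyond pc.
Address FindContainingStart(const std::vector<Address>& starts, Address pc) {
  Address previous = 0;
  for (size_t i = 0; i < starts.size(); i++) {
    if (starts[i] > pc) break;  // first object that starts after pc
    previous = starts[i];
  }
  return previous;
}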
PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
Counters::pc_to_code.Increment();
ASSERT(IsPowerOf2(kPcToCodeCacheSize));
uint32_t hash = ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
uint32_t index = hash & (kPcToCodeCacheSize - 1);
PcToCodeCacheEntry* entry = cache(index);
if (entry->pc == pc) {
Counters::pc_to_code_cached.Increment();
ASSERT(entry->code == GcSafeFindCodeForPc(pc));
} else {
// Because this code may be interrupted by a profiling signal that
// also queries the cache, we cannot update pc before the code has
// been set. Otherwise, we risk trying to use a cache entry before
// the code has been computed.
entry->code = GcSafeFindCodeForPc(pc);
entry->pc = pc;
}
return entry;
}
// -------------------------------------------------------------------------
int NumRegs(RegList reglist) {
int n = 0;
while (reglist != 0) {
......
......@@ -46,6 +46,32 @@ class Top;
class ThreadLocalTop;
class PcToCodeCache : AllStatic {
public:
struct PcToCodeCacheEntry {
Address pc;
Code* code;
};
static PcToCodeCacheEntry* cache(int index) {
return &cache_[index];
}
static Code* GcSafeFindCodeForPc(Address pc);
static Code* GcSafeCastToCode(HeapObject* object, Address pc);
static void FlushPcToCodeCache() {
memset(&cache_[0], 0, sizeof(cache_));
}
static PcToCodeCacheEntry* GetCacheEntry(Address pc);
private:
static const int kPcToCodeCacheSize = 256;
static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
};
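
Callers never touch the slow path directly; resolving a code object is a single cache lookup. A sketch of the consumer side, assuming the declaration above (this mirrors the GetContainingCode accessor added to StackFrame below):

// --- illustrative sketch, not part of this commit ---
Code* CodeForFramePc(Address pc) {
  return PcToCodeCache::GetCacheEntry(pc)->code;
}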
class StackHandler BASE_EMBEDDED {
public:
enum State {
......@@ -64,7 +90,7 @@ class StackHandler BASE_EMBEDDED {
inline bool includes(Address address) const;
// Garbage collection support.
inline void Iterate(ObjectVisitor* v) const;
inline void Iterate(ObjectVisitor* v, Code* holder) const;
// Conversion support.
static inline StackHandler* FromAddress(Address address);
......@@ -74,16 +100,11 @@ class StackHandler BASE_EMBEDDED {
bool is_try_catch() { return state() == TRY_CATCH; }
bool is_try_finally() { return state() == TRY_FINALLY; }
// Garbage collection support.
void Cook(Code* code);
void Uncook(Code* code);
private:
// Accessors.
inline State state() const;
inline Address pc() const;
inline void set_pc(Address value);
inline Address* pc_address() const;
DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
};
......@@ -162,15 +183,16 @@ class StackFrame BASE_EMBEDDED {
virtual Code* unchecked_code() const = 0;
// Get the code associated with this frame.
inline Code* code() const {
return Code::cast(unchecked_code());
Code* code() const { return GetContainingCode(pc()); }
// Get the code object that contains the given pc.
Code* GetContainingCode(Address pc) const {
return PcToCodeCache::GetCacheEntry(pc)->code;
}
// Garbage collection support.
static void CookFramesForThread(ThreadLocalTop* thread);
static void UncookFramesForThread(ThreadLocalTop* thread);
virtual void Iterate(ObjectVisitor* v) const = 0;
static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
virtual void Iterate(ObjectVisitor* v) const { }
// Printing support.
enum PrintMode { OVERVIEW, DETAILS };
......@@ -212,10 +234,6 @@ class StackFrame BASE_EMBEDDED {
// Get the type and the state of the calling frame.
virtual Type GetCallerState(State* state) const;
// Cooking/uncooking support.
void Cook();
void Uncook();
friend class StackFrameIterator;
friend class StackHandlerIterator;
friend class SafeStackFrameIterator;
......@@ -417,19 +435,11 @@ class JavaScriptFrame: public StandardFrame {
protected:
explicit JavaScriptFrame(StackFrameIterator* iterator)
: StandardFrame(iterator), disable_heap_access_(false) { }
: StandardFrame(iterator) { }
virtual Address GetCallerStackPointer() const;
// When this mode is enabled, it is not allowed to access heap objects.
// This is a special mode used when gathering stack samples in the profiler.
// A shortcoming is that the caller's SP value will be calculated incorrectly
// (see the GetCallerStackPointer implementation), but it is not used for stack
// sampling.
void DisableHeapAccess() { disable_heap_access_ = true; }
private:
bool disable_heap_access_;
inline Object* function_slot_object() const;
friend class StackFrameIterator;
......@@ -636,6 +646,8 @@ class SafeStackFrameIterator BASE_EMBEDDED {
void Advance();
void Reset();
static bool is_active() { return active_count_ > 0; }
static bool IsWithinBounds(
Address low_bound, Address high_bound, Address addr) {
return low_bound <= addr && addr <= high_bound;
......@@ -649,6 +661,19 @@ class SafeStackFrameIterator BASE_EMBEDDED {
bool IsValidFrame(StackFrame* frame) const;
bool IsValidCaller(StackFrame* frame);
// This is a nasty hack to make sure the active count is incremented
// before the constructor for the embedded iterator is invoked. This
// is needed because the constructor will start looking at frames
// right away and we need to make sure it doesn't start inspecting
// heap objects.
class ActiveCountMaintainer BASE_EMBEDDED {
public:
ActiveCountMaintainer() { active_count_++; }
~ActiveCountMaintainer() { active_count_--; }
};
ActiveCountMaintainer maintainer_;
static int active_count_;
Address low_bound_;
Address high_bound_;
const bool is_valid_top_;
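
Because C++ constructs members in declaration order, declaring maintainer_ before the other members guarantees active_count_ is already incremented when the embedded iterator's constructor starts inspecting frames. A standalone illustration of the same RAII counting pattern:

// --- illustrative sketch, not part of this commit ---
struct ScopeCounter {
  static int active;
  ScopeCounter() { active++; }
  ~ScopeCounter() { active--; }
};
int ScopeCounter::active = 0;

struct Sampler {
  ScopeCounter guard_;  // declared first, so constructed first
  bool saw_active_;     // what a member constructed later observes
  Sampler() : saw_active_(ScopeCounter::active > 0) {}  // always true
};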
......
......@@ -104,6 +104,7 @@ List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
......@@ -193,6 +194,33 @@ bool Heap::HasBeenSetup() {
}
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
MapWord map_word = object->map_word();
map_word.ClearMark();
map_word.ClearOverflow();
return object->SizeFromMap(map_word.ToMap());
}
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
ASSERT(MarkCompactCollector::are_map_pointers_encoded());
uint32_t marker = Memory::uint32_at(object->address());
if (marker == MarkCompactCollector::kSingleFreeEncoding) {
return kIntSize;
} else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
return Memory::int_at(object->address() + kIntSize);
} else {
MapWord map_word = object->map_word();
Address map_address = map_word.DecodeMapAddress(Heap::map_space());
Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
return object->SizeFromMap(map);
}
}
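
Both size paths decode raw words that the collector wrote into the heap. A toy decoder for the two free-region cases (a live object would instead decode its relocated map, omitted here; layout and constants are illustrative):

// --- illustrative sketch, not part of this commit ---
#include <cstdint>
#include <cstring>

const uint32_t kSingleFreeEncoding = 0;
const uint32_t kMultiFreeEncoding = 1;
const int kIntSize = 4;

// Marker 0 => one-word hole; marker 1 => the next int holds the byte
// size. Anything else is a live object (decode its map; not shown).
int FreeRegionSize(const uint8_t* p) {
  uint32_t marker;
  std::memcpy(&marker, p, sizeof(marker));
  if (marker == kSingleFreeEncoding) return kIntSize;
  if (marker == kMultiFreeEncoding) {
    int32_t size;
    std::memcpy(&size, p + kIntSize, sizeof(size));
    return size;
  }
  return -1;  // live object: caller must decode the map word instead
}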
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
// Is global GC requested?
if (space != NEW_SPACE || FLAG_gc_global) {
......@@ -742,8 +770,6 @@ void Heap::MarkCompact(GCTracer* tracer) {
MarkCompactCollector::CollectGarbage();
MarkCompactEpilogue(is_compacting);
LOG(ResourceEvent("markcompact", "end"));
gc_state_ = NOT_IN_GC;
......@@ -765,9 +791,6 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
CompilationCache::MarkCompactPrologue();
Top::MarkCompactPrologue(is_compacting);
ThreadManager::MarkCompactPrologue(is_compacting);
CompletelyClearInstanceofCache();
if (is_compacting) FlushNumberStringCache();
......@@ -776,12 +799,6 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
}
void Heap::MarkCompactEpilogue(bool is_compacting) {
Top::MarkCompactEpilogue(is_compacting);
ThreadManager::MarkCompactEpilogue(is_compacting);
}
Object* Heap::FindCodeObject(Address a) {
Object* obj = code_space_->FindObject(a);
if (obj->IsFailure()) {
......@@ -4049,6 +4066,8 @@ bool Heap::Setup(bool create_heap_objects) {
NewSpaceScavenger::Initialize();
MarkCompactCollector::Initialize();
MarkMapPointersAsEncoded(false);
// Setup memory allocator and reserve a chunk of memory for new
// space. The chunk is double the size of the requested reserved
// new space size to ensure that we can find a pair of semispaces that
......
......@@ -819,6 +819,13 @@ class Heap : public AllStatic {
roots_[kCodeStubsRootIndex] = value;
}
// Support for computing object sizes for old objects during GCs. Returns
// a function that is guaranteed to be safe for computing object sizes in
// the current GC phase.
static HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
return gc_safe_size_of_old_object_;
}
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
static void public_set_non_monomorphic_cache(NumberDictionary* value) {
roots_[kNonMonomorphicCacheRootIndex] = value;
......@@ -1172,6 +1179,18 @@ class Heap : public AllStatic {
static GCCallback global_gc_prologue_callback_;
static GCCallback global_gc_epilogue_callback_;
// Support for computing object sizes during GC.
static HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
// Update the GC state. Called from the mark-compact collector.
static void MarkMapPointersAsEncoded(bool encoded) {
gc_safe_size_of_old_object_ = encoded
? &GcSafeSizeOfOldObjectWithEncodedMap
: &GcSafeSizeOfOldObject;
}
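
Selecting the size function once per GC phase replaces a branch in every object-size query with one indirect call. The pattern in isolation, with stubs standing in for the V8 functions:

// --- illustrative sketch, not part of this commit ---
typedef int (*SizeFn)(const void* object);

static int SizePlain(const void* object) { return 0; }    // stub
static int SizeEncoded(const void* object) { return 0; }  // stub

static SizeFn size_of_old_object = &SizePlain;

void SetMapPointersEncoded(bool encoded) {
  // Flip once at the phase boundary; every caller picks it up implicitly.
  size_of_old_object = encoded ? &SizeEncoded : &SizePlain;
}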
// Checks whether a global GC is necessary
static GarbageCollector SelectGarbageCollector(AllocationSpace space);
......@@ -1225,7 +1244,6 @@ class Heap : public AllStatic {
// Code to be run before and after mark-compact.
static void MarkCompactPrologue(bool is_compacting);
static void MarkCompactEpilogue(bool is_compacting);
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
......@@ -1317,6 +1335,7 @@ class Heap : public AllStatic {
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
friend class LinearAllocationScope;
friend class MarkCompactCollector;
};
......
......@@ -35,21 +35,6 @@ namespace v8 {
namespace internal {
StackFrame::Type StackFrame::ComputeType(State* state) {
ASSERT(state->fp != NULL);
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
return ARGUMENTS_ADAPTOR;
}
// The marker and function offsets overlap. If the marker isn't a
// smi then the frame is a JavaScript frame -- and the marker is
// really the function.
const int offset = StandardFrameConstants::kMarkerOffset;
Object* marker = Memory::Object_at(state->fp + offset);
if (!marker->IsSmi()) return JAVA_SCRIPT;
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute the stack pointer.
......@@ -63,54 +48,6 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
v->VisitPointer(&code_slot());
// The arguments are traversed as part of the expression stack of
// the calling frame.
}
int JavaScriptFrame::GetProvidedParametersCount() const {
return ComputeParametersCount();
}
Address JavaScriptFrame::GetCallerStackPointer() const {
int arguments;
if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
// The arguments for cooked frames are traversed as if they were
// expression stack elements of the calling frame. The reason for
// this rather strange decision is that we cannot access the
// function during mark-compact GCs when the stack is cooked.
// In fact accessing heap objects (like function->shared() below)
// at all during GC is problematic.
arguments = 0;
} else {
// Compute the number of arguments by getting the number of formal
// parameters of the function. We must remember to take the
// receiver into account (+1).
JSFunction* function = JSFunction::cast(this->function());
arguments = function->shared()->formal_parameter_count() + 1;
}
const int offset = StandardFrameConstants::kCallerSPOffset;
return fp() + offset + (arguments * kPointerSize);
}
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
const int arguments = Smi::cast(GetExpression(0))->value();
const int offset = StandardFrameConstants::kCallerSPOffset;
return fp() + offset + (arguments + 1) * kPointerSize;
}
Address InternalFrame::GetCallerStackPointer() const {
// Internal frames have no arguments. The stack pointer of the
// caller is at a fixed offset from the frame pointer.
return fp() + StandardFrameConstants::kCallerSPOffset;
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
......@@ -802,25 +802,6 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
};
class FrameCookingThreadVisitor : public ThreadVisitor {
public:
void VisitThread(ThreadLocalTop* top) {
StackFrame::CookFramesForThread(top);
}
};
class FrameUncookingThreadVisitor : public ThreadVisitor {
public:
void VisitThread(ThreadLocalTop* top) {
StackFrame::UncookFramesForThread(top);
}
};
static void IterateAllThreads(ThreadVisitor* visitor) {
Top::IterateThread(visitor);
ThreadManager::IterateArchivedThreads(visitor);
}
// Finds all references to original and replaces them with substitution.
static void ReplaceCodeObject(Code* original, Code* substitution) {
ASSERT(!Heap::InNewSpace(substitution));
......@@ -836,13 +817,7 @@ static void ReplaceCodeObject(Code* original, Code* substitution) {
// so temporary replace the pointers with offset numbers
// in prologue/epilogue.
{
FrameCookingThreadVisitor cooking_visitor;
IterateAllThreads(&cooking_visitor);
Heap::IterateStrongRoots(&visitor, VISIT_ALL);
FrameUncookingThreadVisitor uncooking_visitor;
IterateAllThreads(&uncooking_visitor);
}
// Now iterate over all pointers of all objects, including code_target
......
......@@ -85,11 +85,15 @@ void MarkCompactCollector::CollectGarbage() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
EncodeForwardingAddresses();
Heap::MarkMapPointersAsEncoded(true);
UpdatePointers();
Heap::MarkMapPointersAsEncoded(false);
PcToCodeCache::FlushPcToCodeCache();
RelocateObjects();
} else {
SweepSpaces();
PcToCodeCache::FlushPcToCodeCache();
}
Finish();
......@@ -1185,8 +1189,6 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
// Encode a free region, defined by the given start address and size, in the
......@@ -1194,10 +1196,10 @@ static const uint32_t kMultiFreeEncoding = 1;
void EncodeFreeRegion(Address free_start, int free_size) {
ASSERT(free_size >= kIntSize);
if (free_size == kIntSize) {
Memory::uint32_at(free_start) = kSingleFreeEncoding;
Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
} else {
ASSERT(free_size >= 2 * kIntSize);
Memory::uint32_at(free_start) = kMultiFreeEncoding;
Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
Memory::int_at(free_start + kIntSize) = free_size;
}
......
......@@ -121,11 +121,17 @@ class MarkCompactCollector: public AllStatic {
#ifdef DEBUG
// Checks whether performing mark-compact collection.
static bool in_use() { return state_ > PREPARE_GC; }
static bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif
// Determine type of object and emit deletion log event.
static void ReportDeleteIfNeeded(HeapObject* obj);
// Distinguishable invalid map encodings (for single word and multiple words)
// that indicate free regions.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
private:
#ifdef DEBUG
enum CollectorState {
......
......@@ -36,6 +36,10 @@ namespace internal {
class Memory {
public:
static uint8_t& uint8_at(Address addr) {
return *reinterpret_cast<uint8_t*>(addr);
}
static uint16_t& uint16_at(Address addr) {
return *reinterpret_cast<uint16_t*>(addr);
}
......
......@@ -68,6 +68,12 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
}
HeapObjectIterator::HeapObjectIterator(Page* page,
HeapObjectCallback size_func) {
Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
}
void HeapObjectIterator::Initialize(Address cur, Address end,
HeapObjectCallback size_f) {
cur_addr_ = cur;
......@@ -2721,6 +2727,22 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Failure::Exception();
}
LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
// TODO(853): Change this implementation to only find executable
// chunks and use some kind of hash-based approach to speed it up.
for (LargeObjectChunk* chunk = first_chunk_;
chunk != NULL;
chunk = chunk->next()) {
Address chunk_address = chunk->address();
if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
return chunk;
}
}
return NULL;
}
void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
......
......@@ -756,6 +756,7 @@ class HeapObjectIterator: public ObjectIterator {
HeapObjectIterator(PagedSpace* space,
Address start,
HeapObjectCallback size_func);
HeapObjectIterator(Page* page, HeapObjectCallback size_func);
inline HeapObject* next() {
return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
......@@ -2137,6 +2138,11 @@ class LargeObjectSpace : public Space {
// space, may be slow.
Object* FindObject(Address a);
// Finds the large object chunk containing the given pc. Returns NULL
// if no such chunk exists.
LargeObjectChunk* FindChunkContainingPc(Address pc);
// Iterates objects covered by dirty regions.
void IterateDirtyRegions(ObjectSlotCallback func);
......
......@@ -69,7 +69,6 @@ void ThreadLocalTop::Initialize() {
#ifdef ENABLE_LOGGING_AND_PROFILING
js_entry_sp_ = 0;
#endif
stack_is_cooked_ = false;
try_catch_handler_address_ = NULL;
context_ = NULL;
int id = ThreadManager::CurrentId();
......@@ -303,39 +302,6 @@ void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
}
void Top::MarkCompactPrologue(bool is_compacting) {
MarkCompactPrologue(is_compacting, &thread_local_);
}
void Top::MarkCompactPrologue(bool is_compacting, char* data) {
MarkCompactPrologue(is_compacting, reinterpret_cast<ThreadLocalTop*>(data));
}
void Top::MarkCompactPrologue(bool is_compacting, ThreadLocalTop* thread) {
if (is_compacting) {
StackFrame::CookFramesForThread(thread);
}
}
void Top::MarkCompactEpilogue(bool is_compacting, char* data) {
MarkCompactEpilogue(is_compacting, reinterpret_cast<ThreadLocalTop*>(data));
}
void Top::MarkCompactEpilogue(bool is_compacting) {
MarkCompactEpilogue(is_compacting, &thread_local_);
}
void Top::MarkCompactEpilogue(bool is_compacting, ThreadLocalTop* thread) {
if (is_compacting) {
StackFrame::UncookFramesForThread(thread);
}
}
static int stack_trace_nesting_level = 0;
static StringStream* incomplete_message = NULL;
......
......@@ -104,9 +104,6 @@ class ThreadLocalTop BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
Address js_entry_sp_; // the stack pointer of the bottom js entry frame
#endif
bool stack_is_cooked_;
inline bool stack_is_cooked() { return stack_is_cooked_; }
inline void set_stack_is_cooked(bool value) { stack_is_cooked_ = value; }
// Generated code scratch locations.
int32_t formal_count_;
......@@ -260,12 +257,6 @@ class Top {
// Generated code scratch locations.
static void* formal_count_address() { return &thread_local_.formal_count_; }
static void MarkCompactPrologue(bool is_compacting);
static void MarkCompactEpilogue(bool is_compacting);
static void MarkCompactPrologue(bool is_compacting,
char* archived_thread_data);
static void MarkCompactEpilogue(bool is_compacting,
char* archived_thread_data);
static void PrintCurrentStackTrace(FILE* out);
static void PrintStackTrace(FILE* out, char* thread_data);
static void PrintStack(StringStream* accumulator);
......
......@@ -107,7 +107,10 @@ namespace internal {
/* Number of contexts created from scratch. */ \
SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
/* Number of contexts created by partial snapshot. */ \
SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot)
SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
/* Number of code objects found from pc. */ \
SC(pc_to_code, V8.PcToCode) \
SC(pc_to_code_cached, V8.PcToCodeCached)
#define STATS_COUNTER_LIST_2(SC) \
......
......@@ -342,28 +342,6 @@ void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
}
void ThreadManager::MarkCompactPrologue(bool is_compacting) {
for (ThreadState* state = ThreadState::FirstInUse();
state != NULL;
state = state->Next()) {
char* data = state->data();
data += HandleScopeImplementer::ArchiveSpacePerThread();
Top::MarkCompactPrologue(is_compacting, data);
}
}
void ThreadManager::MarkCompactEpilogue(bool is_compacting) {
for (ThreadState* state = ThreadState::FirstInUse();
state != NULL;
state = state->Next()) {
char* data = state->data();
data += HandleScopeImplementer::ArchiveSpacePerThread();
Top::MarkCompactEpilogue(is_compacting, data);
}
}
int ThreadManager::CurrentId() {
return Thread::GetThreadLocalInt(thread_id_key);
}
......
......@@ -105,8 +105,6 @@ class ThreadManager : public AllStatic {
static void Iterate(ObjectVisitor* v);
static void IterateArchivedThreads(ThreadVisitor* v);
static void MarkCompactPrologue(bool is_compacting);
static void MarkCompactEpilogue(bool is_compacting);
static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
static int CurrentId();
......
......@@ -35,19 +35,6 @@ namespace v8 {
namespace internal {
StackFrame::Type StackFrame::ComputeType(State* state) {
ASSERT(state->fp != NULL);
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
return ARGUMENTS_ADAPTOR;
}
// The marker and function offsets overlap. If the marker isn't a
// smi then the frame is a JavaScript frame -- and the marker is
// really the function.
const int offset = StandardFrameConstants::kMarkerOffset;
Object* marker = Memory::Object_at(state->fp + offset);
if (!marker->IsSmi()) return JAVA_SCRIPT;
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
......@@ -62,51 +49,6 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
return EXIT;
}
int JavaScriptFrame::GetProvidedParametersCount() const {
return ComputeParametersCount();
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
v->VisitPointer(&code_slot());
// The arguments are traversed as part of the expression stack of
// the calling frame.
}
byte* InternalFrame::GetCallerStackPointer() const {
// Internal frames have no arguments. The stack pointer of the
// caller is at a fixed offset from the frame pointer.
return fp() + StandardFrameConstants::kCallerSPOffset;
}
byte* JavaScriptFrame::GetCallerStackPointer() const {
int arguments;
if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
// The arguments for cooked frames are traversed as if they were
// expression stack elements of the calling frame. The reason for
// this rather strange decision is that we cannot access the
// function during mark-compact GCs when the stack is cooked.
// In fact accessing heap objects (like function->shared() below)
// at all during GC is problematic.
arguments = 0;
} else {
// Compute the number of arguments by getting the number of formal
// parameters of the function. We must remember to take the
// receiver into account (+1).
JSFunction* function = JSFunction::cast(this->function());
arguments = function->shared()->formal_parameter_count() + 1;
}
const int offset = StandardFrameConstants::kCallerSPOffset;
return fp() + offset + (arguments * kPointerSize);
}
byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
const int arguments = Smi::cast(GetExpression(0))->value();
const int offset = StandardFrameConstants::kCallerSPOffset;
return fp() + offset + (arguments + 1) * kPointerSize;
}
} } // namespace v8::internal
......