vendor/v8/src/mark-compact.cc in mustang-0.0.1 vs vendor/v8/src/mark-compact.cc in mustang-0.1.0
- old
+ new
@@ -42,32 +42,31 @@
namespace internal {
// -------------------------------------------------------------------------
// MarkCompactCollector
-bool MarkCompactCollector::force_compaction_ = false;
-bool MarkCompactCollector::compacting_collection_ = false;
-bool MarkCompactCollector::compact_on_next_gc_ = false;
-
-int MarkCompactCollector::previous_marked_count_ = 0;
-GCTracer* MarkCompactCollector::tracer_ = NULL;
-
-
+MarkCompactCollector::MarkCompactCollector() : // NOLINT
#ifdef DEBUG
-MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
-
-// Counters used for debugging the marking phase of mark-compact or mark-sweep
-// collection.
-int MarkCompactCollector::live_bytes_ = 0;
-int MarkCompactCollector::live_young_objects_size_ = 0;
-int MarkCompactCollector::live_old_data_objects_size_ = 0;
-int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
-int MarkCompactCollector::live_code_objects_size_ = 0;
-int MarkCompactCollector::live_map_objects_size_ = 0;
-int MarkCompactCollector::live_cell_objects_size_ = 0;
-int MarkCompactCollector::live_lo_objects_size_ = 0;
+ state_(IDLE),
#endif
+ force_compaction_(false),
+ compacting_collection_(false),
+ compact_on_next_gc_(false),
+ previous_marked_count_(0),
+ tracer_(NULL),
+#ifdef DEBUG
+ live_young_objects_size_(0),
+ live_old_pointer_objects_size_(0),
+ live_old_data_objects_size_(0),
+ live_code_objects_size_(0),
+ live_map_objects_size_(0),
+ live_cell_objects_size_(0),
+ live_lo_objects_size_(0),
+ live_bytes_(0),
+#endif
+ heap_(NULL),
+ code_flusher_(NULL) { }
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -85,19 +84,19 @@
if (IsCompacting()) {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
EncodeForwardingAddresses();
- Heap::MarkMapPointersAsEncoded(true);
+ heap()->MarkMapPointersAsEncoded(true);
UpdatePointers();
- Heap::MarkMapPointersAsEncoded(false);
- PcToCodeCache::FlushPcToCodeCache();
+ heap()->MarkMapPointersAsEncoded(false);
+ heap()->isolate()->pc_to_code_cache()->Flush();
RelocateObjects();
} else {
SweepSpaces();
- PcToCodeCache::FlushPcToCodeCache();
+ heap()->isolate()->pc_to_code_cache()->Flush();
}
Finish();
// Save the count of marked objects remaining after the collection and
@@ -122,11 +121,11 @@
compacting_collection_ =
FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
compact_on_next_gc_ = false;
if (FLAG_never_compact) compacting_collection_ = false;
- if (!Heap::map_space()->MapPointersEncodable())
+ if (!heap()->map_space()->MapPointersEncodable())
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit) {
// If GDBJIT interface is active disable compaction.
@@ -160,13 +159,13 @@
#endif
// The stub cache is not traversed during GC; clear the cache to
// force lazy re-initialization of it. This must be done after the
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
- StubCache::Clear();
+ heap()->isolate()->stub_cache()->Clear();
- ExternalStringTable::CleanUp();
+ heap()->external_string_table_.CleanUp();
// If we've just compacted old space there's no reason to check the
// fragmentation limit. Just return.
if (HasCompacted()) return;
@@ -219,37 +218,38 @@
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
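
The overflow protocol described above can be shown end to end in a small standalone sketch (hypothetical Node type and a tiny stack capacity; V8's real stack lives in from space and its rescans walk each heap space with iterators):

#include <cstddef>

struct Node {
  bool marked = false;
  bool overflowed = false;
  Node* children[2] = {nullptr, nullptr};
};

// Fixed-capacity marking stack that records overflow instead of growing.
class MarkingStack {
 public:
  bool overflowed() const { return overflowed_; }
  void clear_overflowed() { overflowed_ = false; }
  bool is_empty() const { return top_ == 0; }
  void Push(Node* n) {
    if (top_ == kCapacity) {
      n->overflowed = true;   // No room: flag the object in place...
      overflowed_ = true;     // ...and remember that a heap rescan is needed.
    } else {
      stack_[top_++] = n;
    }
  }
  Node* Pop() { return stack_[--top_]; }

 private:
  static const size_t kCapacity = 4;
  Node* stack_[kCapacity];
  size_t top_ = 0;
  bool overflowed_ = false;
};

void Mark(MarkingStack* stack, Node* n) {
  if (n == nullptr || n->marked) return;
  n->marked = true;
  stack->Push(n);
}

// Drain the stack, then rescan the heap for overflowed objects and repeat
// until no reachable object is left unprocessed.
void ProcessMarking(MarkingStack* stack, Node** heap, size_t heap_size) {
  do {
    while (!stack->is_empty()) {
      Node* n = stack->Pop();
      for (Node* child : n->children) Mark(stack, child);
    }
    if (stack->overflowed()) {
      stack->clear_overflowed();
      for (size_t i = 0; i < heap_size; i++) {
        if (heap[i]->overflowed) {
          heap[i]->overflowed = false;
          stack->Push(heap[i]);  // May overflow again; the loop repeats.
        }
      }
    }
  } while (!stack->is_empty() || stack->overflowed());
}
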
-static MarkingStack marking_stack;
-
-class FlushCode : public AllStatic {
+class CodeFlusher {
public:
- static void AddCandidate(SharedFunctionInfo* shared_info) {
+ explicit CodeFlusher(Isolate* isolate)
+ : isolate_(isolate),
+ jsfunction_candidates_head_(NULL),
+ shared_function_info_candidates_head_(NULL) {}
+
+ void AddCandidate(SharedFunctionInfo* shared_info) {
SetNextCandidate(shared_info, shared_function_info_candidates_head_);
shared_function_info_candidates_head_ = shared_info;
}
-
- static void AddCandidate(JSFunction* function) {
+ void AddCandidate(JSFunction* function) {
ASSERT(function->unchecked_code() ==
function->unchecked_shared()->unchecked_code());
SetNextCandidate(function, jsfunction_candidates_head_);
jsfunction_candidates_head_ = function;
}
-
- static void ProcessCandidates() {
+ void ProcessCandidates() {
ProcessSharedFunctionInfoCandidates();
ProcessJSFunctionCandidates();
}
private:
- static void ProcessJSFunctionCandidates() {
- Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
+ void ProcessJSFunctionCandidates() {
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
JSFunction* candidate = jsfunction_candidates_head_;
JSFunction* next_candidate;
while (candidate != NULL) {
next_candidate = GetNextCandidate(candidate);
@@ -269,12 +269,12 @@
jsfunction_candidates_head_ = NULL;
}
- static void ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
+ void ProcessSharedFunctionInfoCandidates() {
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
SharedFunctionInfo* next_candidate;
while (candidate != NULL) {
next_candidate = GetNextCandidate(candidate);
@@ -289,87 +289,88 @@
}
shared_function_info_candidates_head_ = NULL;
}
-
static JSFunction** GetNextCandidateField(JSFunction* candidate) {
return reinterpret_cast<JSFunction**>(
candidate->address() + JSFunction::kCodeEntryOffset);
}
-
static JSFunction* GetNextCandidate(JSFunction* candidate) {
return *GetNextCandidateField(candidate);
}
-
static void SetNextCandidate(JSFunction* candidate,
JSFunction* next_candidate) {
*GetNextCandidateField(candidate) = next_candidate;
}
-
STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
-
static SharedFunctionInfo** GetNextCandidateField(
SharedFunctionInfo* candidate) {
Code* code = candidate->unchecked_code();
return reinterpret_cast<SharedFunctionInfo**>(
code->address() + Code::kHeaderPaddingStart);
}
-
static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
return *GetNextCandidateField(candidate);
}
-
static void SetNextCandidate(SharedFunctionInfo* candidate,
SharedFunctionInfo* next_candidate) {
*GetNextCandidateField(candidate) = next_candidate;
}
- static JSFunction* jsfunction_candidates_head_;
+ Isolate* isolate_;
+ JSFunction* jsfunction_candidates_head_;
+ SharedFunctionInfo* shared_function_info_candidates_head_;
- static SharedFunctionInfo* shared_function_info_candidates_head_;
+ DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
-JSFunction* FlushCode::jsfunction_candidates_head_ = NULL;
-SharedFunctionInfo* FlushCode::shared_function_info_candidates_head_ = NULL;
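
The candidate lists above are intrusive: the "next" pointer is threaded through memory the object already owns (the code-entry slot of a JSFunction, the header padding of a Code object), so no auxiliary allocation is needed during GC. A minimal sketch of the same trick, with a hypothetical Object layout whose dormant field is reused as the link:

#include <cstdio>

// Hypothetical object with a field that is dead while the collector runs
// and can be temporarily reused as a "next candidate" link.
struct Object {
  int id;
  void* code_entry;  // Unused during GC.
};

static Object** NextCandidateField(Object* candidate) {
  // Reinterpret the dormant field as a link in a singly-linked list.
  return reinterpret_cast<Object**>(&candidate->code_entry);
}

static void AddCandidate(Object** head, Object* candidate) {
  *NextCandidateField(candidate) = *head;
  *head = candidate;
}

int main() {
  Object a{1, nullptr}, b{2, nullptr};
  Object* head = nullptr;
  AddCandidate(&head, &a);
  AddCandidate(&head, &b);
  for (Object* c = head; c != nullptr; c = *NextCandidateField(c))
    std::printf("candidate %d\n", c->id);  // Prints 2 then 1 (LIFO).
}
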
+MarkCompactCollector::~MarkCompactCollector() {
+ if (code_flusher_ != NULL) {
+ delete code_flusher_;
+ code_flusher_ = NULL;
+ }
+}
+
static inline HeapObject* ShortCircuitConsString(Object** p) {
// Optimization: If the heap object pointed to by p is a non-symbol
- // cons string whose right substring is Heap::empty_string, update
+ // cons string whose right substring is HEAP->empty_string, update
// it in place to its left substring. Return the updated value.
//
// Here we assume that if we change *p, we replace it with a heap object
// (ie, the left substring of a cons string is always a heap object).
//
// The check performed is:
// object->IsConsString() && !object->IsSymbol() &&
- // (ConsString::cast(object)->second() == Heap::empty_string())
+ // (ConsString::cast(object)->second() == HEAP->empty_string())
// except the maps for the object and its possible substrings might be
// marked.
HeapObject* object = HeapObject::cast(*p);
MapWord map_word = object->map_word();
map_word.ClearMark();
InstanceType type = map_word.ToMap()->instance_type();
if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
- if (second != Heap::raw_unchecked_empty_string()) {
+ Heap* heap = map_word.ToMap()->heap();
+ if (second != heap->raw_unchecked_empty_string()) {
return object;
}
// Since we don't have the object's start, it is impossible to update the
// page dirty marks. Therefore, we only replace the string with its left
// substring when page dirty marks do not change.
Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
- if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
+ if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
*p = first;
return HeapObject::cast(first);
}
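
A simplified model of the short-circuit with a hypothetical string type (the real code must also strip mark bits from the map word and respect page dirty marks, as the comments above note):

#include <cassert>
#include <string>

struct Str {
  bool is_cons = false;
  std::string flat;        // Payload for flat strings.
  Str* first = nullptr;    // Left substring of a cons string.
  Str* second = nullptr;   // Right substring of a cons string.
};

// If *p is a cons string whose right part is the empty string, replace the
// reference with the left part so the marker follows one object fewer.
Str* ShortCircuitConsString(Str** p) {
  Str* s = *p;
  if (!s->is_cons) return s;
  if (s->second->is_cons || !s->second->flat.empty()) return s;
  *p = s->first;
  return s->first;
}

int main() {
  Str empty;                        // ""
  Str left;  left.flat = "payload";
  Str cons;  cons.is_cons = true; cons.first = &left; cons.second = &empty;
  Str* slot = &cons;
  assert(ShortCircuitConsString(&slot) == &left && slot == &left);
}
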
@@ -378,23 +379,10 @@
public:
static inline void IterateBody(Map* map, HeapObject* obj) {
table_.GetVisitor(map)(map, obj);
}
- static void EnableCodeFlushing(bool enabled) {
- if (enabled) {
- table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
- table_.Register(kVisitSharedFunctionInfo,
- &VisitSharedFunctionInfoAndFlushCode);
-
- } else {
- table_.Register(kVisitJSFunction, &VisitJSFunction);
- table_.Register(kVisitSharedFunctionInfo,
- &VisitSharedFunctionInfoGeneric);
- }
- }
-
static void Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticMarkingVisitor,
ConsString::BodyDescriptor,
void>::Visit);
@@ -452,88 +440,95 @@
table_.RegisterSpecializations<StructObjectVisitor,
kVisitStruct,
kVisitStructGeneric>();
}
- INLINE(static void VisitPointer(Object** p)) {
- MarkObjectByPointer(p);
+ INLINE(static void VisitPointer(Heap* heap, Object** p)) {
+ MarkObjectByPointer(heap, p);
}
- INLINE(static void VisitPointers(Object** start, Object** end)) {
+ INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
// Mark all objects pointed to in [start, end).
const int kMinRangeForMarkingRecursion = 64;
if (end - start >= kMinRangeForMarkingRecursion) {
- if (VisitUnmarkedObjects(start, end)) return;
+ if (VisitUnmarkedObjects(heap, start, end)) return;
// We are close to a stack overflow, so just mark the objects.
}
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
}
- static inline void VisitCodeTarget(RelocInfo* rinfo) {
+ static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
- // marked since they are contained in Heap::non_monomorphic_cache().
+ // marked since they are contained in HEAP->non_monomorphic_cache().
} else {
- MarkCompactCollector::MarkObject(code);
+ heap->mark_compact_collector()->MarkObject(code);
}
}
- static void VisitGlobalPropertyCell(RelocInfo* rinfo) {
+ static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
Object* cell = rinfo->target_cell();
Object* old_cell = cell;
- VisitPointer(&cell);
+ VisitPointer(heap, &cell);
if (cell != old_cell) {
rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
}
}
- static inline void VisitDebugTarget(RelocInfo* rinfo) {
+ static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
rinfo->IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence()));
HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
- MarkCompactCollector::MarkObject(code);
+ heap->mark_compact_collector()->MarkObject(code);
}
// Mark object pointed to by p.
- INLINE(static void MarkObjectByPointer(Object** p)) {
+ INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
if (!(*p)->IsHeapObject()) return;
HeapObject* object = ShortCircuitConsString(p);
- MarkCompactCollector::MarkObject(object);
+ if (!object->IsMarked()) {
+ heap->mark_compact_collector()->MarkUnmarkedObject(object);
+ }
}
+
// Visit an unmarked object.
- static inline void VisitUnmarkedObject(HeapObject* obj) {
+ INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
+ HeapObject* obj)) {
#ifdef DEBUG
- ASSERT(Heap::Contains(obj));
+ ASSERT(Isolate::Current()->heap()->Contains(obj));
ASSERT(!obj->IsMarked());
#endif
Map* map = obj->map();
- MarkCompactCollector::SetMark(obj);
+ collector->SetMark(obj);
// Mark the map pointer and the body.
- MarkCompactCollector::MarkObject(map);
+ if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
IterateBody(map, obj);
}
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
+ static inline bool VisitUnmarkedObjects(Heap* heap,
+ Object** start,
+ Object** end) {
    // Return false if we are close to the stack limit.
- StackLimitCheck check;
+ StackLimitCheck check(heap->isolate());
if (check.HasOverflowed()) return false;
+ MarkCompactCollector* collector = heap->mark_compact_collector();
// Visit the unmarked objects.
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
if (obj->IsMarked()) continue;
- VisitUnmarkedObject(obj);
+ VisitUnmarkedObject(collector, obj);
}
return true;
}
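
The two functions above cooperate: recursion is attempted only for large pointer ranges, and a stack-limit probe at the top of VisitUnmarkedObjects aborts cleanly before anything is marked, so the caller can fall back to plain pushes. A sketch of that pattern, with a depth counter standing in for V8's StackLimitCheck:

#include <vector>

struct Obj {
  bool marked = false;
  std::vector<Obj*> fields;
};

std::vector<Obj*> g_marking_stack;  // Drained later, as in the earlier sketch.
int g_depth = 0;
const int kMaxDepth = 1000;  // Hypothetical stand-in for StackLimitCheck.

void MarkObject(Obj* o) {
  if (!o->marked) {
    o->marked = true;
    g_marking_stack.push_back(o);
  }
}

void VisitPointers(const std::vector<Obj*>& range);

// Mark one object and visit its body; the body visit re-enters
// VisitPointers, which carries its own fallback.
void VisitUnmarkedObject(Obj* o) {
  o->marked = true;
  VisitPointers(o->fields);
}

// Returns false, before marking anything, when the (simulated) C stack is
// nearly exhausted.
bool VisitUnmarkedObjects(const std::vector<Obj*>& range) {
  if (g_depth > kMaxDepth) return false;
  for (Obj* o : range) {
    if (o->marked) continue;
    g_depth++;
    VisitUnmarkedObject(o);
    g_depth--;
  }
  return true;
}

void VisitPointers(const std::vector<Obj*>& range) {
  const size_t kMinRangeForMarkingRecursion = 64;
  if (range.size() >= kMinRangeForMarkingRecursion &&
      VisitUnmarkedObjects(range)) {
    return;
  }
  // Small range, or too deep: just push and let the stack drain handle it.
  for (Obj* o : range) MarkObject(o);
}
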
static inline void VisitExternalReference(Address* p) { }
@@ -557,38 +552,38 @@
typedef FlexibleBodyVisitor<StaticMarkingVisitor,
StructBodyDescriptor,
void> StructObjectVisitor;
static void VisitCode(Map* map, HeapObject* object) {
- reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
+ reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
+ map->heap());
}
// Code flushing support.
  // How many collections a newly compiled code object will survive before being
// flushed.
static const int kCodeAgeThreshold = 5;
- inline static bool HasSourceCode(SharedFunctionInfo* info) {
- Object* undefined = Heap::raw_unchecked_undefined_value();
+ inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
+ Object* undefined = heap->raw_unchecked_undefined_value();
return (info->script() != undefined) &&
(reinterpret_cast<Script*>(info->script())->source() != undefined);
}
inline static bool IsCompiled(JSFunction* function) {
- return
- function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
+ return function->unchecked_code() !=
+ function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
-
inline static bool IsCompiled(SharedFunctionInfo* function) {
- return
- function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
+ return function->unchecked_code() !=
+ function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
- inline static bool IsFlushable(JSFunction* function) {
+ inline static bool IsFlushable(Heap* heap, JSFunction* function) {
SharedFunctionInfo* shared_info = function->unchecked_shared();
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
if (function->unchecked_code()->IsMarked()) {
@@ -599,24 +594,24 @@
// We do not flush code for optimized functions.
if (function->code() != shared_info->unchecked_code()) {
return false;
}
- return IsFlushable(shared_info);
+ return IsFlushable(heap, shared_info);
}
- inline static bool IsFlushable(SharedFunctionInfo* shared_info) {
+ inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
if (shared_info->unchecked_code()->IsMarked()) {
shared_info->set_code_age(0);
return false;
}
// The function must be compiled and have the source code available,
// to be able to recompile it in case we need the function again.
- if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) {
+ if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
return false;
}
// We never flush code for Api functions.
Object* function_data = shared_info->function_data();
@@ -643,19 +638,19 @@
return true;
}
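
The threshold works together with set_code_age: marked (still-used) code has its age reset above, and unmarked code is presumably aged in the elided lines until it crosses kCodeAgeThreshold. A sketch of that policy, with a hypothetical SharedInfo record:

// Code must survive kCodeAgeThreshold collections without being used before
// it becomes a flush candidate; any use resets the clock.
struct SharedInfo {
  int code_age = 0;
  bool code_marked = false;  // Was the code reached during this GC's marking?
};

const int kCodeAgeThreshold = 5;

bool ShouldFlush(SharedInfo* info) {
  if (info->code_marked) {
    info->code_age = 0;   // In use: reset the clock.
    return false;
  }
  if (info->code_age < kCodeAgeThreshold) {
    info->code_age++;     // Not used this cycle: age it.
    return false;
  }
  return true;            // Old enough: flush and recompile lazily later.
}
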
- static bool FlushCodeForFunction(JSFunction* function) {
- if (!IsFlushable(function)) return false;
+ static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
+ if (!IsFlushable(heap, function)) return false;
// This function's code looks flushable. But we have to postpone the
// decision until we see all functions that point to the same
// SharedFunctionInfo because some of them might be optimized.
// That would make the nonoptimized version of the code nonflushable,
// because it is required for bailing out from optimized code.
- FlushCode::AddCandidate(function);
+ heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
return true;
}
static inline Map* SafeMap(Object* obj) {
@@ -674,13 +669,14 @@
static inline bool IsValidNotBuiltinContext(Object* ctx) {
if (!ctx->IsHeapObject()) return false;
Map* map = SafeMap(ctx);
- if (!(map == Heap::raw_unchecked_context_map() ||
- map == Heap::raw_unchecked_catch_context_map() ||
- map == Heap::raw_unchecked_global_context_map())) {
+ Heap* heap = map->heap();
+ if (!(map == heap->raw_unchecked_context_map() ||
+ map == heap->raw_unchecked_catch_context_map() ||
+ map == heap->raw_unchecked_global_context_map())) {
return false;
}
Context* context = reinterpret_cast<Context*>(ctx);
@@ -703,51 +699,65 @@
}
static void VisitSharedFunctionInfoAndFlushCode(Map* map,
HeapObject* object) {
+ MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+ if (!collector->is_code_flushing_enabled()) {
+ VisitSharedFunctionInfoGeneric(map, object);
+ return;
+ }
VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
}
static void VisitSharedFunctionInfoAndFlushCodeGeneric(
Map* map, HeapObject* object, bool known_flush_code_candidate) {
+ Heap* heap = map->heap();
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
if (!known_flush_code_candidate) {
- known_flush_code_candidate = IsFlushable(shared);
- if (known_flush_code_candidate) FlushCode::AddCandidate(shared);
+ known_flush_code_candidate = IsFlushable(heap, shared);
+ if (known_flush_code_candidate) {
+ heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
+ }
}
- VisitSharedFunctionInfoFields(object, known_flush_code_candidate);
+ VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
}
- static void VisitCodeEntry(Address entry_address) {
+ static void VisitCodeEntry(Heap* heap, Address entry_address) {
Object* code = Code::GetObjectFromEntryAddress(entry_address);
Object* old_code = code;
- VisitPointer(&code);
+ VisitPointer(heap, &code);
if (code != old_code) {
Memory::Address_at(entry_address) =
reinterpret_cast<Code*>(code)->entry();
}
}
static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
+ Heap* heap = map->heap();
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (!collector->is_code_flushing_enabled()) {
+ VisitJSFunction(map, object);
+ return;
+ }
+
JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
// The function must have a valid context and not be a builtin.
bool flush_code_candidate = false;
if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
- flush_code_candidate = FlushCodeForFunction(jsfunction);
+ flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
}
if (!flush_code_candidate) {
- MarkCompactCollector::MarkObject(
- jsfunction->unchecked_shared()->unchecked_code());
+ collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
      // For optimized functions we should retain both the non-optimized version
      // of its code and the non-optimized versions of all inlined functions.
// This is required to support bailing out from inlined code.
@@ -759,12 +769,11 @@
for (int i = 0, count = data->InlinedFunctionCount()->value();
i < count;
i++) {
JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
- MarkCompactCollector::MarkObject(
- inlined->unchecked_shared()->unchecked_code());
+ collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
}
}
}
VisitJSFunctionFields(map,
@@ -785,48 +794,55 @@
static inline void VisitJSFunctionFields(Map* map,
JSFunction* object,
bool flush_code_candidate) {
- VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
+ Heap* heap = map->heap();
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+
+ VisitPointers(heap,
+ SLOT_ADDR(object, JSFunction::kPropertiesOffset),
SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
if (!flush_code_candidate) {
- VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
+ VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
} else {
// Don't visit code object.
      // Visit shared function info to avoid double-checking its
// flushability.
SharedFunctionInfo* shared_info = object->unchecked_shared();
if (!shared_info->IsMarked()) {
Map* shared_info_map = shared_info->map();
- MarkCompactCollector::SetMark(shared_info);
- MarkCompactCollector::MarkObject(shared_info_map);
+ collector->SetMark(shared_info);
+ collector->MarkObject(shared_info_map);
VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
shared_info,
true);
}
}
- VisitPointers(SLOT_ADDR(object,
+ VisitPointers(heap,
+ SLOT_ADDR(object,
JSFunction::kCodeEntryOffset + kPointerSize),
SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
// Don't visit the next function list field as it is a weak reference.
}
- static void VisitSharedFunctionInfoFields(HeapObject* object,
+ static void VisitSharedFunctionInfoFields(Heap* heap,
+ HeapObject* object,
bool flush_code_candidate) {
- VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
+ VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
if (!flush_code_candidate) {
- VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
+ VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
}
- VisitPointers(SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
+ VisitPointers(heap,
+ SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
SLOT_ADDR(object, SharedFunctionInfo::kSize));
}
#undef SLOT_ADDR
@@ -840,98 +856,123 @@
StaticMarkingVisitor::table_;
class MarkingVisitor : public ObjectVisitor {
public:
+ explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
+
void VisitPointer(Object** p) {
- StaticMarkingVisitor::VisitPointer(p);
+ StaticMarkingVisitor::VisitPointer(heap_, p);
}
void VisitPointers(Object** start, Object** end) {
- StaticMarkingVisitor::VisitPointers(start, end);
+ StaticMarkingVisitor::VisitPointers(heap_, start, end);
}
- void VisitCodeTarget(RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitCodeTarget(rinfo);
+ void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
+ StaticMarkingVisitor::VisitCodeTarget(heap, rinfo);
}
- void VisitGlobalPropertyCell(RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitGlobalPropertyCell(rinfo);
+ void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
+ StaticMarkingVisitor::VisitGlobalPropertyCell(heap, rinfo);
}
- void VisitDebugTarget(RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitDebugTarget(rinfo);
+ void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
+ StaticMarkingVisitor::VisitDebugTarget(heap, rinfo);
}
+
+ private:
+ Heap* heap_;
};
class CodeMarkingVisitor : public ThreadVisitor {
public:
+ explicit CodeMarkingVisitor(MarkCompactCollector* collector)
+ : collector_(collector) {}
+
void VisitThread(ThreadLocalTop* top) {
for (StackFrameIterator it(top); !it.done(); it.Advance()) {
- MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
+ collector_->MarkObject(it.frame()->unchecked_code());
}
}
+
+ private:
+ MarkCompactCollector* collector_;
};
class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
public:
+ explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
+ : collector_(collector) {}
+
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) VisitPointer(p);
}
void VisitPointer(Object** slot) {
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- MarkCompactCollector::MarkObject(shared->unchecked_code());
- MarkCompactCollector::MarkObject(shared);
+ collector_->MarkObject(shared->unchecked_code());
+ collector_->MarkObject(shared);
}
}
+
+ private:
+ MarkCompactCollector* collector_;
};
void MarkCompactCollector::PrepareForCodeFlushing() {
+ ASSERT(heap() == Isolate::Current()->heap());
+
if (!FLAG_flush_code) {
- StaticMarkingVisitor::EnableCodeFlushing(false);
+ EnableCodeFlushing(false);
return;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::IsLoaded() || Debug::has_break_points()) {
- StaticMarkingVisitor::EnableCodeFlushing(false);
+ if (heap()->isolate()->debug()->IsLoaded() ||
+ heap()->isolate()->debug()->has_break_points()) {
+ EnableCodeFlushing(false);
return;
}
#endif
- StaticMarkingVisitor::EnableCodeFlushing(true);
+ EnableCodeFlushing(true);
// Ensure that empty descriptor array is marked. Method MarkDescriptorArray
// relies on it being marked before any other descriptor array.
- MarkObject(Heap::raw_unchecked_empty_descriptor_array());
+ MarkObject(heap()->raw_unchecked_empty_descriptor_array());
// Make sure we are not referencing the code from the stack.
+ ASSERT(this == heap()->mark_compact_collector());
for (StackFrameIterator it; !it.done(); it.Advance()) {
MarkObject(it.frame()->unchecked_code());
}
// Iterate the archived stacks in all threads to check if
// the code is referenced.
- CodeMarkingVisitor code_marking_visitor;
- ThreadManager::IterateArchivedThreads(&code_marking_visitor);
+ CodeMarkingVisitor code_marking_visitor(this);
+ heap()->isolate()->thread_manager()->IterateArchivedThreads(
+ &code_marking_visitor);
- SharedFunctionInfoMarkingVisitor visitor;
- CompilationCache::IterateFunctions(&visitor);
- HandleScopeImplementer::Iterate(&visitor);
+ SharedFunctionInfoMarkingVisitor visitor(this);
+ heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
+ heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
ProcessMarkingStack();
}
// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
public:
+ explicit RootMarkingVisitor(Heap* heap)
+ : collector_(heap->mark_compact_collector()) { }
+
void VisitPointer(Object** p) {
MarkObjectByPointer(p);
}
void VisitPointers(Object** start, Object** end) {
@@ -946,27 +987,30 @@
HeapObject* object = ShortCircuitConsString(p);
if (object->IsMarked()) return;
Map* map = object->map();
// Mark the object.
- MarkCompactCollector::SetMark(object);
+ collector_->SetMark(object);
// Mark the map pointer and body, and push them on the marking stack.
- MarkCompactCollector::MarkObject(map);
+ collector_->MarkObject(map);
StaticMarkingVisitor::IterateBody(map, object);
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
- MarkCompactCollector::EmptyMarkingStack();
+ collector_->EmptyMarkingStack();
}
+
+ MarkCompactCollector* collector_;
};
// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
public:
- SymbolTableCleaner() : pointers_removed_(0) { }
+ explicit SymbolTableCleaner(Heap* heap)
+ : heap_(heap), pointers_removed_(0) { }
virtual void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
@@ -974,23 +1018,24 @@
// delete the associated external data as this symbol is going away.
// Since no objects have yet been moved we can safely access the map of
// the object.
if ((*p)->IsExternalString()) {
- Heap::FinalizeExternalString(String::cast(*p));
+ heap_->FinalizeExternalString(String::cast(*p));
}
// Set the entry to null_value (as deleted).
- *p = Heap::raw_unchecked_null_value();
+ *p = heap_->raw_unchecked_null_value();
pointers_removed_++;
}
}
}
int PointersRemoved() {
return pointers_removed_;
}
private:
+ Heap* heap_;
int pointers_removed_;
};
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
@@ -1008,27 +1053,27 @@
};
void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
ASSERT(!object->IsMarked());
- ASSERT(Heap::Contains(object));
+ ASSERT(HEAP->Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
if (FLAG_cleanup_caches_in_maps_at_gc) {
- map->ClearCodeCache();
+ map->ClearCodeCache(heap());
}
SetMark(map);
if (FLAG_collect_maps &&
map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
map->instance_type() <= JS_FUNCTION_TYPE) {
MarkMapContents(map);
} else {
- marking_stack.Push(map);
+ marking_stack_.Push(map);
}
} else {
SetMark(object);
- marking_stack.Push(object);
+ marking_stack_.Push(object);
}
}
void MarkCompactCollector::MarkMapContents(Map* map) {
@@ -1041,62 +1086,62 @@
Object** start_slot = HeapObject::RawField(map,
Map::kPointerFieldsBeginOffset);
Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
- StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
+ StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
}
void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray* descriptors) {
if (descriptors->IsMarked()) return;
// Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
+ ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
SetMark(descriptors);
FixedArray* contents = reinterpret_cast<FixedArray*>(
descriptors->get(DescriptorArray::kContentArrayIndex));
ASSERT(contents->IsHeapObject());
ASSERT(!contents->IsMarked());
ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
SetMark(contents);
- // Contents contains (value, details) pairs. If the details say that
- // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
- // NULL_DESCRIPTOR, we don't mark the value as live. Only for
- // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a
- // Map*).
+ // Contents contains (value, details) pairs. If the details say that the type
+ // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
+ // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
+ // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
+ // CONSTANT_TRANSITION is the value an Object* (a Map*).
for (int i = 0; i < contents->length(); i += 2) {
// If the pair (value, details) at index i, i+1 is not
// a transition or null descriptor, mark the value.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
if (object->IsHeapObject() && !object->IsMarked()) {
SetMark(object);
- marking_stack.Push(object);
+ marking_stack_.Push(object);
}
}
}
// The DescriptorArray descriptors contains a pointer to its contents array,
// but the contents array is already marked.
- marking_stack.Push(descriptors);
+ marking_stack_.Push(descriptors);
}
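
The contents array walked above stores descriptors as flattened (value, details) pairs; only non-transition descriptors get their value marked, so dead map transitions can be pruned later. A toy illustration, with integer tags standing in for PropertyDetails:

#include <cassert>
#include <cstddef>
#include <vector>

enum DescType { NORMAL = 0, MAP_TRANSITION = 1, NULL_DESCRIPTOR = 2 };

int main() {
  // contents holds (value, details) pairs flattened into one array:
  // even index = value, odd index = details (here just a type tag).
  std::vector<int> contents = {/*value*/ 10, NORMAL,
                               /*value*/ 20, MAP_TRANSITION,
                               /*value*/ 30, NULL_DESCRIPTOR};
  int marked = 0;
  for (std::size_t i = 0; i < contents.size(); i += 2) {
    // Only ordinary descriptors have their value treated as strong;
    // marking transition targets would keep dead maps alive.
    if (contents[i + 1] == NORMAL) marked++;  // Would SetMark(contents[i]).
  }
  assert(marked == 1);
}
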
void MarkCompactCollector::CreateBackPointers() {
- HeapObjectIterator iterator(Heap::map_space());
+ HeapObjectIterator iterator(heap()->map_space());
for (HeapObject* next_object = iterator.next();
next_object != NULL; next_object = iterator.next()) {
if (next_object->IsMap()) { // Could also be ByteArray on free list.
Map* map = Map::cast(next_object);
if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
map->instance_type() <= JS_FUNCTION_TYPE) {
map->CreateBackPointers();
} else {
- ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
+ ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
}
}
}
}
@@ -1109,65 +1154,70 @@
map_word.ClearOverflow();
return obj->SizeFromMap(map_word.ToMap());
}
-// Fill the marking stack with overflowed objects returned by the given
-// iterator. Stop when the marking stack is filled or the end of the space
-// is reached, whichever comes first.
-template<class T>
-static void ScanOverflowedObjects(T* it) {
- // The caller should ensure that the marking stack is initially not full,
- // so that we don't waste effort pointlessly scanning for objects.
- ASSERT(!marking_stack.is_full());
+class OverflowedObjectsScanner : public AllStatic {
+ public:
+ // Fill the marking stack with overflowed objects returned by the given
+ // iterator. Stop when the marking stack is filled or the end of the space
+ // is reached, whichever comes first.
+ template<class T>
+ static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
+ T* it) {
+ // The caller should ensure that the marking stack is initially not full,
+ // so that we don't waste effort pointlessly scanning for objects.
+ ASSERT(!collector->marking_stack_.is_full());
- for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
- if (object->IsOverflowed()) {
- object->ClearOverflow();
- ASSERT(object->IsMarked());
- ASSERT(Heap::Contains(object));
- marking_stack.Push(object);
- if (marking_stack.is_full()) return;
+ for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
+ if (object->IsOverflowed()) {
+ object->ClearOverflow();
+ ASSERT(object->IsMarked());
+ ASSERT(HEAP->Contains(object));
+ collector->marking_stack_.Push(object);
+ if (collector->marking_stack_.is_full()) return;
+ }
}
}
-}
+};
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
}
void MarkCompactCollector::MarkSymbolTable() {
- SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
+ SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
// Mark the symbol table itself.
SetMark(symbol_table);
// Explicitly mark the prefix.
- MarkingVisitor marker;
+ MarkingVisitor marker(heap());
symbol_table->IteratePrefix(&marker);
ProcessMarkingStack();
}
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
- Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
+ heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
// Handle the symbol table specially.
MarkSymbolTable();
// There may be overflowed objects in the heap. Visit them now.
- while (marking_stack.overflowed()) {
+ while (marking_stack_.overflowed()) {
RefillMarkingStack();
EmptyMarkingStack();
}
}
void MarkCompactCollector::MarkObjectGroups() {
- List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();
+ List<ObjectGroup*>* object_groups =
+ heap()->isolate()->global_handles()->object_groups();
for (int i = 0; i < object_groups->length(); i++) {
ObjectGroup* entry = object_groups->at(i);
if (entry == NULL) continue;
@@ -1188,27 +1238,55 @@
for (int j = 0; j < objects.length(); ++j) {
if ((*objects[j])->IsHeapObject()) {
MarkObject(HeapObject::cast(*objects[j]));
}
}
+
// Once the entire group has been colored gray, set the object group
// to NULL so it won't be processed again.
- delete object_groups->at(i);
+ delete entry;
object_groups->at(i) = NULL;
}
}
+void MarkCompactCollector::MarkImplicitRefGroups() {
+ List<ImplicitRefGroup*>* ref_groups =
+ heap()->isolate()->global_handles()->implicit_ref_groups();
+
+ for (int i = 0; i < ref_groups->length(); i++) {
+ ImplicitRefGroup* entry = ref_groups->at(i);
+ if (entry == NULL) continue;
+
+ if (!entry->parent_->IsMarked()) continue;
+
+ List<Object**>& children = entry->children_;
+ // A parent object is marked, so mark as gray all child white heap
+ // objects.
+ for (int j = 0; j < children.length(); ++j) {
+ if ((*children[j])->IsHeapObject()) {
+ MarkObject(HeapObject::cast(*children[j]));
+ }
+ }
+
+ // Once the entire group has been colored gray, set the group
+ // to NULL so it won't be processed again.
+ delete entry;
+ ref_groups->at(i) = NULL;
+ }
+}
+
+
// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack() {
- while (!marking_stack.is_empty()) {
- HeapObject* object = marking_stack.Pop();
+ while (!marking_stack_.is_empty()) {
+ HeapObject* object = marking_stack_.Pop();
ASSERT(object->IsHeapObject());
- ASSERT(Heap::Contains(object));
+ ASSERT(heap()->Contains(object));
ASSERT(object->IsMarked());
ASSERT(!object->IsOverflowed());
// Because the object is marked, we have to recover the original map
// pointer and use it to mark the object's body.
@@ -1226,156 +1304,163 @@
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap, so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
- ASSERT(marking_stack.overflowed());
+ ASSERT(marking_stack_.overflowed());
- SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&new_it);
- if (marking_stack.is_full()) return;
+ SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
+ HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
&OverflowObjectSize);
- ScanOverflowedObjects(&old_pointer_it);
- if (marking_stack.is_full()) return;
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&old_data_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&code_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&map_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&cell_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
+ if (marking_stack_.is_full()) return;
- LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&lo_it);
- if (marking_stack.is_full()) return;
+ LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
+ if (marking_stack_.is_full()) return;
- marking_stack.clear_overflowed();
+ marking_stack_.clear_overflowed();
}
// Mark all objects reachable (transitively) from objects on the marking
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack() {
EmptyMarkingStack();
- while (marking_stack.overflowed()) {
+ while (marking_stack_.overflowed()) {
RefillMarkingStack();
EmptyMarkingStack();
}
}
-void MarkCompactCollector::ProcessObjectGroups() {
+void MarkCompactCollector::ProcessExternalMarking() {
bool work_to_do = true;
- ASSERT(marking_stack.is_empty());
+ ASSERT(marking_stack_.is_empty());
while (work_to_do) {
MarkObjectGroups();
- work_to_do = !marking_stack.is_empty();
+ MarkImplicitRefGroups();
+ work_to_do = !marking_stack_.is_empty();
ProcessMarkingStack();
}
}
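
ProcessExternalMarking is a fixed-point loop: draining the marking stack can mark a parent that activates another group, so group marking and transitive marking alternate until nothing new appears. A sketch with minimal group records (the stack drain is stubbed out):

#include <vector>

struct GObj { bool marked = false; };

struct Group {
  std::vector<GObj*> members;  // If any member is marked, mark them all.
};

// Mark every member of any group that already has one marked member.
void MarkGroups(std::vector<Group>* groups, std::vector<GObj*>* stack) {
  for (Group& g : *groups) {
    bool any_marked = false;
    for (GObj* m : g.members) any_marked = any_marked || m->marked;
    if (!any_marked) continue;
    for (GObj* m : g.members) {
      if (!m->marked) {
        m->marked = true;
        stack->push_back(m);
      }
    }
  }
}

void ProcessExternalMarking(std::vector<Group>* groups,
                            std::vector<GObj*>* stack) {
  bool work_to_do = true;
  while (work_to_do) {
    MarkGroups(groups, stack);
    work_to_do = !stack->empty();
    stack->clear();  // Stand-in for ProcessMarkingStack() draining the stack.
  }
}
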
void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(heap()->isolate());
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
#endif
// The to space contains live objects, the from space is used as a marking
// stack.
- marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
- Heap::new_space()->FromSpaceHigh());
+ marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
+ heap()->new_space()->FromSpaceHigh());
- ASSERT(!marking_stack.overflowed());
+ ASSERT(!marking_stack_.overflowed());
PrepareForCodeFlushing();
- RootMarkingVisitor root_visitor;
+ RootMarkingVisitor root_visitor(heap());
MarkRoots(&root_visitor);
// The objects reachable from the roots are marked, yet unreachable
- // objects are unmarked. Mark objects reachable from object groups
- // containing at least one marked object, and continue until no new
- // objects are reachable from the object groups.
- ProcessObjectGroups();
+ // objects are unmarked. Mark objects reachable due to host
+ // application specific logic.
+ ProcessExternalMarking();
// The objects reachable from the roots or object groups are marked,
// yet unreachable objects are unmarked. Mark objects reachable
// only from weak global handles.
//
// First we identify nonlive weak handles and mark them as pending
// destruction.
- GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
+ heap()->isolate()->global_handles()->IdentifyWeakHandles(
+ &IsUnmarkedHeapObject);
// Then we mark the objects and process the transitive closure.
- GlobalHandles::IterateWeakRoots(&root_visitor);
- while (marking_stack.overflowed()) {
+ heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+ while (marking_stack_.overflowed()) {
RefillMarkingStack();
EmptyMarkingStack();
}
- // Repeat the object groups to mark unmarked groups reachable from the
- // weak roots.
- ProcessObjectGroups();
+ // Repeat host application specific marking to mark unmarked objects
+ // reachable from the weak roots.
+ ProcessExternalMarking();
// Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use symbol_table() here because the symbol
// table is marked.
- SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
- SymbolTableCleaner v;
+ SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+ SymbolTableCleaner v(heap());
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
- ExternalStringTable::Iterate(&v);
- ExternalStringTable::CleanUp();
+ heap()->external_string_table_.Iterate(&v);
+ heap()->external_string_table_.CleanUp();
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer;
- Heap::ProcessWeakReferences(&mark_compact_object_retainer);
+ heap()->ProcessWeakReferences(&mark_compact_object_retainer);
// Remove object groups after marking phase.
- GlobalHandles::RemoveObjectGroups();
+ heap()->isolate()->global_handles()->RemoveObjectGroups();
+ heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
// Flush code from collected candidates.
- FlushCode::ProcessCandidates();
+ if (is_code_flushing_enabled()) {
+ code_flusher_->ProcessCandidates();
+ }
+
+ // Clean up dead objects from the runtime profiler.
+ heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
}
#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
live_bytes_ += obj->Size();
- if (Heap::new_space()->Contains(obj)) {
+ if (heap()->new_space()->Contains(obj)) {
live_young_objects_size_ += obj->Size();
- } else if (Heap::map_space()->Contains(obj)) {
+ } else if (heap()->map_space()->Contains(obj)) {
ASSERT(obj->IsMap());
live_map_objects_size_ += obj->Size();
- } else if (Heap::cell_space()->Contains(obj)) {
+ } else if (heap()->cell_space()->Contains(obj)) {
ASSERT(obj->IsJSGlobalPropertyCell());
live_cell_objects_size_ += obj->Size();
- } else if (Heap::old_pointer_space()->Contains(obj)) {
+ } else if (heap()->old_pointer_space()->Contains(obj)) {
live_old_pointer_objects_size_ += obj->Size();
- } else if (Heap::old_data_space()->Contains(obj)) {
+ } else if (heap()->old_data_space()->Contains(obj)) {
live_old_data_objects_size_ += obj->Size();
- } else if (Heap::code_space()->Contains(obj)) {
+ } else if (heap()->code_space()->Contains(obj)) {
live_code_objects_size_ += obj->Size();
- } else if (Heap::lo_space()->Contains(obj)) {
+ } else if (heap()->lo_space()->Contains(obj)) {
live_lo_objects_size_ += obj->Size();
} else {
UNREACHABLE();
}
}
@@ -1387,11 +1472,11 @@
ASSERT(state_ == MARK_LIVE_OBJECTS);
state_ =
compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
// Deallocate unmarked objects and clear marked bits for marked objects.
- Heap::lo_space()->FreeUnmarkedObjects();
+ heap()->lo_space()->FreeUnmarkedObjects();
}
// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
@@ -1400,11 +1485,11 @@
return metamap.ToMap()->instance_type() == MAP_TYPE;
}
void MarkCompactCollector::ClearNonLiveTransitions() {
- HeapObjectIterator map_iterator(Heap::map_space(), &SizeOfMarkedObject);
+ HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. At the same time,
// set all the prototype fields of maps back to their original value,
// dropping the back pointers temporarily stored in the prototype field.
// Setting the prototype field requires following the linked list of
@@ -1450,11 +1535,11 @@
// A live map above a dead map indicates a dead transition.
// This test will always be false on the first iteration.
if (on_dead_path && current->IsMarked()) {
on_dead_path = false;
- current->ClearNonLiveTransitions(real_prototype);
+ current->ClearNonLiveTransitions(heap(), real_prototype);
}
*HeapObject::RawField(current, Map::kPrototypeOffset) =
real_prototype;
current = reinterpret_cast<Map*>(next);
}
@@ -1512,81 +1597,88 @@
// Try to promote all objects in new space. Heap numbers and sequential
// strings are promoted to the old data space, large objects to large object
// space, and all others to the old pointer space.
-inline MaybeObject* MCAllocateFromNewSpace(HeapObject* object,
+inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
+ HeapObject* object,
int object_size) {
MaybeObject* forwarded;
- if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+ if (object_size > heap->MaxObjectSizeInPagedSpace()) {
forwarded = Failure::Exception();
} else {
- OldSpace* target_space = Heap::TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space() ||
- target_space == Heap::old_data_space());
+ OldSpace* target_space = heap->TargetSpace(object);
+ ASSERT(target_space == heap->old_pointer_space() ||
+ target_space == heap->old_data_space());
forwarded = target_space->MCAllocateRaw(object_size);
}
Object* result;
if (!forwarded->ToObject(&result)) {
- result = Heap::new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
+ result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
}
return result;
}
// Allocation functions for the paged spaces call the space's MCAllocateRaw.
MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::old_pointer_space()->MCAllocateRaw(object_size);
+ return heap->old_pointer_space()->MCAllocateRaw(object_size);
}
MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::old_data_space()->MCAllocateRaw(object_size);
+ return heap->old_data_space()->MCAllocateRaw(object_size);
}
MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::code_space()->MCAllocateRaw(object_size);
+ return heap->code_space()->MCAllocateRaw(object_size);
}
MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::map_space()->MCAllocateRaw(object_size);
+ return heap->map_space()->MCAllocateRaw(object_size);
}
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(HeapObject* ignore,
- int object_size) {
- return Heap::cell_space()->MCAllocateRaw(object_size);
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
+ Heap* heap, HeapObject* ignore, int object_size) {
+ return heap->cell_space()->MCAllocateRaw(object_size);
}
// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
-inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
+inline void EncodeForwardingAddressInNewSpace(Heap* heap,
+ HeapObject* old_object,
int object_size,
Object* new_object,
int* ignored) {
int offset =
- Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
- Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
+ heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
+ Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
HeapObject::cast(new_object)->address();
}
// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
-inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
+inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
+ HeapObject* old_object,
int object_size,
Object* new_object,
int* offset) {
// Record the forwarding address of the first live object if necessary.
if (*offset == 0) {
@@ -1601,11 +1693,11 @@
ASSERT(*offset <= Page::kObjectAreaSize);
}
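
A sketch of the live-bytes encoding for paged pages, assuming (as the comment above says) that each object records its offset, in live bytes, from the page's first live object; compaction packs live objects contiguously in allocation order, so the new address is the relocated first-live-object address plus that offset.

#include <cassert>
#include <vector>

struct LiveObj {
  int size;
  int encoded_offset;  // Live-bytes offset from the page's first live object.
};

int main() {
  std::vector<LiveObj> page = {{16, 0}, {32, 0}, {24, 0}};

  // Encode: running total of live bytes seen so far on this page.
  int offset = 0;
  for (LiveObj& o : page) {
    o.encoded_offset = offset;
    offset += o.size;
  }

  // Decode: the page separately records where its first live object moves;
  // every other object lands at that base plus its live-byte offset.
  assert(page[1].encoded_offset == 16 && page[2].encoded_offset == 48);
}
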
// Most non-live objects are ignored.
-inline void IgnoreNonLiveObject(HeapObject* object) {}
+inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
// Function template that, given a range of addresses (eg, a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses. As a side effect,
@@ -1615,11 +1707,12 @@
// The template parameters are an allocation function, a forwarding address
// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
MarkCompactCollector::EncodingFunction Encode,
MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-inline void EncodeForwardingAddressesInRange(Address start,
+inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
+ Address start,
Address end,
int* offset) {
// The start address of the current free region while sweeping the space.
// This address is set when a transition from live to non-live objects is
// encountered. A value (an encoding of the 'next free region' pointer)
@@ -1635,16 +1728,16 @@
int object_size; // Will be set on each iteration of the loop.
for (Address current = start; current < end; current += object_size) {
HeapObject* object = HeapObject::FromAddress(current);
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ collector->tracer()->decrement_marked_count();
object_size = object->Size();
- // Allocation cannot fail, because we are compacting the space.
- Object* forwarded = Alloc(object, object_size)->ToObjectUnchecked();
- Encode(object, object_size, forwarded, offset);
+ Object* forwarded =
+ Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
+ Encode(collector->heap(), object, object_size, forwarded, offset);
#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("forward %p -> %p.\n", object->address(),
HeapObject::cast(forwarded)->address());
@@ -1654,11 +1747,11 @@
EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
is_prev_alive = true;
}
} else { // Non-live object.
object_size = object->Size();
- ProcessNonLive(object);
+ ProcessNonLive(object, collector->heap()->isolate());
if (is_prev_alive) { // Transition from live to non-live.
free_start = current;
is_prev_alive = false;
}
LiveObjectList::ProcessNonLive(object);
@@ -1676,12 +1769,13 @@
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
int ignored;
EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
EncodeForwardingAddressInNewSpace,
IgnoreNonLiveObject>(
- Heap::new_space()->bottom(),
- Heap::new_space()->top(),
+ this,
+ heap()->new_space()->bottom(),
+ heap()->new_space()->top(),
&ignored);
}
template<MarkCompactCollector::AllocationFunction Alloc,
@@ -1696,10 +1790,11 @@
// in the page.
int offset = 0;
EncodeForwardingAddressesInRange<Alloc,
EncodeForwardingAddressInPagedSpace,
ProcessNonLive>(
+ this,
p->ObjectAreaStart(),
p->AllocationTop(),
&offset);
}
}
@@ -1713,52 +1808,55 @@
// NULL as a forwarding address.
// The second pass updates pointers to new space in all spaces. It is possible
// to encounter pointers to dead objects during traversal of dirty regions we
// should clear them to avoid encountering them during next dirty regions
// iteration.
-static void MigrateObject(Address dst,
+static void MigrateObject(Heap* heap,
+ Address dst,
Address src,
int size,
bool to_old_space) {
if (to_old_space) {
- Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
+ heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
} else {
- Heap::CopyBlock(dst, src, size);
+ heap->CopyBlock(dst, src, size);
}
Memory::Address_at(src) = dst;
}
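
A sketch of the migrate-and-forward step with raw buffers: the body is copied, then the first word of the old copy is overwritten with the new address so the second pass can translate stale pointers (the real code also updates region dirty marks when copying into old space, per the branch above).

#include <cassert>
#include <cstring>

static void MigrateObject(unsigned char* dst, unsigned char* src, int size) {
  std::memcpy(dst, src, size);           // Copy the object body.
  std::memcpy(src, &dst, sizeof(dst));   // Leave a forwarding address behind.
}

int main() {
  alignas(void*) unsigned char old_place[32] = {1, 2, 3, 4};
  alignas(void*) unsigned char new_place[32];

  MigrateObject(new_place, old_place, 32);

  // A stale pointer to old_place is updated by reading the forwarding word.
  unsigned char* forwarded;
  std::memcpy(&forwarded, old_place, sizeof(forwarded));
  assert(forwarded == new_place && new_place[2] == 3);
}
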
class StaticPointersToNewGenUpdatingVisitor : public
StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
public:
- static inline void VisitPointer(Object** p) {
+ static inline void VisitPointer(Heap* heap, Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
Address old_addr = obj->address();
- if (Heap::new_space()->Contains(obj)) {
- ASSERT(Heap::InFromSpace(*p));
+ if (heap->new_space()->Contains(obj)) {
+ ASSERT(heap->InFromSpace(*p));
*p = HeapObject::FromAddress(Memory::Address_at(old_addr));
}
}
};
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
public:
+ explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
+
void VisitPointer(Object** p) {
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
+ StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
+ StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
}
}
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
@@ -1774,21 +1872,23 @@
rinfo->IsPatchedDebugBreakSlotSequence()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
VisitPointer(&target);
rinfo->set_call_address(Code::cast(target)->instruction_start());
}
+ private:
+ Heap* heap_;
};
// Visitor for updating pointers from live objects in old spaces to new space.
// It can encounter pointers to dead objects in new space when traversing map
// space (see comment for MigrateObject).
static void UpdatePointerToNewGen(HeapObject** p) {
if (!(*p)->IsHeapObject()) return;
Address old_addr = (*p)->address();
- ASSERT(Heap::InFromSpace(*p));
+ ASSERT(HEAP->InFromSpace(*p));
Address new_addr = Memory::Address_at(old_addr);
if (new_addr == NULL) {
// We encountered a pointer to a dead object. Clear it so we will
@@ -1798,54 +1898,57 @@
*p = HeapObject::FromAddress(new_addr);
}
}
-static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
+static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+ Object** p) {
Address old_addr = HeapObject::cast(*p)->address();
Address new_addr = Memory::Address_at(old_addr);
return String::cast(HeapObject::FromAddress(new_addr));
}
-static bool TryPromoteObject(HeapObject* object, int object_size) {
+static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
Object* result;
- if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+ if (object_size > heap->MaxObjectSizeInPagedSpace()) {
MaybeObject* maybe_result =
- Heap::lo_space()->AllocateRawFixedArray(object_size);
+ heap->lo_space()->AllocateRawFixedArray(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(), object->address(), object_size, true);
- MarkCompactCollector::tracer()->
+ MigrateObject(heap, target->address(), object->address(), object_size,
+ true);
+ heap->mark_compact_collector()->tracer()->
increment_promoted_objects_size(object_size);
return true;
}
} else {
- OldSpace* target_space = Heap::TargetSpace(object);
+ OldSpace* target_space = heap->TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space() ||
- target_space == Heap::old_data_space());
+ ASSERT(target_space == heap->old_pointer_space() ||
+ target_space == heap->old_data_space());
MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
+ MigrateObject(heap,
+ target->address(),
object->address(),
object_size,
- target_space == Heap::old_pointer_space());
- MarkCompactCollector::tracer()->
+ target_space == heap->old_pointer_space());
+ heap->mark_compact_collector()->tracer()->
increment_promoted_objects_size(object_size);
return true;
}
}
return false;
}
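// [Editorial note] TryPromoteObject's target choice, as implemented above:
// objects larger than heap->MaxObjectSizeInPagedSpace() go to lo_space;
// everything else goes to heap->TargetSpace(object), which is either
// old_pointer_space or old_data_space. If the raw allocation fails, the
// function returns false and SweepNewSpace falls back to copying the object
// into the other semispace.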
-static void SweepNewSpace(NewSpace* space) {
- Heap::CheckNewSpaceExpansionCriteria();
+static void SweepNewSpace(Heap* heap, NewSpace* space) {
+ heap->CheckNewSpaceExpansionCriteria();
Address from_bottom = space->bottom();
Address from_top = space->top();
// Flip the semispaces. After flipping, to space is empty, from space has
@@ -1861,25 +1964,26 @@
for (Address current = from_bottom; current < from_top; current += size) {
HeapObject* object = HeapObject::FromAddress(current);
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ heap->mark_compact_collector()->tracer()->decrement_marked_count();
size = object->Size();
survivors_size += size;
// Aggressively promote young survivors to the old space.
- if (TryPromoteObject(object, size)) {
+ if (TryPromoteObject(heap, object, size)) {
continue;
}
// Promotion failed. Just migrate object to another semispace.
// Allocation cannot fail at this point: semispaces are of equal size.
Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
- MigrateObject(HeapObject::cast(target)->address(),
+ MigrateObject(heap,
+ HeapObject::cast(target)->address(),
current,
size,
false);
} else {
// Process the dead object before we write a NULL into its header.
@@ -1889,11 +1993,11 @@
Memory::Address_at(current) = NULL;
}
}
// Second pass: find pointers to new space and update them.
- PointersToNewGenUpdatingVisitor updating_visitor;
+ PointersToNewGenUpdatingVisitor updating_visitor(heap);
// Update pointers in to-space.
Address current = space->bottom();
while (current < space->top()) {
HeapObject* object = HeapObject::FromAddress(current);
@@ -1901,23 +2005,23 @@
StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
object);
}
// Update roots.
- Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
+ heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
LiveObjectList::IterateElements(&updating_visitor);
// Update pointers in old spaces.
- Heap::IterateDirtyRegions(Heap::old_pointer_space(),
+ heap->IterateDirtyRegions(heap->old_pointer_space(),
&Heap::IteratePointersInDirtyRegion,
&UpdatePointerToNewGen,
- Heap::WATERMARK_SHOULD_BE_VALID);
+ heap->WATERMARK_SHOULD_BE_VALID);
- Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
+ heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
// Update pointers from cells.
- HeapObjectIterator cell_iterator(Heap::cell_space());
+ HeapObjectIterator cell_iterator(heap->cell_space());
for (HeapObject* cell = cell_iterator.next();
cell != NULL;
cell = cell_iterator.next()) {
if (cell->IsJSGlobalPropertyCell()) {
Address value_address =
@@ -1926,23 +2030,26 @@
updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
}
}
// Update pointer from the global contexts list.
- updating_visitor.VisitPointer(Heap::global_contexts_list_address());
+ updating_visitor.VisitPointer(heap->global_contexts_list_address());
// Update pointers from external string table.
- Heap::UpdateNewSpaceReferencesInExternalStringTable(
+ heap->UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
// All pointers were updated. Update auxiliary allocation info.
- Heap::IncrementYoungSurvivorsCounter(survivors_size);
+ heap->IncrementYoungSurvivorsCounter(survivors_size);
space->set_age_mark(space->top());
+
+ // Update JSFunction pointers from the runtime profiler.
+ heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
}
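// [Editorial note] SweepNewSpace is a two-pass evacuation: pass one walks
// from-space, promotes or copies each marked object and stores a forwarding
// word in its old header; pass two rewrites every pointer into new space
// (to-space bodies, roots, dirty old-space regions, lo-space, cells, the
// global contexts list, the external string table, and now the runtime
// profiler's samples) through those forwarding words.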
-static void SweepSpace(PagedSpace* space) {
+static void SweepSpace(Heap* heap, PagedSpace* space) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
// During sweeping of a paged space we try to find the longest sequences
// of pages without live objects and free them (instead of putting them on
// the free list).
@@ -1976,20 +2083,21 @@
current < p->AllocationTop();
current += object->Size()) {
object = HeapObject::FromAddress(current);
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ heap->mark_compact_collector()->tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
space->DeallocateBlock(free_start,
static_cast<int>(current - free_start),
true);
is_previous_alive = true;
}
} else {
- MarkCompactCollector::ReportDeleteIfNeeded(object);
+ heap->mark_compact_collector()->ReportDeleteIfNeeded(
+ object, heap->isolate());
if (is_previous_alive) { // Transition from live to free.
free_start = current;
is_previous_alive = false;
}
LiveObjectList::ProcessNonLive(object);
@@ -2085,28 +2193,28 @@
void MarkCompactCollector::EncodeForwardingAddresses() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
// Objects in the active semispace of the young generation may be
// relocated to the inactive semispace (if not promoted). Set the
// relocation info to the beginning of the inactive semispace.
- Heap::new_space()->MCResetRelocationInfo();
+ heap()->new_space()->MCResetRelocationInfo();
// Compute the forwarding pointers in each space.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
ReportDeleteIfNeeded>(
- Heap::old_pointer_space());
+ heap()->old_pointer_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
IgnoreNonLiveObject>(
- Heap::old_data_space());
+ heap()->old_data_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
ReportDeleteIfNeeded>(
- Heap::code_space());
+ heap()->code_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
IgnoreNonLiveObject>(
- Heap::cell_space());
+ heap()->cell_space());
// Compute new space next to last after the old and code spaces have been
// compacted. Objects in new space can be promoted to old or code space.
EncodeForwardingAddressesInNewSpace();
@@ -2114,29 +2222,30 @@
// Compute map space last because computing forwarding addresses
// overwrites non-live objects. Objects in the other spaces rely on
// non-live map pointers to get the sizes of non-live objects.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
IgnoreNonLiveObject>(
- Heap::map_space());
+ heap()->map_space());
// Write relocation info to the top page, so we can use it later. This is
// done after promoting objects from the new space so we get the correct
// allocation top.
- Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
- Heap::old_data_space()->MCWriteRelocationInfoToPage();
- Heap::code_space()->MCWriteRelocationInfoToPage();
- Heap::map_space()->MCWriteRelocationInfoToPage();
- Heap::cell_space()->MCWriteRelocationInfoToPage();
+ heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
+ heap()->old_data_space()->MCWriteRelocationInfoToPage();
+ heap()->code_space()->MCWriteRelocationInfoToPage();
+ heap()->map_space()->MCWriteRelocationInfoToPage();
+ heap()->cell_space()->MCWriteRelocationInfoToPage();
}
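// [Editorial note] The encoding order above is deliberate: old, code, and
// cell spaces first; new space next to last so its survivors can promote
// into them; map space last, because encoding overwrites non-live objects
// and the other spaces still need non-live map pointers to size their dead
// objects.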
class MapIterator : public HeapObjectIterator {
public:
- MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
+ explicit MapIterator(Heap* heap)
+ : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
- explicit MapIterator(Address start)
- : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
+ MapIterator(Heap* heap, Address start)
+ : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
private:
static int SizeCallback(HeapObject* unused) {
USE(unused);
return Map::kSize;
@@ -2144,14 +2253,16 @@
};
class MapCompact {
public:
- explicit MapCompact(int live_maps)
- : live_maps_(live_maps),
- to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
- map_to_evacuate_it_(to_evacuate_start_),
+ explicit MapCompact(Heap* heap, int live_maps)
+ : heap_(heap),
+ live_maps_(live_maps),
+ to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
+ vacant_map_it_(heap),
+ map_to_evacuate_it_(heap, to_evacuate_start_),
first_map_to_evacuate_(
reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
}
void CompactMaps() {
@@ -2167,50 +2278,59 @@
CheckNoMapsToEvacuate();
#endif
}
void UpdateMapPointersInRoots() {
- Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
- GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
- LiveObjectList::IterateElements(&map_updating_visitor_);
+ MapUpdatingVisitor map_updating_visitor;
+ heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
+ heap()->isolate()->global_handles()->IterateWeakRoots(
+ &map_updating_visitor);
+ LiveObjectList::IterateElements(&map_updating_visitor);
}
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
- ASSERT(space != Heap::map_space());
+ ASSERT(space != heap()->map_space());
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* p = it.next();
- UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
+ UpdateMapPointersInRange(heap(),
+ p->ObjectAreaStart(),
+ p->AllocationTop());
}
}
void UpdateMapPointersInNewSpace() {
- NewSpace* space = Heap::new_space();
- UpdateMapPointersInRange(space->bottom(), space->top());
+ NewSpace* space = heap()->new_space();
+ UpdateMapPointersInRange(heap(), space->bottom(), space->top());
}
void UpdateMapPointersInLargeObjectSpace() {
- LargeObjectIterator it(Heap::lo_space());
+ LargeObjectIterator it(heap()->lo_space());
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- UpdateMapPointersInObject(obj);
+ UpdateMapPointersInObject(heap(), obj);
}
void Finish() {
- Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
+ heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
}
+ inline Heap* heap() const { return heap_; }
+
private:
+ Heap* heap_;
int live_maps_;
Address to_evacuate_start_;
MapIterator vacant_map_it_;
MapIterator map_to_evacuate_it_;
Map* first_map_to_evacuate_;
// Helper class for updating map pointers in HeapObjects.
class MapUpdatingVisitor: public ObjectVisitor {
public:
+ MapUpdatingVisitor() {}
+
void VisitPointer(Object** p) {
UpdateMapPointer(p);
}
void VisitPointers(Object** start, Object** end) {
@@ -2229,12 +2349,10 @@
*p = GetForwardedMap(map_word);
}
};
- static MapUpdatingVisitor map_updating_visitor_;
-
static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
while (true) {
HeapObject* next = it->next();
ASSERT(next != NULL);
if (next == last)
@@ -2264,13 +2382,12 @@
ASSERT(FreeListNode::IsFreeListNode(vacant_map));
ASSERT(map_to_evacuate->IsMap());
ASSERT(Map::kSize % 4 == 0);
- Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
- map_to_evacuate->address(),
- Map::kSize);
+ map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
+ vacant_map->address(), map_to_evacuate->address(), Map::kSize);
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
forwarding_map_word.SetOverflow();
@@ -2286,19 +2403,19 @@
Map* new_map = map_word.ToMap();
ASSERT_MAP_ALIGNED(new_map->address());
return new_map;
}
- static int UpdateMapPointersInObject(HeapObject* obj) {
+ static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
ASSERT(!obj->IsMarked());
Map* map = obj->map();
- ASSERT(Heap::map_space()->Contains(map));
+ ASSERT(heap->map_space()->Contains(map));
MapWord map_word = map->map_word();
ASSERT(!map_word.IsMarked());
if (map_word.IsOverflowed()) {
Map* new_map = GetForwardedMap(map_word);
- ASSERT(Heap::map_space()->Contains(new_map));
+ ASSERT(heap->map_space()->Contains(new_map));
obj->set_map(new_map);
#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("update %p : %p -> %p\n",
@@ -2308,20 +2425,21 @@
}
#endif
}
int size = obj->SizeFromMap(map);
- obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
+ MapUpdatingVisitor map_updating_visitor;
+ obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
return size;
}
- static void UpdateMapPointersInRange(Address start, Address end) {
+ static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
HeapObject* object;
int size;
for (Address current = start; current < end; current += size) {
object = HeapObject::FromAddress(current);
- size = UpdateMapPointersInObject(object);
+ size = UpdateMapPointersInObject(heap, object);
ASSERT(size > 0);
}
}
#ifdef DEBUG
@@ -2334,51 +2452,49 @@
ASSERT(FreeListNode::IsFreeListNode(obj));
}
#endif
};
-MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
-
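// [Editorial note] The file-scope MapCompact::map_updating_visitor_
// singleton is removed; UpdateMapPointersInRoots() and
// UpdateMapPointersInObject() now construct a MapUpdatingVisitor locally.
// Mutable static state would be shared across isolates, so stack-local
// visitors replace it.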
void MarkCompactCollector::SweepSpaces() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
ASSERT(state_ == SWEEP_SPACES);
ASSERT(!IsCompacting());
// Noncompacting collections simply sweep the spaces to clear the mark
// bits and free the nonlive blocks (for old and map spaces). We sweep
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SweepSpace(Heap::old_pointer_space());
- SweepSpace(Heap::old_data_space());
- SweepSpace(Heap::code_space());
- SweepSpace(Heap::cell_space());
+ SweepSpace(heap(), heap()->old_pointer_space());
+ SweepSpace(heap(), heap()->old_data_space());
+ SweepSpace(heap(), heap()->code_space());
+ SweepSpace(heap(), heap()->cell_space());
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
- SweepNewSpace(Heap::new_space());
+ SweepNewSpace(heap(), heap()->new_space());
}
- SweepSpace(Heap::map_space());
+ SweepSpace(heap(), heap()->map_space());
- Heap::IterateDirtyRegions(Heap::map_space(),
- &Heap::IteratePointersInDirtyMapsRegion,
- &UpdatePointerToNewGen,
- Heap::WATERMARK_SHOULD_BE_VALID);
+ heap()->IterateDirtyRegions(heap()->map_space(),
+ &heap()->IteratePointersInDirtyMapsRegion,
+ &UpdatePointerToNewGen,
+ heap()->WATERMARK_SHOULD_BE_VALID);
- intptr_t live_maps_size = Heap::map_space()->Size();
+ intptr_t live_maps_size = heap()->map_space()->Size();
int live_maps = static_cast<int>(live_maps_size / Map::kSize);
ASSERT(live_map_objects_size_ == live_maps_size);
- if (Heap::map_space()->NeedsCompaction(live_maps)) {
- MapCompact map_compact(live_maps);
+ if (heap()->map_space()->NeedsCompaction(live_maps)) {
+ MapCompact map_compact(heap(), live_maps);
map_compact.CompactMaps();
map_compact.UpdateMapPointersInRoots();
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
- if (space == Heap::map_space()) continue;
+ if (space == heap()->map_space()) continue;
map_compact.UpdateMapPointersInPagedSpace(space);
}
map_compact.UpdateMapPointersInNewSpace();
map_compact.UpdateMapPointersInLargeObjectSpace();
@@ -2393,38 +2509,38 @@
// address in the range is top. The callback function is used to get the
// size of each live object.
int MarkCompactCollector::IterateLiveObjectsInRange(
Address start,
Address end,
- HeapObjectCallback size_func) {
+ LiveObjectCallback size_func) {
int live_objects_size = 0;
Address current = start;
while (current < end) {
uint32_t encoded_map = Memory::uint32_at(current);
if (encoded_map == kSingleFreeEncoding) {
current += kPointerSize;
} else if (encoded_map == kMultiFreeEncoding) {
current += Memory::int_at(current + kIntSize);
} else {
- int size = size_func(HeapObject::FromAddress(current));
+ int size = (this->*size_func)(HeapObject::FromAddress(current));
current += size;
live_objects_size += size;
}
}
return live_objects_size;
}
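// [Editorial sketch, not part of the diff] LiveObjectCallback replaces the
// free-function HeapObjectCallback with a pointer to member, presumably
// declared along the lines of:
//
//   typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
//
// which is why the call site above needs the ->* syntax:
//
//   int size = (this->*size_func)(HeapObject::FromAddress(current));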
-int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
- HeapObjectCallback size_f) {
+int MarkCompactCollector::IterateLiveObjects(
+ NewSpace* space, LiveObjectCallback size_f) {
ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}
-int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
- HeapObjectCallback size_f) {
+int MarkCompactCollector::IterateLiveObjects(
+ PagedSpace* space, LiveObjectCallback size_f) {
ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
int total = 0;
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* p = it.next();
@@ -2440,10 +2556,12 @@
// Phase 3: Update pointers
// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
public:
+ explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
+
void VisitPointer(Object** p) {
UpdatePointer(p);
}
void VisitPointers(Object** start, Object** end) {
@@ -2468,38 +2586,40 @@
VisitPointer(&target);
rinfo->set_call_address(
reinterpret_cast<Code*>(target)->instruction_start());
}
+ inline Heap* heap() const { return heap_; }
+
private:
void UpdatePointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
Address old_addr = obj->address();
Address new_addr;
- ASSERT(!Heap::InFromSpace(obj));
+ ASSERT(!heap()->InFromSpace(obj));
- if (Heap::new_space()->Contains(obj)) {
+ if (heap()->new_space()->Contains(obj)) {
Address forwarding_pointer_addr =
- Heap::new_space()->FromSpaceLow() +
- Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+ heap()->new_space()->FromSpaceLow() +
+ heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
new_addr = Memory::Address_at(forwarding_pointer_addr);
#ifdef DEBUG
- ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
- Heap::old_data_space()->Contains(new_addr) ||
- Heap::new_space()->FromSpaceContains(new_addr) ||
- Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
+ ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
+ heap()->old_data_space()->Contains(new_addr) ||
+ heap()->new_space()->FromSpaceContains(new_addr) ||
+ heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
- if (Heap::new_space()->FromSpaceContains(new_addr)) {
- ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
- Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
+ if (heap()->new_space()->FromSpaceContains(new_addr)) {
+ ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+ heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
}
#endif
- } else if (Heap::lo_space()->Contains(obj)) {
+ } else if (heap()->lo_space()->Contains(obj)) {
// Don't move objects in the large object space.
return;
} else {
#ifdef DEBUG
@@ -2524,42 +2644,48 @@
PrintF("update %p : %p -> %p\n",
reinterpret_cast<Address>(p), old_addr, new_addr);
}
#endif
}
+
+ Heap* heap_;
};
void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
state_ = UPDATE_POINTERS;
#endif
- UpdatingVisitor updating_visitor;
- Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
- GlobalHandles::IterateWeakRoots(&updating_visitor);
+ UpdatingVisitor updating_visitor(heap());
+ heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
+ &updating_visitor);
+ heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
+ heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
// Update the pointer to the head of the weak list of global contexts.
- updating_visitor.VisitPointer(&Heap::global_contexts_list_);
+ updating_visitor.VisitPointer(&heap()->global_contexts_list_);
LiveObjectList::IterateElements(&updating_visitor);
- int live_maps_size = IterateLiveObjects(Heap::map_space(),
- &UpdatePointersInOldObject);
- int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
- &UpdatePointersInOldObject);
- int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
- &UpdatePointersInOldObject);
- int live_codes_size = IterateLiveObjects(Heap::code_space(),
- &UpdatePointersInOldObject);
- int live_cells_size = IterateLiveObjects(Heap::cell_space(),
- &UpdatePointersInOldObject);
- int live_news_size = IterateLiveObjects(Heap::new_space(),
- &UpdatePointersInNewObject);
+ int live_maps_size = IterateLiveObjects(
+ heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_pointer_olds_size = IterateLiveObjects(
+ heap()->old_pointer_space(),
+ &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_data_olds_size = IterateLiveObjects(
+ heap()->old_data_space(),
+ &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_codes_size = IterateLiveObjects(
+ heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_cells_size = IterateLiveObjects(
+ heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_news_size = IterateLiveObjects(
+ heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
// Large objects do not move; the map word can be updated directly.
- LargeObjectIterator it(Heap::lo_space());
+ LargeObjectIterator it(heap()->lo_space());
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
UpdatePointersInNewObject(obj);
}
USE(live_maps_size);
@@ -2582,12 +2708,12 @@
Map* old_map = obj->map();
ASSERT(old_map->IsHeapObject());
Address forwarded = GetForwardingAddressInOldSpace(old_map);
- ASSERT(Heap::map_space()->Contains(old_map));
- ASSERT(Heap::map_space()->Contains(forwarded));
+ ASSERT(heap()->map_space()->Contains(old_map));
+ ASSERT(heap()->map_space()->Contains(forwarded));
#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
forwarded);
}
@@ -2598,21 +2724,21 @@
// We have to compute the object size relying on the old map because
// map objects are not relocated yet.
int obj_size = obj->SizeFromMap(old_map);
// Update pointers in the object body.
- UpdatingVisitor updating_visitor;
+ UpdatingVisitor updating_visitor(heap());
obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
return obj_size;
}
int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
// Decode the map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
// At this point, the first word of map_addr is also encoded, so we cannot
// cast it to Map* using Map::cast.
Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
int obj_size = obj->SizeFromMap(map);
@@ -2629,11 +2755,11 @@
map_addr, new_map_addr);
}
#endif
// Update pointers in the object body.
- UpdatingVisitor updating_visitor;
+ UpdatingVisitor updating_visitor(heap());
obj->IterateBody(type, obj_size, &updating_visitor);
return obj_size;
}
@@ -2685,22 +2811,23 @@
ASSERT(state_ == UPDATE_POINTERS);
state_ = RELOCATE_OBJECTS;
#endif
// Relocate objects, always relocating map objects first. Relocating
// objects in the other spaces relies on map objects to get object sizes.
- int live_maps_size = IterateLiveObjects(Heap::map_space(),
- &RelocateMapObject);
- int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
- &RelocateOldPointerObject);
- int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
- &RelocateOldDataObject);
- int live_codes_size = IterateLiveObjects(Heap::code_space(),
- &RelocateCodeObject);
- int live_cells_size = IterateLiveObjects(Heap::cell_space(),
- &RelocateCellObject);
- int live_news_size = IterateLiveObjects(Heap::new_space(),
- &RelocateNewObject);
+ int live_maps_size = IterateLiveObjects(
+ heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
+ int live_pointer_olds_size = IterateLiveObjects(
+ heap()->old_pointer_space(),
+ &MarkCompactCollector::RelocateOldPointerObject);
+ int live_data_olds_size = IterateLiveObjects(
+ heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
+ int live_codes_size = IterateLiveObjects(
+ heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
+ int live_cells_size = IterateLiveObjects(
+ heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
+ int live_news_size = IterateLiveObjects(
+ heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
USE(live_maps_size);
USE(live_pointer_olds_size);
USE(live_data_olds_size);
USE(live_codes_size);
@@ -2712,32 +2839,32 @@
ASSERT(live_codes_size == live_code_objects_size_);
ASSERT(live_cells_size == live_cell_objects_size_);
ASSERT(live_news_size == live_young_objects_size_);
// Flip from and to spaces
- Heap::new_space()->Flip();
+ heap()->new_space()->Flip();
- Heap::new_space()->MCCommitRelocationInfo();
+ heap()->new_space()->MCCommitRelocationInfo();
// Set the age mark to the bottom of to-space.
- Address mark = Heap::new_space()->bottom();
- Heap::new_space()->set_age_mark(mark);
+ Address mark = heap()->new_space()->bottom();
+ heap()->new_space()->set_age_mark(mark);
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
- Heap::CheckNewSpaceExpansionCriteria();
- Heap::IncrementYoungSurvivorsCounter(live_news_size);
+ heap()->CheckNewSpaceExpansionCriteria();
+ heap()->IncrementYoungSurvivorsCounter(live_news_size);
}
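// [Editorial note] RelocateObjects mirrors the encoding phase: maps move
// first so every other space can still size its objects, then the semispaces
// flip, each paged space commits its relocation info, and the young
// generation survival counters are re-seeded with live_news_size.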
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
// Reset map pointer. The meta map object may not be copied yet so
@@ -2746,13 +2873,13 @@
Address old_addr = obj->address();
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- Map::kSize);
+ heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ Map::kSize);
}
#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("relocate %p -> %p\n", old_addr, new_addr);
@@ -2792,12 +2919,12 @@
int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
PagedSpace* space) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(map_addr));
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(map_addr));
// Get forwarding address before resetting map pointer.
Address new_addr = GetForwardingAddressInOldSpace(obj);
// Reset the map pointer.
@@ -2805,131 +2932,146 @@
Address old_addr = obj->address();
if (new_addr != old_addr) {
// Move contents.
- if (space == Heap::old_data_space()) {
- Heap::MoveBlock(new_addr, old_addr, obj_size);
+ if (space == heap()->old_data_space()) {
+ heap()->MoveBlock(new_addr, old_addr, obj_size);
} else {
- Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
+ heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
}
}
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsSharedFunctionInfo()) {
- PROFILE(SFIMoveEvent(old_addr, new_addr));
+ PROFILE(heap()->isolate(),
+ SharedFunctionInfoMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
+ return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
}
int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, Heap::old_data_space());
+ return RelocateOldNonCodeObject(obj, heap()->old_data_space());
}
int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, Heap::cell_space());
+ return RelocateOldNonCodeObject(obj, heap()->cell_space());
}
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
// Reset the map pointer.
- int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
+ int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
Address old_addr = obj->address();
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(new_addr, old_addr, obj_size);
+ heap()->MoveBlock(new_addr, old_addr, obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsCode()) {
// May also update inline cache target.
Code::cast(copied_to)->Relocate(new_addr - old_addr);
// Notify the logger that compiled code has moved.
- PROFILE(CodeMoveEvent(old_addr, new_addr));
+ PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
int obj_size = obj->Size();
// Get forwarding address
Address old_addr = obj->address();
- int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+ int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
Address new_addr =
- Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
+ Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
#ifdef DEBUG
- if (Heap::new_space()->FromSpaceContains(new_addr)) {
- ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
- Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
+ if (heap()->new_space()->FromSpaceContains(new_addr)) {
+ ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+ heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
} else {
- ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
- Heap::TargetSpace(obj) == Heap::old_data_space());
+ ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
+ heap()->TargetSpace(obj) == heap()->old_data_space());
}
#endif
// New and old addresses cannot overlap.
- if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
- Heap::CopyBlock(new_addr, old_addr, obj_size);
+ if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
+ heap()->CopyBlock(new_addr, old_addr, obj_size);
} else {
- Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
+ heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
}
#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("relocate %p -> %p\n", old_addr, new_addr);
}
#endif
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsSharedFunctionInfo()) {
- PROFILE(SFIMoveEvent(old_addr, new_addr));
+ PROFILE(heap()->isolate(),
+ SharedFunctionInfoMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
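// [Editorial sketch, not part of the diff] A new-space object's forwarding
// address is stored in from-space at the offset the object had in to-space,
// i.e. roughly:
//
//   Address slot = heap()->new_space()->FromSpaceLow() +
//                  heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
//   Address new_addr = Memory::Address_at(slot);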
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
+void MarkCompactCollector::EnableCodeFlushing(bool enable) {
+ if (enable) {
+ if (code_flusher_ != NULL) return;
+ code_flusher_ = new CodeFlusher(heap()->isolate());
+ } else {
+ if (code_flusher_ == NULL) return;
+ delete code_flusher_;
+ code_flusher_ = NULL;
+ }
+}
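// [Editorial note] EnableCodeFlushing is idempotent in both directions: it
// allocates a CodeFlusher (now bound to the heap's isolate) only when none
// exists, and deletes it only when one does.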
+
+
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
+ Isolate* isolate) {
#ifdef ENABLE_GDB_JIT_INTERFACE
if (obj->IsCode()) {
GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
}
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
- PROFILE(CodeDeleteEvent(obj->address()));
+ PROFILE(isolate, CodeDeleteEvent(obj->address()));
}
#endif
}
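// [Editorial note] ReportDeleteIfNeeded now takes the Isolate* explicitly so
// the PROFILE macro can log against the right isolate; callers (see
// SweepSpace above) pass heap->isolate().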