Commit b514973d authored by Nikolaos Papaspyrou, committed by V8 LUCI CQ

cleanup: Fix some typos

Mostly in comments, not much to be said...

Bug: v8:12425
Change-Id: Ib1e4d3913f9b91eeafefbef13330fd1388223c06
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3650597
Commit-Queue: Nikolaos Papaspyrou <nikolaos@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80562}
parent a26f9f54
@@ -347,7 +347,7 @@ class Heap {
static const int kPointerMultiplier = kTaggedSize / 4;
// The heap limit needs to be computed based on the system pointer size
// because we want a pointer-compressed heap to have larger limit than
-// an orinary 32-bit which that is contrained by 2GB virtual address space.
+// an ordinary 32-bit which that is constrained by 2GB virtual address space.
static const int kHeapLimitMultiplier = kSystemPointerSize / 4;
#endif
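As a reading aid for the two constants above: with pointer compression on a 64-bit build, kTaggedSize is 4 while kSystemPointerSize is 8, so kPointerMultiplier stays 1 but kHeapLimitMultiplier becomes 2, i.e. the limit scales with the virtual address space rather than with the tagged pointer size. A minimal standalone sketch of that arithmetic (values assumed for illustration only):

#include <cstdio>

// Illustrative only: models the multiplier arithmetic from the comment above
// for a 64-bit build with pointer compression enabled.
constexpr int kTaggedSize = 4;         // compressed tagged values
constexpr int kSystemPointerSize = 8;  // real machine pointers
constexpr int kPointerMultiplier = kTaggedSize / 4;           // == 1
constexpr int kHeapLimitMultiplier = kSystemPointerSize / 4;  // == 2

int main() {
  std::printf("kPointerMultiplier=%d kHeapLimitMultiplier=%d\n",
              kPointerMultiplier, kHeapLimitMultiplier);
}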
@@ -383,7 +383,7 @@ class Heap {
static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
// GCs that are forced, either through testing configurations (requring
-// --expose-gc) or through DevTools (using LowMemoryNotificaton).
+// --expose-gc) or through DevTools (using LowMemoryNotification).
static const int kForcedGC = 2;
// The minimum size of a HeapObject on the heap.
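For context, these flags form a plain bit mask, so a GC forced via --expose-gc or DevTools' LowMemoryNotification can be recognized with a simple bit test. A hedged sketch (the flag values are copied from above; the IsForcedGC helper is made up for illustration):

#include <cstdio>

constexpr int kNoGCFlags = 0;
constexpr int kReduceMemoryFootprintMask = 1;
constexpr int kForcedGC = 2;

// Hypothetical helper, not a V8 API: checks the forced-GC bit.
bool IsForcedGC(int gc_flags) { return (gc_flags & kForcedGC) != 0; }

int main() {
  std::printf("%d %d\n", IsForcedGC(kNoGCFlags),
              IsForcedGC(kForcedGC | kReduceMemoryFootprintMask));
}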
@@ -436,7 +436,7 @@ class Heap {
}
// Helper function to get the bytecode flushing mode based on the flags. This
-// is required because it is not safe to acess flags in concurrent marker.
+// is required because it is not safe to access flags in concurrent marker.
static inline base::EnumSet<CodeFlushMode> GetCodeFlushMode(Isolate* isolate);
static uintptr_t ZapValue() {
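The point of the comment is that flag-derived configuration should be snapshotted up front and handed to concurrent tasks by value, instead of being read from global flags while the concurrent marker is running. A minimal standalone sketch of that pattern (the types and names below are invented for illustration and are not the V8 API):

#include <cstdint>
#include <cstdio>
#include <thread>

// Hypothetical stand-in for a small set of flushing modes encoded as bits.
enum CodeFlushMode : uint8_t { kFlushBytecode = 1, kFlushBaselineCode = 2 };

// Computed once from configuration flags, on the main thread.
uint8_t ComputeCodeFlushMode(bool flush_bytecode, bool flush_baseline) {
  uint8_t mode = 0;
  if (flush_bytecode) mode |= kFlushBytecode;
  if (flush_baseline) mode |= kFlushBaselineCode;
  return mode;
}

int main() {
  const uint8_t mode = ComputeCodeFlushMode(/*flush_bytecode=*/true,
                                            /*flush_baseline=*/false);
  // The "concurrent marker" only ever sees the snapshot, never the flags.
  std::thread marker([mode] {
    std::printf("marker sees mode=%u\n", static_cast<unsigned>(mode));
  });
  marker.join();
}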
@@ -602,7 +602,7 @@ class Heap {
return reinterpret_cast<Address>(&allocation_sites_list_);
}
-// Traverse all the allocaions_sites [nested_site and weak_next] in the list
+// Traverse all the allocation_sites [nested_site and weak_next] in the list
// and foreach call the visitor
void ForeachAllocationSite(
Object list, const std::function<void(AllocationSite)>& visitor);
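Since the traversal takes a std::function, a call site would typically just pass a lambda as the visitor. A self-contained sketch of that shape (AllocationSite and the list here are dummies; the real function walks a weak list via nested_site and weak_next):

#include <cstdio>
#include <functional>
#include <vector>

// Dummy stand-in so the visitor-callback shape can be shown in isolation.
struct AllocationSite { int id; };

void ForeachAllocationSite(const std::vector<AllocationSite>& list,
                           const std::function<void(AllocationSite)>& visitor) {
  for (const AllocationSite& site : list) visitor(site);
}

int main() {
  std::vector<AllocationSite> sites = {{1}, {2}, {3}};
  ForeachAllocationSite(sites, [](AllocationSite site) {
    std::printf("visiting allocation site %d\n", site.id);
  });
}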
@@ -1354,7 +1354,7 @@ class Heap {
// Returns the amount of executable memory currently committed for the heap.
size_t CommittedMemoryExecutable();
-// Returns the amount of phyical memory currently committed for the heap.
+// Returns the amount of physical memory currently committed for the heap.
size_t CommittedPhysicalMemory();
// Returns the maximum amount of memory ever committed for the heap.
@@ -2262,7 +2262,7 @@ class Heap {
// Observer that can cause early scavenge start.
StressScavengeObserver* stress_scavenge_observer_ = nullptr;
-// The maximum percent of the marking limit reached wihout causing marking.
+// The maximum percent of the marking limit reached without causing marking.
// This is tracked when specyfing --fuzzer-gc-analysis.
double max_marking_limit_reached_ = 0.0;
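The field is effectively a high-water mark: the largest percentage of the marking limit that allocation ever reached without marking being started, recorded only when --fuzzer-gc-analysis is given. A tiny illustrative sketch of such a running maximum (not the V8 code):

#include <algorithm>
#include <cstdio>

// Illustrative high-water mark for "percent of the marking limit reached
// without causing marking".
struct MarkingLimitTracker {
  double max_marking_limit_reached = 0.0;
  void Observe(double percent_of_limit) {
    max_marking_limit_reached =
        std::max(max_marking_limit_reached, percent_of_limit);
  }
};

int main() {
  MarkingLimitTracker tracker;
  for (double percent : {12.5, 80.0, 40.0}) tracker.Observe(percent);
  std::printf("max reached: %.1f%%\n", tracker.max_marking_limit_reached);
}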
@@ -2460,7 +2460,7 @@ class Heap {
// ephemeron is stored in this map.
std::unordered_map<HeapObject, HeapObject, Object::Hasher>
ephemeron_retainer_;
-// For each index inthe retaining_path_targets_ array this map
+// For each index in the retaining_path_targets_ array this map
// stores the option of the corresponding target.
std::unordered_map<int, RetainingPathOption> retaining_path_target_option_;
......
@@ -837,7 +837,7 @@ void IncrementalMarking::FetchBytesMarkedConcurrently() {
if (FLAG_concurrent_marking) {
size_t current_bytes_marked_concurrently =
heap()->concurrent_marking()->TotalMarkedBytes();
-// The concurrent_marking()->TotalMarkedBytes() is not monothonic for a
+// The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
// short period of time when a concurrent marking task is finishing.
if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
bytes_marked_ +=
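The guard above matters because the concurrently reported total can briefly appear to go backwards while a marking task finishes; only a positive delta is ever folded into bytes_marked_. A standalone model of that delta accumulation (illustrative names, not the actual V8 members):

#include <cstddef>
#include <cstdio>

// Accumulates only the positive delta of a counter whose reported value is
// not monotonic for short periods of time.
struct MarkedBytesAccumulator {
  size_t bytes_marked = 0;
  size_t bytes_marked_concurrently = 0;  // last reading already accounted for

  void Fetch(size_t current_bytes_marked_concurrently) {
    if (current_bytes_marked_concurrently > bytes_marked_concurrently) {
      bytes_marked +=
          current_bytes_marked_concurrently - bytes_marked_concurrently;
      bytes_marked_concurrently = current_bytes_marked_concurrently;
    }
    // A transiently smaller reading is simply ignored, never subtracted.
  }
};

int main() {
  MarkedBytesAccumulator acc;
  for (size_t reading : {100, 90, 150}) acc.Fetch(reading);  // 90 is a dip
  std::printf("bytes_marked=%zu\n", acc.bytes_marked);       // prints 150
}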
@@ -865,7 +865,7 @@ size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
(bytes_marked_ - scheduled_bytes_to_mark_) / KB);
}
}
-// Allow steps on allocation to get behind the schedule by small ammount.
+// Allow steps on allocation to get behind the schedule by small amount.
// This gives higher priority to steps in tasks.
size_t kScheduleMarginInBytes = step_origin == StepOrigin::kV8 ? 1 * MB : 0;
if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
......
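In other words, an allocation-triggered step (StepOrigin::kV8) gets a 1 MB grace window behind the schedule before it is asked to do work, while task-triggered steps get none and therefore do most of the catching up. A simplified, self-contained sketch of that check (assumed return values; not the full step-size computation):

#include <cstddef>
#include <cstdio>

constexpr size_t MB = 1024 * 1024;
enum class StepOrigin { kV8, kTask };

// Simplified model: an allocation-origin step may lag the schedule by up to
// 1 MB before it is asked to mark anything; task-origin steps get no margin.
size_t ComputeStepSizeInBytes(size_t bytes_marked,
                              size_t scheduled_bytes_to_mark,
                              StepOrigin step_origin) {
  const size_t kScheduleMarginInBytes =
      step_origin == StepOrigin::kV8 ? 1 * MB : 0;
  if (bytes_marked + kScheduleMarginInBytes > scheduled_bytes_to_mark) return 0;
  return scheduled_bytes_to_mark - bytes_marked;  // catch up to the schedule
}

int main() {
  // Marking is 0.5 MB behind schedule: the allocation step skips work,
  // the task step catches up.
  std::printf("%zu\n", ComputeStepSizeInBytes(10 * MB, 10 * MB + MB / 2,
                                              StepOrigin::kV8));    // 0
  std::printf("%zu\n", ComputeStepSizeInBytes(10 * MB, 10 * MB + MB / 2,
                                              StepOrigin::kTask));  // 524288
}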
@@ -5533,7 +5533,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
heap()->concurrent_marking()->ClearMemoryChunkData(p);
}
}
-// Since we promote all surviving large objects immediatelly, all remaining
+// Since we promote all surviving large objects immediately, all remaining
// large objects must be dead.
// TODO(v8:11685): Don't free all as soon as we have an intermediate
// generation.
......
@@ -421,7 +421,7 @@ void ScavengerCollector::CollectGarbage() {
// Set age mark.
semi_space_new_space->set_age_mark(semi_space_new_space->top());
-// Since we promote all surviving large objects immediatelly, all remaining
+// Since we promote all surviving large objects immediately, all remaining
// large objects must be dead.
// TODO(hpayer): Don't free all as soon as we have an intermediate generation.
heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
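Because every surviving large object has already been promoted at this point (in both the minor mark-compactor above and the scavenger here), the FreeDeadObjects predicate can unconditionally report every remaining object as dead. A self-contained sketch of that predicate-driven sweep (dummy types, not the real new_lo_space() API):

#include <cstdio>
#include <functional>
#include <vector>

// Dummy stand-ins illustrating a predicate-driven sweep of a large-object
// space: every object for which is_dead returns true is released.
struct HeapObject { int id; };

struct NewLargeObjectSpace {
  std::vector<HeapObject> objects;
  void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead) {
    for (auto it = objects.begin(); it != objects.end();) {
      it = is_dead(*it) ? objects.erase(it) : it + 1;
    }
  }
};

int main() {
  NewLargeObjectSpace space{{{1}, {2}}};
  // All survivors were promoted already, so everything left is dead.
  space.FreeDeadObjects([](HeapObject) { return true; });
  std::printf("remaining large objects: %zu\n", space.objects.size());  // 0
}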
@@ -464,7 +464,7 @@ void ScavengerCollector::IterateStackAndScavenge(
RootScavengeVisitor* root_scavenge_visitor,
std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id) {
// Scan the stack, scavenge the newly discovered objects, and report
-// the survival statistics before and afer the stack scanning.
+// the survival statistics before and after the stack scanning.
// This code is not intended for production.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
size_t survived_bytes_before = 0;
......
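The statistics mentioned above are just a before/after difference: sum the survived bytes across all scavengers, scan the stack and scavenge what it newly keeps alive, then sum again and report the delta. A hedged sketch of that bookkeeping (stub types; the original comment itself notes this path is not intended for production):

#include <cstddef>
#include <cstdio>
#include <vector>

// Stub scavenger: only the survived-bytes counters matter for this sketch.
struct Scavenger {
  size_t bytes_copied = 0;
  size_t bytes_promoted = 0;
};

size_t SurvivedBytes(const std::vector<Scavenger>& scavengers) {
  size_t total = 0;
  for (const Scavenger& s : scavengers)
    total += s.bytes_copied + s.bytes_promoted;
  return total;
}

int main() {
  std::vector<Scavenger> scavengers = {{100, 50}, {30, 0}};
  const size_t survived_bytes_before = SurvivedBytes(scavengers);

  // ... scan the stack and scavenge the newly discovered objects here ...
  scavengers[0].bytes_copied += 25;  // pretend stack roots kept 25 bytes alive

  const size_t survived_bytes_after = SurvivedBytes(scavengers);
  std::printf("stack scanning kept %zu extra bytes alive\n",
              survived_bytes_after - survived_bytes_before);
}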
@@ -2510,7 +2510,7 @@ inline void setcc_32_no_spill(LiftoffAssembler* assm, Condition cond,
assm->movzx_b(dst, tmp_byte_reg);
}
-// Setcc into dst register (no contraints). Might spill.
+// Setcc into dst register (no constraints). Might spill.
inline void setcc_32(LiftoffAssembler* assm, Condition cond, Register dst) {
Register tmp_byte_reg = GetTmpByteRegister(assm, dst);
setcc_32_no_spill(assm, cond, dst, tmp_byte_reg);
......
@@ -1020,7 +1020,7 @@
'test-global-handles/FinalizerDiesAndKeepsPhantomAliveOnMarkCompact': [SKIP],
'test-global-handles/FinalizerWeakness': [SKIP],
'test-global-handles/GCFromWeakCallbacks': [SKIP],
-'test-global-handles/PhatomHandlesWithoutCallbacks': [SKIP],
+'test-global-handles/PhantomHandlesWithoutCallbacks': [SKIP],
'test-global-handles/SecondPassPhantomCallbacks': [SKIP],
'test-global-handles/WeakHandleToUnmodifiedJSApiObjectDiesOnMarkCompact': [SKIP],
'test-global-handles/WeakHandleToUnmodifiedJSObjectDiesOnMarkCompact': [SKIP],
......