Commit 7926e5d2 authored by Nikolaos Papaspyrou, committed by V8 LUCI CQ

cleanup: Fix some typos

Mostly in comments, again, not much to be said...

Bug: v8:12425
Change-Id: If0890132606b5ae8d5e173907bfdc063b9811ac6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3657428
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Nikolaos Papaspyrou <nikolaos@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80689}
parent 698413be
@@ -109,7 +109,7 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
const size_t reserved_area = GetWritableReservedAreaSize();
if (requested < (kMaximalCodeRangeSize - reserved_area)) {
requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
// Fullfilling both reserved pages requirement and huge code area
// Fulfilling both reserved pages requirement and huge code area
// alignments is not supported (requires re-implementation).
DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
}
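The rounding above pads the requested reservation so the writable reserved area occupies whole pages. A minimal sketch of that round-up-to-page-boundary idiom, assuming an illustrative 4 KB page size rather than V8's actual MemoryChunk::kPageSize:

#include <cstddef>

constexpr size_t kPageSize = 4096;  // Illustrative power-of-two page size.

// Round a size up to the next multiple of the page size.
constexpr size_t RoundUpToPage(size_t size) {
  return (size + kPageSize - 1) & ~(kPageSize - 1);
}

static_assert(RoundUpToPage(1) == 4096, "a partial page rounds up to one page");
static_assert(RoundUpToPage(4096) == 4096, "an exact multiple is unchanged");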
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
// Record code statisitcs.
// Record code statistics.
void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
Isolate* isolate) {
PtrComprCageBase cage_base(isolate);
@@ -29,7 +29,7 @@ void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
isolate->set_external_script_source_size(size);
}
} else if (object.IsAbstractCode(cage_base)) {
// Record code+metadata statisitcs.
// Record code+metadata statistics.
AbstractCode abstract_code = AbstractCode::cast(object);
int size = abstract_code.SizeIncludingMetadata();
if (abstract_code.IsCode(cage_base)) {
@@ -96,7 +96,7 @@ void CodeStatistics::ReportCodeStatistics(Isolate* isolate) {
}
PrintF("\n");
// Report code and metadata statisitcs
// Report code and metadata statistics
if (isolate->code_and_metadata_size() > 0) {
PrintF("Code size including metadata : %10d bytes\n",
isolate->code_and_metadata_size());
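The statistics code above follows an accumulate-then-report pattern: sizes are summed into per-category counters kept on the isolate, and each category is printed only when non-zero. A compact sketch of that pattern, with illustrative counter names rather than V8's real isolate fields:

#include <cstddef>
#include <cstdio>

struct CodeStatsSketch {
  size_t code_and_metadata_size = 0;
  size_t external_script_source_size = 0;

  void RecordCodeAndMetadata(size_t size) { code_and_metadata_size += size; }
  void RecordExternalScriptSource(size_t size) {
    external_script_source_size += size;
  }

  void Report() const {
    if (code_and_metadata_size > 0) {
      std::printf("Code size including metadata : %10zu bytes\n",
                  code_and_metadata_size);
    }
    if (external_script_source_size > 0) {
      std::printf("External script source size  : %10zu bytes\n",
                  external_script_source_size);
    }
  }
};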
@@ -455,7 +455,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
unsigned mark_compact_epoch,
bool should_keep_ages_unchanged) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
int kObjectsUntilInterruptCheck = 1000;
uint8_t task_id = delegate->GetTaskId() + 1;
TaskState* task_state = &task_state_[task_id];
auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
@@ -500,7 +500,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
size_t current_marked_bytes = 0;
int objects_processed = 0;
while (current_marked_bytes < kBytesUntilInterruptCheck &&
objects_processed < kObjectsUntilInterrupCheck) {
objects_processed < kObjectsUntilInterruptCheck) {
HeapObject object;
if (!local_marking_worklists.Pop(&object)) {
done = true;
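The loop above bounds how much work the marker does between interrupt checks: it drains the worklist until roughly 64 KB of objects or 1000 objects have been processed, whichever comes first. A self-contained sketch of that bounded-drain pattern, using a plain deque as a stand-in for V8's marking worklist:

#include <cstddef>
#include <deque>

struct ObjectSketch {
  size_t size;
};

// Returns true when the worklist is fully drained, false when the per-slice
// budget was exhausted and the caller should check for interrupts.
bool DrainWithInterruptBudget(std::deque<ObjectSketch>* worklist) {
  const size_t kBytesUntilInterruptCheck = 64 * 1024;
  const int kObjectsUntilInterruptCheck = 1000;

  size_t current_marked_bytes = 0;
  int objects_processed = 0;
  while (current_marked_bytes < kBytesUntilInterruptCheck &&
         objects_processed < kObjectsUntilInterruptCheck) {
    if (worklist->empty()) return true;
    ObjectSketch object = worklist->front();
    worklist->pop_front();
    current_marked_bytes += object.size;  // "Visit" the object.
    ++objects_processed;
  }
  return false;
}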
@@ -74,7 +74,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
// Schedules asynchronous job to perform concurrent marking at |priority| if
// not already running, otherwise adjusts the number of workers running job
// and the priority if diffrent from the default kUserVisible.
// and the priority if different from the default kUserVisible.
void RescheduleJobIfNeeded(
TaskPriority priority = TaskPriority::kUserVisible);
// Flushes native context sizes to the given table of the main thread.
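The comment above describes a schedule-or-adjust policy: start the marking job if none is running, otherwise only touch the running job when the caller asks for something other than the default priority. A rough sketch of that policy in isolation, with a stand-in job handle instead of V8's platform job API:

enum class TaskPriority { kUserVisible, kUserBlocking };

struct JobHandleSketch {
  bool running = false;
  TaskPriority priority = TaskPriority::kUserVisible;
};

void RescheduleJobIfNeededSketch(
    JobHandleSketch* job, TaskPriority priority = TaskPriority::kUserVisible) {
  if (!job->running) {
    // Not running yet: start a new job at the requested priority.
    job->running = true;
    job->priority = priority;
  } else if (priority != TaskPriority::kUserVisible) {
    // Already running: only adjust the priority if it differs from the default.
    job->priority = priority;
  }
}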
@@ -383,7 +383,7 @@ class Heap {
static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
// GCs that are forced, either through testing configurations (requring
// GCs that are forced, either through testing configurations (requiring
// --expose-gc) or through DevTools (using LowMemoryNotification).
static const int kForcedGC = 2;
@@ -1473,7 +1473,7 @@ class Heap {
V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();
// We allow incremental marking to overshoot the V8 and global allocation
// limit for performace reasons. If the overshoot is too large then we are
// limit for performance reasons. If the overshoot is too large then we are
// more eager to finalize incremental marking.
bool AllocationLimitOvershotByLargeMargin();
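The comment above captures a deliberate trade-off: incremental marking may run past the allocation limit for performance, but a large overshoot should make the heap finalize marking sooner. A hedged sketch of what such a check can look like; the margin constant is purely illustrative and not V8's actual heuristic:

#include <cstddef>

bool AllocationLimitOvershotByLargeMarginSketch(size_t size_of_objects,
                                                size_t allocation_limit) {
  const size_t kLargeMarginBytes = 8 * 1024 * 1024;  // Illustrative 8 MB margin.
  if (size_of_objects <= allocation_limit) return false;
  return size_of_objects - allocation_limit > kLargeMarginBytes;
}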
@@ -1842,7 +1842,7 @@ class Heap {
// slots since the sweeper can run concurrently.
void CreateFillerObjectAtSweeper(Address addr, int size);
// Creates a filler object in the specificed memory area. This method is the
// Creates a filler object in the specified memory area. This method is the
// internal method used by all CreateFillerObjectAtXXX-methods.
void CreateFillerObjectAtRaw(Address addr, int size,
ClearFreedMemoryMode clear_memory_mode,
@@ -2263,7 +2263,7 @@ class Heap {
StressScavengeObserver* stress_scavenge_observer_ = nullptr;
// The maximum percent of the marking limit reached without causing marking.
// This is tracked when specyfing --fuzzer-gc-analysis.
// This is tracked when specifying --fuzzer-gc-analysis.
double max_marking_limit_reached_ = 0.0;
// How many mark-sweep collections happened.
@@ -2691,7 +2691,7 @@ using CodeTPageHeaderModificationScope = NopRwxMemoryWriteScope;
using CodeTPageHeaderModificationScope = CodePageHeaderModificationScope;
#endif // V8_EXTERNAL_CODE_SPACE
// The CodePageMemoryModificationScope does not check if tansitions to
// The CodePageMemoryModificationScope does not check if transitions to
// writeable and back to executable are actually allowed, i.e. the MemoryChunk
// was registered to be executable. It can be used by concurrent threads.
class V8_NODISCARD CodePageMemoryModificationScope {
@@ -2722,7 +2722,7 @@ class V8_NODISCARD IgnoreLocalGCRequests {
};
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// or care about inter-generational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
@@ -230,7 +230,7 @@ size_t PagedSpaceBase::CommittedPhysicalMemory() const {
return CommittedMemory();
}
CodePageHeaderModificationScope rwx_write_scope(
"Updating high water mark for Code pages requries write access to "
"Updating high water mark for Code pages requires write access to "
"the Code page headers");
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
return committed_physical_memory();
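The scope object above is an RAII guard: constructing it makes the otherwise protected code page headers writable for the duration of the enclosing block, and its destructor restores the original protection. A minimal sketch of that idiom, with placeholder permission-toggling hooks rather than V8's real memory-protection calls:

class ScopedHeaderWriteAccessSketch {
 public:
  explicit ScopedHeaderWriteAccessSketch(const char* reason) : reason_(reason) {
    // Hypothetically flip the affected pages to writable here.
  }
  ~ScopedHeaderWriteAccessSketch() {
    // ...and restore the original (e.g. read+execute) protection here.
  }

  ScopedHeaderWriteAccessSketch(const ScopedHeaderWriteAccessSketch&) = delete;
  ScopedHeaderWriteAccessSketch& operator=(const ScopedHeaderWriteAccessSketch&) =
      delete;

 private:
  const char* reason_;  // Human-readable justification, as in the diff above.
};

void UpdateHighWaterMarkSketch() {
  ScopedHeaderWriteAccessSketch scope(
      "Updating high water mark for Code pages requires write access to "
      "the Code page headers");
  // The header update would happen here while write access is held.
}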
@@ -531,7 +531,7 @@ int ScavengeVisitor::VisitEphemeronHashTable(Map map,
EphemeronHashTable table) {
// Register table with the scavenger, so it can take care of the weak keys
// later. This allows to only iterate the tables' values, which are treated
// as strong independetly of whether the key is live.
// as strong independently of whether the key is live.
scavenger_->AddEphemeronHashTable(table);
for (InternalIndex i : table.IterateEntries()) {
ObjectSlot value_slot =
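The visit above encodes the ephemeron rule: the table is registered with the scavenger so the weak keys can be resolved later, and only the value slots are traced now, as strong references, regardless of whether their keys turn out to be live. A simplified sketch of that split, using plain containers in place of V8's table and scavenger types:

#include <utility>
#include <vector>

struct HeapObjectSketch {
  bool marked = false;
};

using EphemeronTableSketch =
    std::vector<std::pair<HeapObjectSketch*, HeapObjectSketch*>>;  // key, value

struct ScavengerSketch {
  std::vector<EphemeronTableSketch*> ephemeron_tables;
  void VisitStrong(HeapObjectSketch* object) { object->marked = true; }
};

void VisitEphemeronHashTableSketch(ScavengerSketch* scavenger,
                                   EphemeronTableSketch* table) {
  // Defer key handling: keys stay weak until the scavenger processes the table.
  scavenger->ephemeron_tables.push_back(table);
  for (auto& entry : *table) {
    // Values are treated as strong, independently of the key's liveness.
    scavenger->VisitStrong(entry.second);
  }
}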
@@ -229,7 +229,7 @@ class Page : public MemoryChunk {
Address area_end, VirtualMemory reservation, Executability executable);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
// from [page_addr .. page_addr + kPageSize]. This only works if the object
// is in fact in a page.
static Page* FromAddress(Address addr) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
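FromAddress above relies on pages being power-of-two sized and aligned, so the containing page can be recovered by masking off the low bits of any interior address; that is also why it only works when the object really lives on a regular page. A small sketch of the masking, with an illustrative page size rather than V8's actual constant:

#include <cstdint>

constexpr uintptr_t kPageAlignment = uintptr_t{1} << 18;  // Illustrative 256 KB.

constexpr uintptr_t PageBaseFromAddress(uintptr_t addr) {
  return addr & ~(kPageAlignment - 1);
}

static_assert(PageBaseFromAddress(kPageAlignment + 123) == kPageAlignment,
              "an interior address maps back to its page's base");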