Commit ddd2bef2 authored by Nikolaos Papaspyrou, committed by V8 LUCI CQ

cleanup: Fix some typos

Mostly in comments, again, not much to be said...

Bug: v8:12425
Change-Id: Id847447ade3100f13c5da8931fbb47d06ff1ce1f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3711883
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Nikolaos Papaspyrou <nikolaos@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81242}
parent bc7b89f7
@@ -29,7 +29,7 @@ asm(
#endif
// Push all callee-saved registers.
// lr, TOC pointer, r16 to r31. 160 bytes.
-// The parameter save area shall be allocated by the caller. 112 btes.
+// The parameter save area shall be allocated by the caller. 112 bytes.
// At anytime, SP (r1) needs to be multiple of 16 (i.e. 16-aligned).
" mflr 0 \n"
" std 0, 16(1) \n"
@@ -6,7 +6,7 @@
// stack scanning.
//
// We cannot rely on clang generating the function and right symbol mangling
-// as `__attribite__((naked))` does not prevent clang from generating TSAN
+// as `__attribute__((naked))` does not prevent clang from generating TSAN
// function entry stubs (`__tsan_func_entry`). Even with
// `__attribute__((no_sanitize_thread)` annotation clang generates the entry
// stub.
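For context, here is a minimal sketch (not from the V8 tree; the function name is invented) of the attribute combination the comment above refers to. Even with both attributes, clang building with -fsanitize=thread can still insert a call to __tsan_func_entry at the top of the function, which is why the real trampoline is written as a free-standing asm() block instead.

// Minimal sketch, not V8 code: the naked + no_sanitize_thread variant
// that the comment above rules out. Under TSAN, clang may still emit a
// function entry stub before the body below runs.
__attribute__((naked)) __attribute__((no_sanitize_thread))
void NakedTrampolineSketch() {
  // A naked function body should contain only asm statements.
  asm volatile("ret");  // x86-64 return; illustrative only.
}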
@@ -22,7 +22,7 @@ static_assert(api_constants::kLargeObjectSizeThreshold ==
kLargeObjectSizeThreshold);
#if !(defined(V8_TARGET_ARCH_32_BIT) && defined(V8_CC_GNU))
-// GCC on x86 has alignof(std::max_alignt) == 16 (quad word) which is not
+// GCC on x86 has alignof(std::max_align_t) == 16 (quad word) which is not
// satisfied by Oilpan.
static_assert(api_constants::kMaxSupportedAlignment >=
alignof(std::max_align_t),
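As a quick way to see the platform value this assertion compares against, a tiny standalone program (independent of V8) can print it; per the comment above, GCC targeting 32-bit x86 reports 16 here.

// Standalone sketch, not V8 code: print the platform's max alignment,
// the value the static_assert above checks kMaxSupportedAlignment against.
#include <cstddef>
#include <iostream>

int main() {
  std::cout << alignof(std::max_align_t) << "\n";
  return 0;
}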
@@ -199,14 +199,14 @@ void MovableReferences::RelocateInteriorReferences(Address from, Address to,
if (!interior_it->second) {
// Update the interior reference value, so that when the object the slot
// is pointing to is moved, it can re-use this value.
-Address refernece = to + offset;
-interior_it->second = refernece;
+Address reference = to + offset;
+interior_it->second = reference;
// If the |slot|'s content is pointing into the region [from, from +
// size) we are dealing with an interior pointer that does not point to
// a valid HeapObjectHeader. Such references need to be fixed up
// immediately.
-Address& reference_contents = *reinterpret_cast<Address*>(refernece);
+Address& reference_contents = *reinterpret_cast<Address*>(reference);
if (reference_contents > from && reference_contents < (from + size)) {
reference_contents = reference_contents - from + to;
}
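The fix-up rule in this hunk boils down to pointer rebasing: when an object moves from `from` to `to`, any slot contents still pointing into the old region [from, from + size) are shifted by the same displacement. A small self-contained sketch (not the V8 implementation):

// Self-contained sketch, not V8 code: rebase a pointer value that still
// points into the moved-from region [from, from + size).
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

Address RebaseIfInterior(Address contents, Address from, Address to,
                         size_t size) {
  if (contents > from && contents < from + size) {
    // Keep the same offset within the object, now relative to `to`.
    return contents - from + to;
  }
  return contents;  // Points outside the moved object; leave unchanged.
}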
@@ -169,7 +169,7 @@ void ObjectAllocator::RefillLinearAllocationBuffer(NormalPageSpace& space,
// allocation or we finish sweeping all pages of this heap.
Sweeper& sweeper = raw_heap_.heap()->sweeper();
// TODO(chromium:1056170): Investigate whether this should be a loop which
-// would result in more agressive re-use of memory at the expense of
+// would result in more aggressive re-use of memory at the expense of
// potentially larger allocation time.
if (sweeper.SweepForAllocationIfRunning(&space, size)) {
// Sweeper found a block of at least `size` bytes. Allocation from the
@@ -88,7 +88,7 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;
const Address offset_;
-// `fully_populated_` is used to denote that the bitmap is popluated with all
+// `fully_populated_` is used to denote that the bitmap is populated with all
// currently allocated objects on the page and is in a consistent state. It is
// used to guard against using the bitmap for finding headers during
// concurrent sweeping.
@@ -194,7 +194,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
// A backend that is used for allocating and freeing normal and large pages.
//
-// Internally maintaints a set of PageMemoryRegions. The backend keeps its used
+// Internally maintains a set of PageMemoryRegions. The backend keeps its used
// regions alive.
class V8_EXPORT_PRIVATE PageBackend final {
public:
@@ -807,7 +807,7 @@ class Sweeper::SweeperImpl final {
StatsCollector::kIncrementalSweep);
StatsCollector::EnabledScope inner_scope(
stats_collector_, StatsCollector::kSweepOnAllocation);
-MutatorThreadSweepingScope sweeping_in_progresss(*this);
+MutatorThreadSweepingScope sweeping_in_progress(*this);
{
// First, process unfinalized pages as finalizing a page is faster than
@@ -924,7 +924,7 @@ class Sweeper::SweeperImpl final {
StatsCollector::ScopeId internal_scope_id) {
if (!is_in_progress_) return true;
-MutatorThreadSweepingScope sweeping_in_progresss(*this);
+MutatorThreadSweepingScope sweeping_in_progress(*this);
bool sweep_complete;
{
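Both hunks above rename the same kind of local. Judging only from its use here (a named object constructed on the stack and never referenced again), MutatorThreadSweepingScope looks like an RAII scope guard; a hypothetical sketch of that pattern follows, not the actual V8 class.

// Hypothetical sketch, not the real MutatorThreadSweepingScope: an RAII
// guard that marks "mutator thread is sweeping" for the scope's lifetime.
class MutatorThreadSweepingScopeSketch final {
 public:
  explicit MutatorThreadSweepingScopeSketch(bool& in_progress)
      : in_progress_(in_progress) {
    in_progress_ = true;
  }
  ~MutatorThreadSweepingScopeSketch() { in_progress_ = false; }

  MutatorThreadSweepingScopeSketch(const MutatorThreadSweepingScopeSketch&) =
      delete;
  MutatorThreadSweepingScopeSketch& operator=(
      const MutatorThreadSweepingScopeSketch&) = delete;

 private:
  bool& in_progress_;
};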