Commit 010667b4 authored by Jakob Gruber, committed by Commit Bot

[nci] Cache NCI code in the compilation cache

This adds a new Code section to the compilation cache (aka isolate
cache), and inserts generated native context independent code into it.

Cache consumption will be implemented in a following CL.

Bug: v8:8888
Change-Id: I997c13da0fe547f395627a48f1cb7e5f19dfc3ba
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2288851
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68824}
parent 7286f388
......@@ -28,16 +28,17 @@ CompilationCache::CompilationCache(Isolate* isolate)
eval_global_(isolate),
eval_contextual_(isolate),
reg_exp_(isolate, kRegExpGenerations),
code_(isolate),
enabled_script_and_eval_(true) {
CompilationSubCache* subcaches[kSubCacheCount] = {
&script_, &eval_global_, &eval_contextual_, &reg_exp_};
&script_, &eval_global_, &eval_contextual_, &reg_exp_, &code_};
for (int i = 0; i < kSubCacheCount; ++i) {
subcaches_[i] = subcaches[i];
}
}
Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
DCHECK(generation < generations_);
DCHECK_LT(generation, generations());
Handle<CompilationCacheTable> result;
if (tables_[generation].IsUndefined(isolate())) {
result = CompilationCacheTable::New(isolate(), kInitialCacheSize);
......@@ -50,33 +51,41 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
return result;
}
void CompilationSubCache::Age() {
// Don't directly age single-generation caches.
if (generations_ == 1) {
if (!tables_[0].IsUndefined(isolate())) {
CompilationCacheTable::cast(tables_[0]).Age();
}
return;
}
// static
void CompilationSubCache::AgeByGeneration(CompilationSubCache* c) {
DCHECK_GT(c->generations(), 1);
// Age the generations implicitly killing off the oldest.
for (int i = generations_ - 1; i > 0; i--) {
tables_[i] = tables_[i - 1];
for (int i = c->generations() - 1; i > 0; i--) {
c->tables_[i] = c->tables_[i - 1];
}
// Set the first generation as unborn.
tables_[0] = ReadOnlyRoots(isolate()).undefined_value();
c->tables_[0] = ReadOnlyRoots(c->isolate()).undefined_value();
}
// static
void CompilationSubCache::AgeCustom(CompilationSubCache* c) {
DCHECK_EQ(c->generations(), 1);
if (c->tables_[0].IsUndefined(c->isolate())) return;
CompilationCacheTable::cast(c->tables_[0]).Age();
}
// Script and eval caches age via the custom per-entry logic in
// CompilationCacheTable::Age; regexp and code caches age generationally,
// evicting the oldest table wholesale.
void CompilationCacheScript::Age() { AgeCustom(this); }
void CompilationCacheEval::Age() { AgeCustom(this); }
void CompilationCacheRegExp::Age() { AgeByGeneration(this); }
void CompilationCacheCode::Age() { AgeByGeneration(this); }
void CompilationSubCache::Iterate(RootVisitor* v) {
v->VisitRootPointers(Root::kCompilationCache, nullptr,
FullObjectSlot(&tables_[0]),
FullObjectSlot(&tables_[generations_]));
FullObjectSlot(&tables_[generations()]));
}
void CompilationSubCache::Clear() {
MemsetPointer(reinterpret_cast<Address*>(tables_),
ReadOnlyRoots(isolate()).undefined_value().ptr(), generations_);
ReadOnlyRoots(isolate()).undefined_value().ptr(),
generations());
}
void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
......@@ -253,6 +262,37 @@ void CompilationCacheRegExp::Put(Handle<String> source, JSRegExp::Flags flags,
CompilationCacheTable::PutRegExp(isolate(), table, source, flags, data));
}
MaybeHandle<Code> CompilationCacheCode::Lookup(Handle<SharedFunctionInfo> key) {
  // Open a local handle scope so table handles created while probing do not
  // escape into the caller's scope; otherwise old tables could be kept alive
  // even after the cache has been cleared.
  HandleScope scope(isolate());

  // Probe each generation, youngest first, until we find an entry.
  MaybeHandle<Code> probe;
  int found_generation = 0;
  while (found_generation < generations()) {
    Handle<CompilationCacheTable> table = GetTable(found_generation);
    probe = table->LookupCode(key);
    if (!probe.is_null()) break;
    found_generation++;
  }

  Handle<Code> value;
  if (!probe.ToHandle(&value)) {
    isolate()->counters()->compilation_cache_misses()->Increment();
    return MaybeHandle<Code>();
  }

  // A hit in an older generation is promoted back into the first generation
  // so that frequently-used entries survive ageing.
  if (found_generation != 0) Put(key, value);
  isolate()->counters()->compilation_cache_hits()->Increment();
  return scope.CloseAndEscape(value);
}
void CompilationCacheCode::Put(Handle<SharedFunctionInfo> key,
                               Handle<Code> value) {
  // Insertions always target the youngest (first) generation. PutCode may
  // grow (reallocate) the table, so re-install the returned handle.
  HandleScope scope(isolate());
  SetFirstTable(
      CompilationCacheTable::PutCode(isolate(), GetFirstTable(), key, value));
}
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
if (!IsEnabledScriptAndEval()) return;
......@@ -306,6 +346,10 @@ MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
return reg_exp_.Lookup(source, flags);
}
// Looks up native-context-independent code for the given SFI by delegating
// to the dedicated code sub-cache. Returns an empty handle on a miss.
MaybeHandle<Code> CompilationCache::LookupCode(Handle<SharedFunctionInfo> sfi) {
  return code_.Lookup(sfi);
}
void CompilationCache::PutScript(Handle<String> source,
Handle<Context> native_context,
LanguageMode language_mode,
......@@ -345,6 +389,11 @@ void CompilationCache::PutRegExp(Handle<String> source, JSRegExp::Flags flags,
reg_exp_.Put(source, flags, data);
}
// Associates the given SFI with native-context-independent code in the
// code sub-cache. This may overwrite an existing mapping.
void CompilationCache::PutCode(Handle<SharedFunctionInfo> shared,
                               Handle<Code> code) {
  code_.Put(shared, code);
}
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Clear();
......
......@@ -26,13 +26,11 @@ class CompilationSubCache {
public:
CompilationSubCache(Isolate* isolate, int generations)
: isolate_(isolate), generations_(generations) {
tables_ = NewArray<Object>(generations);
DCHECK_LE(generations, kMaxGenerations);
}
~CompilationSubCache() { DeleteArray(tables_); }
// Index for the first generation in the cache.
static const int kFirstGeneration = 0;
static constexpr int kFirstGeneration = 0;
static constexpr int kMaxGenerations = 2;
// Get the compilation cache tables for a specific generation.
Handle<CompilationCacheTable> GetTable(int generation);
......@@ -48,7 +46,7 @@ class CompilationSubCache {
// Age the sub-cache by evicting the oldest generation and creating a new
// young generation.
void Age();
virtual void Age() = 0;
// GC support.
void Iterate(RootVisitor* v);
......@@ -60,15 +58,20 @@ class CompilationSubCache {
void Remove(Handle<SharedFunctionInfo> function_info);
// Number of generations in this sub-cache.
inline int generations() { return generations_; }
int generations() const { return generations_; }
protected:
Isolate* isolate() { return isolate_; }
Isolate* isolate() const { return isolate_; }
// Ageing occurs either by removing the oldest generation, or with
// custom logic implemented in CompilationCacheTable::Age.
static void AgeByGeneration(CompilationSubCache* c);
static void AgeCustom(CompilationSubCache* c);
private:
Isolate* isolate_;
int generations_; // Number of generations.
Object* tables_; // Compilation cache tables - one for each generation.
Isolate* const isolate_;
const int generations_;
Object tables_[kMaxGenerations]; // One for each generation.
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
};
......@@ -89,6 +92,8 @@ class CompilationCacheScript : public CompilationSubCache {
LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info);
void Age() override;
private:
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
MaybeHandle<Object> name, int line_offset, int column_offset,
......@@ -124,6 +129,8 @@ class CompilationCacheEval : public CompilationSubCache {
Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
int position);
void Age() override;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
};
......@@ -139,10 +146,33 @@ class CompilationCacheRegExp : public CompilationSubCache {
void Put(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data);
void Age() override;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
// Sub-cache for Code objects. All code inserted into this cache must
// be usable across different native contexts.
class CompilationCacheCode : public CompilationSubCache {
 public:
  explicit CompilationCacheCode(Isolate* isolate)
      : CompilationSubCache(isolate, kGenerations) {}

  // Returns cached code for the given SFI, or an empty handle on a miss.
  MaybeHandle<Code> Lookup(Handle<SharedFunctionInfo> key);
  // Inserts (or overwrites) the SFI-to-code mapping in the first generation.
  void Put(Handle<SharedFunctionInfo> key, Handle<Code> value);

  void Age() override;

  // TODO(jgruber,v8:8888): For simplicity we use the generational
  // approach here, but could consider something else (or more
  // generations) in the future.
  static constexpr int kGenerations = 2;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheCode);
};
// The compilation cache keeps shared function infos for compiled
// scripts and evals. The shared function infos are looked up using
// the source string as the key. For regular expressions the
......@@ -170,6 +200,8 @@ class V8_EXPORT_PRIVATE CompilationCache {
MaybeHandle<FixedArray> LookupRegExp(Handle<String> source,
JSRegExp::Flags flags);
MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> sfi);
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
void PutScript(Handle<String> source, Handle<Context> native_context,
......@@ -188,6 +220,8 @@ class V8_EXPORT_PRIVATE CompilationCache {
void PutRegExp(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data);
void PutCode(Handle<SharedFunctionInfo> shared, Handle<Code> code);
// Clear the cache - also used to initialize the cache at startup.
void Clear();
......@@ -218,9 +252,6 @@ class V8_EXPORT_PRIVATE CompilationCache {
base::HashMap* EagerOptimizingSet();
// The number of sub caches covering the different types to cache.
static const int kSubCacheCount = 4;
bool IsEnabledScriptAndEval() const {
return FLAG_compilation_cache && enabled_script_and_eval_;
}
......@@ -233,6 +264,9 @@ class V8_EXPORT_PRIVATE CompilationCache {
CompilationCacheEval eval_global_;
CompilationCacheEval eval_contextual_;
CompilationCacheRegExp reg_exp_;
CompilationCacheCode code_;
static constexpr int kSubCacheCount = 5;
CompilationSubCache* subcaches_[kSubCacheCount];
// Current enable state of the compilation cache for scripts and eval.
......
......@@ -773,6 +773,33 @@ void InsertCodeIntoOptimizedCodeCache(
}
}
// Inserts freshly-generated native-context-independent (NCI) code into the
// isolate-wide compilation cache, keyed on the SharedFunctionInfo, and marks
// the SFI so future lookups know a cached entry may exist.
void InsertCodeIntoCompilationCache(Isolate* isolate,
                                    OptimizedCompilationInfo* info) {
  if (!info->native_context_independent()) return;

  // TODO(jgruber,v8:8888): This should turn into a DCHECK once we
  // spawn dedicated NCI compile tasks.
  if (!info->osr_offset().IsNone()) return;

  Handle<Code> code = info->code();
  DCHECK(!info->function_context_specializing());
  DCHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);

  Handle<SharedFunctionInfo> sfi = info->shared_info();
  CompilationCache* cache = isolate->compilation_cache();
  cache->PutCode(sfi, code);
  DCHECK(!cache->LookupCode(sfi).is_null());

  // Note the cache entry may later be evicted by ageing, hence 'maybe'.
  sfi->set_maybe_has_cached_code(true);

  if (FLAG_trace_turbo_nci) {
    StdoutStream os;
    // std::endl already flushes the stream; the extra std::flush was
    // redundant.
    os << "NCI cache insertion: " << Brief(*sfi) << ", " << Brief(*code)
       << std::endl;
  }
}
bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate) {
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RuntimeCallTimerScope runtimeTimer(
......@@ -944,8 +971,11 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
} else {
if (GetOptimizedCodeNow(job.get(), isolate))
DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
if (GetOptimizedCodeNow(job.get(), isolate)) {
InsertCodeIntoCompilationCache(isolate, compilation_info);
return compilation_info->code();
}
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
......@@ -2732,6 +2762,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
isolate);
InsertCodeIntoOptimizedCodeCache(compilation_info);
InsertCodeIntoCompilationCache(isolate, compilation_info);
if (FLAG_trace_opt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[completed optimizing ");
......
......@@ -20,13 +20,14 @@ namespace internal {
OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure, bool native_context_independent)
: OptimizedCompilationInfo(Code::OPTIMIZED_FUNCTION, zone) {
: code_kind_(Code::OPTIMIZED_FUNCTION),
zone_(zone),
optimization_id_(isolate->NextOptimizationId()) {
DCHECK_EQ(*shared, closure->shared());
DCHECK(shared->is_compiled());
bytecode_array_ = handle(shared->GetBytecodeArray(), isolate);
shared_info_ = shared;
closure_ = closure;
optimization_id_ = isolate->NextOptimizationId();
// Collect source positions for optimized code when profiling or if debugger
// is active, to be able to get more precise source positions at the price of
......@@ -37,20 +38,17 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
if (native_context_independent) set_native_context_independent();
SetTracingFlags(shared->PassesFilter(FLAG_trace_turbo_filter));
ConfigureFlags();
}
OptimizedCompilationInfo::OptimizedCompilationInfo(
Vector<const char> debug_name, Zone* zone, Code::Kind code_kind)
: OptimizedCompilationInfo(code_kind, zone) {
debug_name_ = debug_name;
: code_kind_(code_kind),
zone_(zone),
optimization_id_(kNoOptimizationId),
debug_name_(debug_name) {
SetTracingFlags(
PassesFilter(debug_name, CStrVector(FLAG_trace_turbo_filter)));
}
OptimizedCompilationInfo::OptimizedCompilationInfo(Code::Kind code_kind,
Zone* zone)
: code_kind_(code_kind), zone_(zone) {
ConfigureFlags();
}
......@@ -59,6 +57,8 @@ bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const {
switch (flag) {
case kPoisonRegisterArguments:
return untrusted_code_mitigations();
case kFunctionContextSpecializing:
return !native_context_independent();
default:
return true;
}
......@@ -84,7 +84,8 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case Code::OPTIMIZED_FUNCTION:
set_called_with_code_start_register();
set_switch_jump_table();
if (FLAG_function_context_specialization) {
if (FLAG_function_context_specialization &&
!native_context_independent()) {
set_function_context_specializing();
}
if (FLAG_turbo_splitting) {
......
......@@ -221,7 +221,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
}
private:
OptimizedCompilationInfo(Code::Kind code_kind, Zone* zone);
void ConfigureFlags();
void SetFlag(Flag flag) { flags_ |= flag; }
......@@ -234,15 +233,13 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
PoisoningMitigationLevel poisoning_level_ =
PoisoningMitigationLevel::kDontPoison;
Code::Kind code_kind_;
const Code::Kind code_kind_;
int32_t builtin_index_ = -1;
// We retain a reference the bytecode array specifically to ensure it doesn't
// get flushed while we are optimizing the code.
Handle<BytecodeArray> bytecode_array_;
Handle<SharedFunctionInfo> shared_info_;
Handle<JSFunction> closure_;
// The compiled code.
......@@ -259,7 +256,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// The zone from which the compilation pipeline working on this
// OptimizedCompilationInfo allocates.
Zone* zone_;
Zone* const zone_;
std::unique_ptr<DeferredHandles> deferred_handles_;
......@@ -267,7 +264,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
InlinedFunctionList inlined_functions_;
int optimization_id_ = -1;
static constexpr int kNoOptimizationId = -1;
const int optimization_id_;
unsigned inlined_bytecode_size_ = 0;
// The current OSR frame for specialization or {nullptr}.
......
......@@ -1082,11 +1082,13 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
// Determine whether to specialize the code for the function's context.
// We can't do this in the case of OSR, because we want to cache the
// generated code on the native context keyed on SharedFunctionInfo.
// We also can't do this for native context independent code (yet).
// TODO(mythria): Check if it is better to key the OSR cache on JSFunction and
// allow context specialization for OSR code.
if (compilation_info()->closure()->raw_feedback_cell().map() ==
ReadOnlyRoots(isolate).one_closure_cell_map() &&
!compilation_info()->is_osr()) {
!compilation_info()->is_osr() &&
!compilation_info()->native_context_independent()) {
compilation_info()->set_function_context_specializing();
data_.ChooseSpecializationContext();
}
......
......@@ -53,8 +53,15 @@ uint32_t CompilationCacheShape::StringSharedHash(String source,
uint32_t CompilationCacheShape::HashForObject(ReadOnlyRoots roots,
Object object) {
// Eval: The key field contains the hash as a Number.
if (object.IsNumber()) return static_cast<uint32_t>(object.Number());
// Code: The key field contains the SFI key.
if (object.IsSharedFunctionInfo()) {
return SharedFunctionInfo::cast(object).Hash();
}
// Script: See StringSharedKey::ToHandle for the encoding.
FixedArray val = FixedArray::cast(object);
if (val.map() == roots.fixed_cow_array_map()) {
DCHECK_EQ(4, val.length());
......@@ -66,7 +73,10 @@ uint32_t CompilationCacheShape::HashForObject(ReadOnlyRoots roots,
int position = Smi::ToInt(val.get(3));
return StringSharedHash(source, shared, language_mode, position);
}
DCHECK_LT(2, val.length());
// RegExp: The key field (and the value field) contains the
// JSRegExp::data fixed array.
DCHECK_GE(val.length(), JSRegExp::kMinDataArrayLength);
return RegExpHash(String::cast(val.get(JSRegExp::kSourceIndex)),
Smi::cast(val.get(JSRegExp::kFlagsIndex)));
}
......
......@@ -74,17 +74,22 @@ class InfoCellPair {
EXTERN_DECLARE_HASH_TABLE(CompilationCacheTable, CompilationCacheShape)
// This cache is used in two different variants. For regexp caching, it simply
// maps identifying info of the regexp to the cached regexp object. Scripts and
// eval code only gets cached after a second probe for the code object. To do
// so, on first "put" only a hash identifying the source is entered into the
// cache, mapping it to a lifetime count of the hash. On each call to Age all
// such lifetimes get reduced, and removed once they reach zero. If a second put
// is called while such a hash is live in the cache, the hash gets replaced by
// an actual cache entry. Age also removes stale live entries from the cache.
// Such entries are identified by SharedFunctionInfos pointing to either the
// recompilation stub, or to "old" code. This avoids memory leaks due to
// premature caching of scripts and eval strings that are never needed later.
// This cache is used in multiple different variants.
//
// For regexp caching, it simply maps identifying info of the regexp
// to the cached regexp object.
//
// Scripts and eval code only gets cached after a second probe for the
// code object. To do so, on first "put" only a hash identifying the
// source is entered into the cache, mapping it to a lifetime count of
// the hash. On each call to Age all such lifetimes get reduced, and
// removed once they reach zero. If a second put is called while such
// a hash is live in the cache, the hash gets replaced by an actual
// cache entry. Age also removes stale live entries from the cache.
// Such entries are identified by SharedFunctionInfos pointing to
// either the recompilation stub, or to "old" code. This avoids memory
// leaks due to premature caching of scripts and eval strings that are
// never needed later.
class CompilationCacheTable
: public HashTable<CompilationCacheTable, CompilationCacheShape> {
public:
......@@ -98,6 +103,8 @@ class CompilationCacheTable
Handle<Context> native_context,
LanguageMode language_mode, int position);
Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> key);
static Handle<CompilationCacheTable> PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> native_context, LanguageMode language_mode,
......@@ -110,6 +117,9 @@ class CompilationCacheTable
static Handle<CompilationCacheTable> PutRegExp(
Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
JSRegExp::Flags flags, Handle<FixedArray> value);
static Handle<CompilationCacheTable> PutCode(
Isolate* isolate, Handle<CompilationCacheTable> cache,
Handle<SharedFunctionInfo> key, Handle<Code> value);
void Remove(Object value);
void Age();
static const int kHashGenerations = 10;
......
......@@ -137,6 +137,12 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static const int kSourceIndex = kTagIndex + 1;
static const int kFlagsIndex = kSourceIndex + 1;
static const int kDataIndex = kFlagsIndex + 1;
// TODO(jgruber): Rename kDataIndex to something more appropriate.
// There is no 'data' field, kDataIndex is just a marker for the
// first non-generic index.
static constexpr int kMinDataArrayLength = kDataIndex;
// The data fields are used in different ways depending on the
// value of the tag.
// Atom regexps (literal strings).
......
......@@ -6561,6 +6561,18 @@ class RegExpKey : public HashTableKey {
Smi flags_;
};
// CodeKey carries the SharedFunctionInfo key associated with a Code
// object value.
// CodeKey carries the SharedFunctionInfo key associated with a Code
// object value.
class CodeKey : public HashTableKey {
 public:
  // The hash is taken from the SFI itself so lookups and insertions agree
  // with CompilationCacheShape::HashForObject.
  explicit CodeKey(Handle<SharedFunctionInfo> key)
      : HashTableKey(key->Hash()), key_(key) {}

  // Matches by object identity of the SharedFunctionInfo (presumably
  // pointer/taggged-value equality — NOTE(review): confirm Object::operator==
  // semantics here).
  bool IsMatch(Object string) override { return *key_ == string; }

  Handle<SharedFunctionInfo> key_;
};
// InternalizedStringKey carries a string/internalized-string object as key.
class InternalizedStringKey final : public StringTableKey {
public:
......@@ -7251,6 +7263,16 @@ Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
}
MaybeHandle<Code> CompilationCacheTable::LookupCode(
    Handle<SharedFunctionInfo> key) {
  Isolate* isolate = GetIsolate();
  // No allocation may happen while we hold a raw table entry.
  DisallowHeapAllocation no_gc;
  CodeKey code_key(key);
  InternalIndex entry = FindEntry(isolate, &code_key);
  if (entry.is_not_found()) return MaybeHandle<Code>();
  // The value lives in the slot following the key.
  Object raw_value = get(EntryToIndex(entry) + 1);
  return Handle<Code>(Code::cast(raw_value), isolate);
}
Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> native_context, LanguageMode language_mode,
......@@ -7314,13 +7336,25 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
cache = EnsureCapacity(isolate, cache);
InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
// We store the value in the key slot, and compare the search key
// to the stored value with a custon IsMatch function during lookups.
// to the stored value with a custom IsMatch function during lookups.
cache->set(EntryToIndex(entry), *value);
cache->set(EntryToIndex(entry) + 1, *value);
cache->ElementAdded();
return cache;
}
Handle<CompilationCacheTable> CompilationCacheTable::PutCode(
    Isolate* isolate, Handle<CompilationCacheTable> cache,
    Handle<SharedFunctionInfo> key, Handle<Code> value) {
  CodeKey code_key(key);

  // Growing may reallocate the table, so callers must use the returned
  // handle from here on.
  cache = EnsureCapacity(isolate, cache);
  InternalIndex entry = cache->FindInsertionEntry(isolate, code_key.Hash());

  // Key slot holds the SFI, the adjacent slot holds the code value.
  int index = EntryToIndex(entry);
  cache->set(index, *key);
  cache->set(index + 1, *value);
  cache->ElementAdded();
  return cache;
}
void CompilationCacheTable::Age() {
DisallowHeapAllocation no_allocation;
Object the_hole_value = GetReadOnlyRoots().the_hole_value();
......
......@@ -186,6 +186,9 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2,
has_static_private_methods_or_accessors,
SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, maybe_has_cached_code,
SharedFunctionInfo::MaybeHasCachedCodeBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, syntax_kind,
SharedFunctionInfo::FunctionSyntaxKindBits)
......
......@@ -410,6 +410,11 @@ class SharedFunctionInfo : public HeapObject {
DECL_BOOLEAN_ACCESSORS(class_scope_has_private_brand)
DECL_BOOLEAN_ACCESSORS(has_static_private_methods_or_accessors)
// True if a Code object associated with this SFI has been inserted into the
// compilation cache. Note that the cache entry may be removed by aging,
// hence the 'maybe'.
DECL_BOOLEAN_ACCESSORS(maybe_has_cached_code)
// Is this function a top-level function (scripts, evals).
DECL_BOOLEAN_ACCESSORS(is_toplevel)
......
......@@ -44,6 +44,7 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
bitfield struct SharedFunctionInfoFlags2 extends uint8 {
class_scope_has_private_brand: bool: 1 bit;
has_static_private_methods_or_accessors: bool: 1 bit;
maybe_has_cached_code: bool: 1 bit;
}
extern class SharedFunctionInfo extends HeapObject {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment