Commit 7a82be3d authored by adamk, committed by Commit bot

Remove unnecessary duplication of FunctionKind enums in CompilerHints

The duplicated enum values are only used by the FastNewClosureStub,
so inline them there, with the help of one new constant (kFunctionKindShift)
in SharedFunctionInfo.

Review-Url: https://codereview.chromium.org/2390043003
Cr-Commit-Position: refs/heads/master@{#40005}
parent 138127a6
...@@ -2458,11 +2458,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, ...@@ -2458,11 +2458,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor; Label class_constructor;
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset)); __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
__ TestAndBranchIfAnySet( __ TestAndBranchIfAnySet(w3, FunctionKind::kClassConstructor
w3, (1 << SharedFunctionInfo::kIsDefaultConstructor) | << SharedFunctionInfo::kFunctionKindShift,
(1 << SharedFunctionInfo::kIsSubclassConstructor) | &class_constructor);
(1 << SharedFunctionInfo::kIsBaseConstructor),
&class_constructor);
// Enter the context of the function; ToObject has to run in the function // Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function // context, and we also need to take the global proxy from the function
......
...@@ -5011,33 +5011,38 @@ compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler, ...@@ -5011,33 +5011,38 @@ compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
load_map(assembler); load_map(assembler);
Variable map_index(assembler, MachineType::PointerRepresentation()); Variable map_index(assembler, MachineType::PointerRepresentation());
STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
Node* is_not_normal = assembler->Word32And( Node* is_not_normal = assembler->Word32And(
compiler_hints, compiler_hints,
assembler->Int32Constant(SharedFunctionInfo::kFunctionKindMaskBits)); assembler->Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
assembler->GotoUnless(is_not_normal, &if_normal); assembler->GotoUnless(is_not_normal, &if_normal);
Node* is_generator = assembler->Word32And( Node* is_generator = assembler->Word32And(
compiler_hints, compiler_hints,
assembler->Int32Constant(1 << SharedFunctionInfo::kIsGeneratorBit)); assembler->Int32Constant(FunctionKind::kGeneratorFunction
<< SharedFunctionInfo::kFunctionKindShift));
assembler->GotoIf(is_generator, &if_generator); assembler->GotoIf(is_generator, &if_generator);
Node* is_async = assembler->Word32And( Node* is_async = assembler->Word32And(
compiler_hints, compiler_hints,
assembler->Int32Constant(1 << SharedFunctionInfo::kIsAsyncFunctionBit)); assembler->Int32Constant(FunctionKind::kAsyncFunction
<< SharedFunctionInfo::kFunctionKindShift));
assembler->GotoIf(is_async, &if_async); assembler->GotoIf(is_async, &if_async);
Node* is_class_constructor = assembler->Word32And( Node* is_class_constructor = assembler->Word32And(
compiler_hints, compiler_hints,
assembler->Int32Constant(SharedFunctionInfo::kClassConstructorBits)); assembler->Int32Constant(FunctionKind::kClassConstructor
<< SharedFunctionInfo::kFunctionKindShift));
assembler->GotoIf(is_class_constructor, &if_class_constructor); assembler->GotoIf(is_class_constructor, &if_class_constructor);
if (FLAG_debug_code) { if (FLAG_debug_code) {
// Function must be a function without a prototype. // Function must be a function without a prototype.
assembler->Assert(assembler->Word32And( assembler->Assert(assembler->Word32And(
compiler_hints, assembler->Int32Constant( compiler_hints,
SharedFunctionInfo::kAccessorFunctionBits | assembler->Int32Constant((FunctionKind::kAccessorFunction |
(1 << SharedFunctionInfo::kIsArrowBit) | FunctionKind::kArrowFunction |
(1 << SharedFunctionInfo::kIsConciseMethodBit)))); FunctionKind::kConciseMethod)
<< SharedFunctionInfo::kFunctionKindShift)));
} }
assembler->Goto(&if_function_without_prototype); assembler->Goto(&if_function_without_prototype);
......
...@@ -7707,18 +7707,9 @@ class SharedFunctionInfo: public HeapObject { ...@@ -7707,18 +7707,9 @@ class SharedFunctionInfo: public HeapObject {
kDontFlush, kDontFlush,
// byte 2 // byte 2
kFunctionKind, kFunctionKind,
kIsArrow = kFunctionKind, // rest of byte 2 and first two bits of byte 3 are used by FunctionKind
kIsGenerator,
kIsConciseMethod,
kIsDefaultConstructor,
kIsSubclassConstructor,
kIsBaseConstructor,
kIsGetterFunction,
kIsSetterFunction,
// byte 3 // byte 3
kIsAsyncFunction, kDeserialized = kFunctionKind + 10,
kIsModule,
kDeserialized,
kIsDeclaration, kIsDeclaration,
kIsAsmWasmBroken, kIsAsmWasmBroken,
kRequiresClassFieldInit, kRequiresClassFieldInit,
...@@ -7727,23 +7718,8 @@ class SharedFunctionInfo: public HeapObject { ...@@ -7727,23 +7718,8 @@ class SharedFunctionInfo: public HeapObject {
}; };
// kFunctionKind has to be byte-aligned // kFunctionKind has to be byte-aligned
STATIC_ASSERT((kFunctionKind % kBitsPerByte) == 0); STATIC_ASSERT((kFunctionKind % kBitsPerByte) == 0);
// Make sure that FunctionKind and byte 2 are in sync:
#define ASSERT_FUNCTION_KIND_ORDER(functionKind, compilerFunctionKind) \ class FunctionKindBits : public BitField<FunctionKind, kFunctionKind, 10> {};
STATIC_ASSERT(FunctionKind::functionKind == \
1 << (compilerFunctionKind - kFunctionKind))
ASSERT_FUNCTION_KIND_ORDER(kArrowFunction, kIsArrow);
ASSERT_FUNCTION_KIND_ORDER(kGeneratorFunction, kIsGenerator);
ASSERT_FUNCTION_KIND_ORDER(kConciseMethod, kIsConciseMethod);
ASSERT_FUNCTION_KIND_ORDER(kDefaultConstructor, kIsDefaultConstructor);
ASSERT_FUNCTION_KIND_ORDER(kSubclassConstructor, kIsSubclassConstructor);
ASSERT_FUNCTION_KIND_ORDER(kBaseConstructor, kIsBaseConstructor);
ASSERT_FUNCTION_KIND_ORDER(kGetterFunction, kIsGetterFunction);
ASSERT_FUNCTION_KIND_ORDER(kSetterFunction, kIsSetterFunction);
ASSERT_FUNCTION_KIND_ORDER(kAsyncFunction, kIsAsyncFunction);
ASSERT_FUNCTION_KIND_ORDER(kModule, kIsModule);
#undef ASSERT_FUNCTION_KIND_ORDER
class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 10> {};
class DeoptCountBits : public BitField<int, 0, 4> {}; class DeoptCountBits : public BitField<int, 0, 4> {};
class OptReenableTriesBits : public BitField<int, 4, 18> {}; class OptReenableTriesBits : public BitField<int, 4, 18> {};
...@@ -7775,21 +7751,10 @@ class SharedFunctionInfo: public HeapObject { ...@@ -7775,21 +7751,10 @@ class SharedFunctionInfo: public HeapObject {
static const int kHasDuplicateParametersBit = static const int kHasDuplicateParametersBit =
kHasDuplicateParameters + kCompilerHintsSmiTagSize; kHasDuplicateParameters + kCompilerHintsSmiTagSize;
static const int kIsArrowBit = kIsArrow + kCompilerHintsSmiTagSize; static const int kFunctionKindShift =
static const int kIsGeneratorBit = kIsGenerator + kCompilerHintsSmiTagSize; kFunctionKind + kCompilerHintsSmiTagSize;
static const int kIsConciseMethodBit = static const int kAllFunctionKindBitsMask = FunctionKindBits::kMask
kIsConciseMethod + kCompilerHintsSmiTagSize; << kCompilerHintsSmiTagSize;
static const int kIsAsyncFunctionBit =
kIsAsyncFunction + kCompilerHintsSmiTagSize;
static const int kAccessorFunctionBits =
FunctionKind::kAccessorFunction
<< (kFunctionKind + kCompilerHintsSmiTagSize);
static const int kClassConstructorBits =
FunctionKind::kClassConstructor
<< (kFunctionKind + kCompilerHintsSmiTagSize);
static const int kFunctionKindMaskBits = FunctionKindBits::kMask
<< kCompilerHintsSmiTagSize;
// Constants for optimizing codegen for strict mode function and // Constants for optimizing codegen for strict mode function and
// native tests. // native tests.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment