Commit 1f4958de authored by danno, committed by Commit bot

Complete separation of CodeAssembler and CodeStubAssembler

Move the allocation-related and Smi tagging/untagging methods into CodeStubAssembler.

Review URL: https://codereview.chromium.org/1893383002

Cr-Commit-Position: refs/heads/master@{#35684}
parent b4697727
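For orientation, the Smi helpers moved into CodeStubAssembler below wrap a simple shift-based tagging scheme: the shift amount is SmiShiftBitsConstant(), i.e. kSmiShiftSize + kSmiTagSize. A minimal standalone C++ sketch, assuming the 64-bit constant values; names ending in Sketch are illustrative, not V8 APIs:

#include <cstdint>

// Illustrative constants for a 64-bit build; the real values come from the V8 headers.
constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;

// SmiTag: shift the integer into the upper half of the word (WordShl in the assembler).
intptr_t SmiTagSketch(intptr_t value) {
  return value << (kSmiShiftSize + kSmiTagSize);
}

// SmiUntag: an arithmetic right shift recovers the original integer (WordSar in the assembler).
intptr_t SmiUntagSketch(intptr_t tagged) {
  return tagged >> (kSmiShiftSize + kSmiTagSize);
}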
@@ -22,6 +22,30 @@ CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
const char* name)
: compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}
Node* CodeStubAssembler::BooleanMapConstant() {
return HeapConstant(isolate()->factory()->boolean_map());
}
Node* CodeStubAssembler::EmptyStringConstant() {
return LoadRoot(Heap::kempty_stringRootIndex);
}
Node* CodeStubAssembler::HeapNumberMapConstant() {
return HeapConstant(isolate()->factory()->heap_number_map());
}
Node* CodeStubAssembler::NoContextConstant() {
return SmiConstant(Smi::FromInt(0));
}
Node* CodeStubAssembler::NullConstant() {
return LoadRoot(Heap::kNullValueRootIndex);
}
Node* CodeStubAssembler::UndefinedConstant() {
return LoadRoot(Heap::kUndefinedValueRootIndex);
}
Node* CodeStubAssembler::Float64Round(Node* x) {
Node* one = Float64Constant(1.0);
Node* one_half = Float64Constant(0.5);
@@ -216,6 +240,14 @@ Node* CodeStubAssembler::SmiFromWord32(Node* value) {
return WordShl(value, SmiShiftBitsConstant());
}
Node* CodeStubAssembler::SmiTag(Node* value) {
return WordShl(value, SmiShiftBitsConstant());
}
Node* CodeStubAssembler::SmiUntag(Node* value) {
return WordSar(value, SmiShiftBitsConstant());
}
Node* CodeStubAssembler::SmiToWord32(Node* value) {
Node* result = WordSar(value, SmiShiftBitsConstant());
if (Is64()) {
@@ -278,6 +310,127 @@ Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
IntPtrConstant(0));
}
Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
// If there's not enough space, call the runtime.
Variable result(this, MachineRepresentation::kTagged);
Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
Label merge_runtime(this, &result);
Branch(IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes), &runtime_call,
&no_runtime_call);
Bind(&runtime_call);
// AllocateInTargetSpace does not use the context.
Node* context = IntPtrConstant(0);
Node* runtime_flags = SmiTag(Int32Constant(
AllocateDoubleAlignFlag::encode(false) |
AllocateTargetSpace::encode(flags & kPretenured
? AllocationSpace::OLD_SPACE
: AllocationSpace::NEW_SPACE)));
Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
SmiTag(size_in_bytes), runtime_flags);
result.Bind(runtime_result);
Goto(&merge_runtime);
// When there is enough space, return `top' and bump it up.
Bind(&no_runtime_call);
Node* no_runtime_result = top;
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
IntPtrAdd(top, size_in_bytes));
no_runtime_result =
IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
result.Bind(no_runtime_result);
Goto(&merge_runtime);
Bind(&merge_runtime);
return result.value();
}
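Conceptually, AllocateRawUnaligned is a bump-pointer fast path with a runtime fallback: if the requested size does not fit between the current allocation top and limit, it calls Runtime::kAllocateInTargetSpace; otherwise it bumps top and returns the old top with the heap-object tag added. A hedged C++ sketch of that control flow (CallRuntimeAllocate is a hypothetical stand-in, not a V8 API):

#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;  // illustrative; V8 defines the real constant

// Hypothetical slow path standing in for the Runtime::kAllocateInTargetSpace call.
uintptr_t CallRuntimeAllocate(intptr_t size_in_bytes);

uintptr_t AllocateSketch(intptr_t size_in_bytes, uintptr_t* top, uintptr_t limit) {
  // Not enough space between top and limit: defer to the runtime allocator.
  if (static_cast<intptr_t>(limit - *top) < size_in_bytes) {
    return CallRuntimeAllocate(size_in_bytes);
  }
  // Enough space: bump top and return the old top, tagged as a heap object.
  uintptr_t result = *top;
  *top += size_in_bytes;
  return result + kHeapObjectTag;
}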
Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
Variable adjusted_size(this, MachineType::PointerRepresentation());
adjusted_size.Bind(size_in_bytes);
if (flags & kDoubleAlignment) {
// TODO(epertoso): Simd128 alignment.
Label aligned(this), not_aligned(this), merge(this, &adjusted_size);
Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
&aligned);
Bind(&not_aligned);
Node* not_aligned_size =
IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
adjusted_size.Bind(not_aligned_size);
Goto(&merge);
Bind(&aligned);
Goto(&merge);
Bind(&merge);
}
Variable address(this, MachineRepresentation::kTagged);
address.Bind(AllocateRawUnaligned(adjusted_size.value(), kNone, top, limit));
Label needs_filler(this), doesnt_need_filler(this),
merge_address(this, &address);
Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &doesnt_need_filler,
&needs_filler);
Bind(&needs_filler);
// Store a filler and increase the address by kPointerSize.
// TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
// it when Simd128 alignment is supported.
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
LoadRoot(Heap::kOnePointerFillerMapRootIndex));
address.Bind(IntPtrAdd(address.value(), IntPtrConstant(kPointerSize)));
Goto(&merge_address);
Bind(&doesnt_need_filler);
Goto(&merge_address);
Bind(&merge_address);
// Update the top.
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
IntPtrAdd(top, adjusted_size.value()));
return address.value();
}
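The aligned variant only pads when the current top is not double-aligned: it requests one extra pointer-sized word from the unaligned allocator, and if that padding was actually added it stores a one-pointer filler at the front and starts the object after it. A simplified sketch under those assumptions (WriteOnePointerFiller is hypothetical; the aligned path is only used on 32-bit hosts, so kPointerSize is 4 here):

#include <cstdint>

constexpr intptr_t kPointerSize = 4;           // 32-bit host, where double alignment matters
constexpr uintptr_t kDoubleAlignmentMask = 7;  // illustrative: kDoubleSize - 1

// Hypothetical stand-in for storing the one-pointer filler map.
void WriteOnePointerFiller(uintptr_t address);

uintptr_t AllocateAlignedSketch(intptr_t size_in_bytes, uintptr_t top,
                                uintptr_t (*allocate)(intptr_t)) {
  // If top is not double-aligned, ask the unaligned allocator for one extra word.
  intptr_t adjusted_size = size_in_bytes;
  if (top & kDoubleAlignmentMask) adjusted_size += kPointerSize;
  uintptr_t address = allocate(adjusted_size);
  // If padding was added, fill the leading word and start the object after it.
  if (adjusted_size != size_in_bytes) {
    WriteOnePointerFiller(address);
    address += kPointerSize;
  }
  return address;
}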
Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
bool const new_space = !(flags & kPretenured);
Node* top_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
#ifdef V8_HOST_ARCH_32_BIT
if (flags & kDoubleAlignment) {
return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
limit_address);
}
#endif
return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
limit_address);
}
Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
return IntPtrAdd(previous, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
MachineType rep) {
return Load(rep, buffer, IntPtrConstant(offset));
......
@@ -32,12 +32,24 @@ class CodeStubAssembler : public compiler::CodeAssembler {
CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
Code::Flags flags, const char* name);
compiler::Node* BooleanMapConstant();
compiler::Node* EmptyStringConstant();
compiler::Node* HeapNumberMapConstant();
compiler::Node* NoContextConstant();
compiler::Node* NullConstant();
compiler::Node* UndefinedConstant();
// Float64 operations.
compiler::Node* Float64Ceil(compiler::Node* x);
compiler::Node* Float64Floor(compiler::Node* x);
compiler::Node* Float64Round(compiler::Node* x);
compiler::Node* Float64Trunc(compiler::Node* x);
// Tag a Word as a Smi value.
compiler::Node* SmiTag(compiler::Node* value);
// Untag a Smi value as a Word.
compiler::Node* SmiUntag(compiler::Node* value);
// Smi conversions.
compiler::Node* SmiToFloat64(compiler::Node* value);
compiler::Node* SmiFromWord32(compiler::Node* value);
@@ -55,6 +67,10 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* SmiLessThanOrEqual(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b);
// Allocate an object of the given size.
compiler::Node* Allocate(int size, AllocationFlags flags = kNone);
compiler::Node* InnerAllocate(compiler::Node* previous, int offset);
// Check a value for smi-ness
compiler::Node* WordIsSmi(compiler::Node* a);
// Check that the value is a positive smi.
@@ -179,6 +195,16 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* BitFieldDecode(compiler::Node* word32, uint32_t shift,
uint32_t mask);
private:
compiler::Node* AllocateRawAligned(compiler::Node* size_in_bytes,
AllocationFlags flags,
compiler::Node* top_address,
compiler::Node* limit_address);
compiler::Node* AllocateRawUnaligned(compiler::Node* size_in_bytes,
AllocationFlags flags,
compiler::Node* top_address,
compiler::Node* limit_address);
};
} // namespace internal
......
@@ -119,34 +119,10 @@ Node* CodeAssembler::Float64Constant(double value) {
return raw_assembler_->Float64Constant(value);
}
Node* CodeAssembler::BooleanMapConstant() {
return HeapConstant(isolate()->factory()->boolean_map());
}
Node* CodeAssembler::EmptyStringConstant() {
return LoadRoot(Heap::kempty_stringRootIndex);
}
Node* CodeAssembler::HeapNumberMapConstant() {
return HeapConstant(isolate()->factory()->heap_number_map());
}
Node* CodeAssembler::NaNConstant() {
return LoadRoot(Heap::kNanValueRootIndex);
}
Node* CodeAssembler::NoContextConstant() {
return SmiConstant(Smi::FromInt(0));
}
Node* CodeAssembler::NullConstant() {
return LoadRoot(Heap::kNullValueRootIndex);
}
Node* CodeAssembler::UndefinedConstant() {
return LoadRoot(Heap::kUndefinedValueRootIndex);
}
Node* CodeAssembler::Parameter(int value) {
return raw_assembler_->Parameter(value);
}
@@ -173,14 +149,6 @@ Node* CodeAssembler::SmiShiftBitsConstant() {
return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
Node* CodeAssembler::SmiTag(Node* value) {
return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
}
Node* CodeAssembler::SmiUntag(Node* value) {
return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
}
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name) \
Node* CodeAssembler::name(Node* a, Node* b) { \
return raw_assembler_->name(a, b); \
@@ -213,6 +181,18 @@ Node* CodeAssembler::ChangeUint32ToWord(Node* value) {
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
Node* CodeAssembler::Load(MachineType rep, Node* base) {
return raw_assembler_->Load(rep, base);
}
Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
return raw_assembler_->Load(rep, base, index);
}
Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* index) {
return raw_assembler_->AtomicLoad(rep, base, index);
}
Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
Handle<Object> root = isolate()->heap()->root_handle(root_index);
@@ -227,145 +207,12 @@ Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
ExternalConstant(ExternalReference::roots_array_start(isolate()));
USE(roots_array_start);
// TODO(danno): Implement thee root-access case where the root is not constant
// TODO(danno): Implement the root-access case where the root is not constant
// and must be loaded from the root array.
UNIMPLEMENTED();
return nullptr;
}
Node* CodeAssembler::AllocateRawUnaligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
// If there's not enough space, call the runtime.
RawMachineLabel runtime_call(RawMachineLabel::kDeferred), no_runtime_call,
merge_runtime;
raw_assembler_->Branch(
raw_assembler_->IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes),
&runtime_call, &no_runtime_call);
raw_assembler_->Bind(&runtime_call);
// AllocateInTargetSpace does not use the context.
Node* context = IntPtrConstant(0);
Node* runtime_flags = SmiTag(Int32Constant(
AllocateDoubleAlignFlag::encode(false) |
AllocateTargetSpace::encode(flags & kPretenured
? AllocationSpace::OLD_SPACE
: AllocationSpace::NEW_SPACE)));
Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
SmiTag(size_in_bytes), runtime_flags);
raw_assembler_->Goto(&merge_runtime);
// When there is enough space, return `top' and bump it up.
raw_assembler_->Bind(&no_runtime_call);
Node* no_runtime_result = top;
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
IntPtrAdd(top, size_in_bytes));
no_runtime_result =
IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
raw_assembler_->Goto(&merge_runtime);
raw_assembler_->Bind(&merge_runtime);
return raw_assembler_->Phi(MachineType::PointerRepresentation(),
runtime_result, no_runtime_result);
}
Node* CodeAssembler::AllocateRawAligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
Node* adjusted_size = size_in_bytes;
if (flags & kDoubleAlignment) {
// TODO(epertoso): Simd128 alignment.
RawMachineLabel aligned, not_aligned, merge;
raw_assembler_->Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)),
&not_aligned, &aligned);
raw_assembler_->Bind(&not_aligned);
Node* not_aligned_size =
IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
raw_assembler_->Goto(&merge);
raw_assembler_->Bind(&aligned);
raw_assembler_->Goto(&merge);
raw_assembler_->Bind(&merge);
adjusted_size = raw_assembler_->Phi(MachineType::PointerRepresentation(),
not_aligned_size, adjusted_size);
}
Node* address = AllocateRawUnaligned(adjusted_size, kNone, top, limit);
RawMachineLabel needs_filler, doesnt_need_filler, merge_address;
raw_assembler_->Branch(
raw_assembler_->IntPtrEqual(adjusted_size, size_in_bytes),
&doesnt_need_filler, &needs_filler);
raw_assembler_->Bind(&needs_filler);
// Store a filler and increase the address by kPointerSize.
// TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
// it when Simd128 alignment is supported.
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
LoadRoot(Heap::kOnePointerFillerMapRootIndex));
Node* address_with_filler = IntPtrAdd(address, IntPtrConstant(kPointerSize));
raw_assembler_->Goto(&merge_address);
raw_assembler_->Bind(&doesnt_need_filler);
Node* address_without_filler = address;
raw_assembler_->Goto(&merge_address);
raw_assembler_->Bind(&merge_address);
address = raw_assembler_->Phi(MachineType::PointerRepresentation(),
address_with_filler, address_without_filler);
// Update the top.
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
IntPtrAdd(top, adjusted_size));
return address;
}
Node* CodeAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
bool const new_space = !(flags & kPretenured);
Node* top_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
#ifdef V8_HOST_ARCH_32_BIT
if (flags & kDoubleAlignment) {
return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
limit_address);
}
#endif
return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
limit_address);
}
Node* CodeAssembler::InnerAllocate(Node* previous, int offset) {
return IntPtrAdd(previous, IntPtrConstant(offset));
}
Node* CodeAssembler::Load(MachineType rep, Node* base) {
return raw_assembler_->Load(rep, base);
}
Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
return raw_assembler_->Load(rep, base, index);
}
Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* index) {
return raw_assembler_->AtomicLoad(rep, base, index);
}
Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* value) {
return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
}
......
@@ -52,6 +52,7 @@ class Schedule;
V(Int32LessThanOrEqual) \
V(IntPtrLessThan) \
V(IntPtrLessThanOrEqual) \
V(IntPtrEqual) \
V(Uint32LessThan) \
V(UintPtrGreaterThanOrEqual) \
V(WordEqual) \
@@ -191,13 +192,7 @@ class CodeAssembler {
Node* BooleanConstant(bool value);
Node* ExternalConstant(ExternalReference address);
Node* Float64Constant(double value);
Node* BooleanMapConstant();
Node* EmptyStringConstant();
Node* HeapNumberMapConstant();
Node* NaNConstant();
Node* NoContextConstant();
Node* NullConstant();
Node* UndefinedConstant();
Node* Parameter(int value);
void Return(Node* value);
@@ -223,6 +218,9 @@ class CodeAssembler {
Node* Load(MachineType rep, Node* base, Node* index);
Node* AtomicLoad(MachineType rep, Node* base, Node* index);
// Load a value from the root array.
Node* LoadRoot(Heap::RootListIndex root_index);
// Store value to raw memory location.
Node* Store(MachineRepresentation rep, Node* base, Node* value);
Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
@@ -307,22 +305,6 @@ class CodeAssembler {
Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
Node* code_target_address, Node** args);
// ===========================================================================
// Macros
// ===========================================================================
// Tag a Word as a Smi value.
Node* SmiTag(Node* value);
// Untag a Smi value as a Word.
Node* SmiUntag(Node* value);
// Load a value from the root array.
Node* LoadRoot(Heap::RootListIndex root_index);
// Allocate an object of the given size.
Node* Allocate(int size, AllocationFlags flags = kNone);
Node* InnerAllocate(Node* previous, int offset);
// Branching helpers.
void BranchIf(Node* condition, Label* if_true, Label* if_false);
@@ -357,11 +339,6 @@ class CodeAssembler {
Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
Node* top_address, Node* limit_address);
Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
Node* top_adddress, Node* limit_address);
base::SmartPointer<RawMachineAssembler> raw_assembler_;
Code::Flags flags_;
const char* name_;
......