Commit 7dcb6ad3 authored by bmeurer, committed by Commit bot

[turbofan] Restore basic write barrier elimination.

Restore the basic write barrier elimination that used to run as part of
the simplified lowering phase (in ChangeLowering, actually). The write
barrier computation now happens in SimplifiedLowering, where we can still
look at types and consult the heap/isolate; it merely updates the
WriteBarrierKind in the FieldAccess/ElementAccess that is later used when
lowering to a machine Load/Store.

CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux64_tsan_rel
R=mstarzinger@chromium.org
BUG=v8:4969,chromium:608636
LOG=n

Review-Url: https://codereview.chromium.org/1938993002
Cr-Commit-Position: refs/heads/master@{#35969}
parent 57f0a521
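
One idea recurs throughout the diffs below: the WriteBarrierKind values are
ordered from cheapest to most conservative, and SimplifiedLowering only ever
replaces a store's barrier hint with a cheaper one. A minimal standalone
sketch of that contract (illustrative code, not the V8 sources):

    #include <algorithm>
    #include <cstdint>

    // Barrier kinds ordered from cheapest to most conservative, matching the
    // enum this commit moves into the shared headers.
    enum WriteBarrierKind : uint8_t {
      kNoWriteBarrier,       // value is a Smi or an immortal immovable root
      kMapWriteBarrier,      // value is known to be a Map
      kPointerWriteBarrier,  // value is known to be a heap object
      kFullWriteBarrier      // nothing is known about the value
    };

    // Refining a hint with a freshly computed kind takes the minimum, so a
    // store's barrier can get cheaper but never stronger.
    inline WriteBarrierKind Refine(WriteBarrierKind hint,
                                   WriteBarrierKind computed) {
      return std::min(hint, computed);
    }
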
......@@ -142,12 +142,6 @@ class AccessBuilder final : public AllStatic {
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
-  // ===========================================================================
-  // Access to global per-isolate variables (based on external reference).
-  // Provides access to the backing store of a StatsCounter.
-  static FieldAccess ForStatsCounter();
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
......
......@@ -9,7 +9,6 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/conversions-inl.h"
namespace v8 {
namespace internal {
......@@ -37,38 +36,6 @@ Reduction ChangeLowering::Reduce(Node* node) {
return NoChange();
}
-namespace {
-
-WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
-                                         MachineRepresentation representation,
-                                         Node* value) {
-  // TODO(bmeurer): Optimize write barriers based on input.
-  if (base_is_tagged == kTaggedBase &&
-      representation == MachineRepresentation::kTagged) {
-    if (value->opcode() == IrOpcode::kHeapConstant) {
-      return kPointerWriteBarrier;
-    } else if (value->opcode() == IrOpcode::kNumberConstant) {
-      double const number_value = OpParameter<double>(value);
-      if (IsSmiDouble(number_value)) return kNoWriteBarrier;
-      return kPointerWriteBarrier;
-    }
-    return kFullWriteBarrier;
-  }
-  return kNoWriteBarrier;
-}
-
-WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
-                                         MachineRepresentation representation,
-                                         int field_offset, Node* value) {
-  if (base_is_tagged == kTaggedBase && field_offset == HeapObject::kMapOffset) {
-    // Write barriers for storing maps are cheaper.
-    return kMapWriteBarrier;
-  }
-  return ComputeWriteBarrierKind(base_is_tagged, representation, value);
-}
-
-}  // namespace
Reduction ChangeLowering::ReduceLoadField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
......@@ -79,14 +46,11 @@ Reduction ChangeLowering::ReduceLoadField(Node* node) {
Reduction ChangeLowering::ReduceStoreField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
-  WriteBarrierKind kind = ComputeWriteBarrierKind(
-      access.base_is_tagged, access.machine_type.representation(),
-      access.offset, node->InputAt(1));
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
-  NodeProperties::ChangeOp(node,
-                           machine()->Store(StoreRepresentation(
-                               access.machine_type.representation(), kind)));
+  NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation(
+                                     access.machine_type.representation(),
+                                     access.write_barrier_kind)));
return Changed(node);
}
......@@ -124,12 +88,9 @@ Reduction ChangeLowering::ReduceLoadElement(Node* node) {
Reduction ChangeLowering::ReduceStoreElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
-  NodeProperties::ChangeOp(
-      node, machine()->Store(StoreRepresentation(
-                access.machine_type.representation(),
-                ComputeWriteBarrierKind(access.base_is_tagged,
-                                        access.machine_type.representation(),
-                                        node->InputAt(2)))));
+  NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation(
+                                     access.machine_type.representation(),
+                                     access.write_barrier_kind)));
return Changed(node);
}
......
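
With the helpers above gone, ChangeLowering no longer decides anything about
barriers: it forwards the hint that SimplifiedLowering stored in the access
descriptor into the machine Store operator. A rough stand-in for that pairing
(hypothetical types, not the V8 API):

    #include <cstdint>

    enum WriteBarrierKind : uint8_t { kNoWriteBarrier, kMapWriteBarrier,
                                      kPointerWriteBarrier, kFullWriteBarrier };
    enum class MachineRepresentation : uint8_t { kWord32, kFloat64, kTagged };

    // A machine Store carries the stored representation together with the
    // barrier hint that was computed earlier during simplified lowering.
    class StoreRepresentationSketch {
     public:
      StoreRepresentationSketch(MachineRepresentation rep, WriteBarrierKind kind)
          : rep_(rep), kind_(kind) {}
      MachineRepresentation representation() const { return rep_; }
      WriteBarrierKind write_barrier_kind() const { return kind_; }

     private:
      MachineRepresentation rep_;
      WriteBarrierKind kind_;
    };
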
......@@ -279,8 +279,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
this_storage, this_effect, this_control);
}
-      FieldAccess field_access = {kTaggedBase, field_index.offset(), name,
-                                  field_type, MachineType::AnyTagged()};
+      FieldAccess field_access = {
+          kTaggedBase, field_index.offset(), name,
+          field_type, MachineType::AnyTagged(), kFullWriteBarrier};
if (access_mode == AccessMode::kLoad) {
if (field_type->Is(Type::UntaggedFloat64())) {
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
......@@ -723,7 +724,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
element_type = type_cache_.kSmi;
}
ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
-                                  element_type, element_machine_type};
+                                  element_type, element_machine_type,
+                                  kFullWriteBarrier};
// Access the actual element.
// TODO(bmeurer): Refactor this into separate methods or even a separate
......
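
At graph-building time JSNativeContextSpecialization knows nothing about the
value that will be stored, so the new trailing initializer is always
kFullWriteBarrier; SimplifiedLowering weakens it later where the types allow.
A stand-in aggregate showing the shape of these initializers (hypothetical
struct, not the V8 definition):

    #include <cstdint>

    enum WriteBarrierKind : uint8_t { kNoWriteBarrier, kMapWriteBarrier,
                                      kPointerWriteBarrier, kFullWriteBarrier };

    struct ElementAccessSketch {
      int header_size;                      // size of the header, without tag
      WriteBarrierKind write_barrier_kind;  // starts out fully conservative
    };

    // Mirrors the initializers above: graph building always starts from the
    // strongest barrier and lets later phases relax it.
    ElementAccessSketch element_access = {/* header_size */ 8, kFullWriteBarrier};
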
......@@ -12,22 +12,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
-  switch (kind) {
-    case kNoWriteBarrier:
-      return os << "NoWriteBarrier";
-    case kMapWriteBarrier:
-      return os << "MapWriteBarrier";
-    case kPointerWriteBarrier:
-      return os << "PointerWriteBarrier";
-    case kFullWriteBarrier:
-      return os << "FullWriteBarrier";
-  }
-  UNREACHABLE();
-  return os;
-}
bool operator==(StoreRepresentation lhs, StoreRepresentation rhs) {
return lhs.representation() == rhs.representation() &&
lhs.write_barrier_kind() == rhs.write_barrier_kind();
......
......@@ -32,16 +32,6 @@ class OptionalOperator final {
const Operator* const op_;
};
-// Supported write barrier modes.
-enum WriteBarrierKind {
-  kNoWriteBarrier,
-  kMapWriteBarrier,
-  kPointerWriteBarrier,
-  kFullWriteBarrier
-};
-
-std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
......
......@@ -6,6 +6,7 @@
#include <limits>
#include "src/address-map.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
......@@ -18,6 +19,7 @@
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
#include "src/conversions-inl.h"
#include "src/objects.h"
#include "src/type-cache.h"
......@@ -709,6 +711,71 @@ class RepresentationSelector {
return changer_->Float64OperatorFor(node->opcode());
}
+  WriteBarrierKind WriteBarrierKindFor(
+      BaseTaggedness base_taggedness,
+      MachineRepresentation field_representation, Type* field_type,
+      Node* value) {
+    if (base_taggedness == kTaggedBase &&
+        field_representation == MachineRepresentation::kTagged) {
+      Type* value_type = NodeProperties::GetType(value);
+      if (field_type->Is(Type::TaggedSigned()) ||
+          value_type->Is(Type::TaggedSigned())) {
+        // Write barriers are only for stores of heap objects.
+        return kNoWriteBarrier;
+      }
+      if (field_type->Is(Type::BooleanOrNullOrUndefined()) ||
+          value_type->Is(Type::BooleanOrNullOrUndefined())) {
+        // Write barriers are not necessary when storing true, false, null or
+        // undefined, because these special oddballs are always in the root set.
+        return kNoWriteBarrier;
+      }
+      if (value_type->IsConstant() &&
+          value_type->AsConstant()->Value()->IsHeapObject()) {
+        Handle<HeapObject> value_object =
+            Handle<HeapObject>::cast(value_type->AsConstant()->Value());
+        if (value_object->IsMap()) {
+          // Write barriers for storing maps are cheaper.
+          return kMapWriteBarrier;
+        }
+        RootIndexMap root_index_map(jsgraph_->isolate());
+        int root_index = root_index_map.Lookup(*value_object);
+        if (root_index != RootIndexMap::kInvalidRootIndex &&
+            jsgraph_->isolate()->heap()->RootIsImmortalImmovable(root_index)) {
+          // Write barriers are unnecessary for immortal immovable roots.
+          return kNoWriteBarrier;
+        }
+      }
+      if (field_type->Is(Type::TaggedPointer()) ||
+          value_type->Is(Type::TaggedPointer())) {
+        // Write barriers for heap objects are cheaper.
+        return kPointerWriteBarrier;
+      }
+      NumberMatcher m(value);
+      if (m.HasValue()) {
+        if (IsSmiDouble(m.Value())) {
+          // Storing a smi doesn't need a write barrier.
+          return kNoWriteBarrier;
+        }
+        // The NumberConstant will be represented as HeapNumber.
+        return kPointerWriteBarrier;
+      }
+      return kFullWriteBarrier;
+    }
+    return kNoWriteBarrier;
+  }
+
+  WriteBarrierKind WriteBarrierKindFor(
+      BaseTaggedness base_taggedness,
+      MachineRepresentation field_representation, int field_offset,
+      Type* field_type, Node* value) {
+    if (base_taggedness == kTaggedBase &&
+        field_offset == HeapObject::kMapOffset) {
+      return kMapWriteBarrier;
+    }
+    return WriteBarrierKindFor(base_taggedness, field_representation,
+                               field_type, value);
+  }
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
......@@ -1136,6 +1203,16 @@ class RepresentationSelector {
access.machine_type.representation()));
ProcessRemainingInputs(node, 2);
SetOutput(node, MachineRepresentation::kNone);
+        if (lower()) {
+          WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
+              access.base_is_tagged, access.machine_type.representation(),
+              access.offset, access.type, node->InputAt(1));
+          if (write_barrier_kind < access.write_barrier_kind) {
+            access.write_barrier_kind = write_barrier_kind;
+            NodeProperties::ChangeOp(
+                node, jsgraph_->simplified()->StoreField(access));
+          }
+        }
break;
}
case IrOpcode::kLoadBuffer: {
......@@ -1201,6 +1278,16 @@ class RepresentationSelector {
access.machine_type.representation())); // value
ProcessRemainingInputs(node, 3);
SetOutput(node, MachineRepresentation::kNone);
+        if (lower()) {
+          WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
+              access.base_is_tagged, access.machine_type.representation(),
+              access.type, node->InputAt(2));
+          if (write_barrier_kind < access.write_barrier_kind) {
+            access.write_barrier_kind = write_barrier_kind;
+            NodeProperties::ChangeOp(
+                node, jsgraph_->simplified()->StoreElement(access));
+          }
+        }
break;
}
case IrOpcode::kObjectIsCallable:
......
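
The case analysis in WriteBarrierKindFor boils down to a handful of rules. The
sketch below condenses them, with plain booleans standing in for V8's Type
queries (an illustration, not the real signature):

    #include <cstdint>

    enum WriteBarrierKind : uint8_t { kNoWriteBarrier, kMapWriteBarrier,
                                      kPointerWriteBarrier, kFullWriteBarrier };

    WriteBarrierKind ComputeKindSketch(bool tagged_base, bool tagged_field,
                                       bool value_is_smi, bool value_is_oddball,
                                       bool value_is_map,
                                       bool value_is_known_heap_object) {
      // Only tagged stores into tagged objects can create tracked pointers.
      if (!tagged_base || !tagged_field) return kNoWriteBarrier;
      if (value_is_smi) return kNoWriteBarrier;      // Smis are not heap pointers.
      if (value_is_oddball) return kNoWriteBarrier;  // Oddballs are in the root set.
      if (value_is_map) return kMapWriteBarrier;     // Map stores are cheaper.
      if (value_is_known_heap_object) return kPointerWriteBarrier;
      return kFullWriteBarrier;                      // Nothing known: stay safe.
    }
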
......@@ -13,6 +13,10 @@ namespace v8 {
namespace internal {
namespace compiler {
+size_t hash_value(BaseTaggedness base_taggedness) {
+  return static_cast<uint8_t>(base_taggedness);
+}
std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
switch (base_taggedness) {
case kUntaggedBase:
......@@ -84,6 +88,9 @@ BufferAccess const BufferAccessOf(const Operator* op) {
bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
+  // On purpose we don't include the write barrier kind here, as this method is
+  // really only relevant for eliminating loads and they don't care about the
+  // write barrier mode.
return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
lhs.machine_type == rhs.machine_type;
}
......@@ -95,6 +102,9 @@ bool operator!=(FieldAccess const& lhs, FieldAccess const& rhs) {
size_t hash_value(FieldAccess const& access) {
+  // On purpose we don't include the write barrier kind here, as this method is
+  // really only relevant for eliminating loads and they don't care about the
+  // write barrier mode.
return base::hash_combine(access.base_is_tagged, access.offset,
access.machine_type);
}
......@@ -110,12 +120,15 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
}
#endif
access.type->PrintTo(os);
os << ", " << access.machine_type << "]";
os << ", " << access.machine_type << ", " << access.write_barrier_kind << "]";
return os;
}
bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
+  // On purpose we don't include the write barrier kind here, as this method is
+  // really only relevant for eliminating loads and they don't care about the
+  // write barrier mode.
return lhs.base_is_tagged == rhs.base_is_tagged &&
lhs.header_size == rhs.header_size &&
lhs.machine_type == rhs.machine_type;
......@@ -128,6 +141,9 @@ bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs) {
size_t hash_value(ElementAccess const& access) {
+  // On purpose we don't include the write barrier kind here, as this method is
+  // really only relevant for eliminating loads and they don't care about the
+  // write barrier mode.
return base::hash_combine(access.base_is_tagged, access.header_size,
access.machine_type);
}
......@@ -136,7 +152,7 @@ size_t hash_value(ElementAccess const& access) {
std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", ";
access.type->PrintTo(os);
os << ", " << access.machine_type;
os << ", " << access.machine_type << ", " << access.write_barrier_kind;
return os;
}
......
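
The comment repeated in the comparators above is load-bearing for
optimization: two accesses that differ only in their barrier hint must still
compare and hash equal, otherwise load elimination would stop matching
accesses once one of them had its hint weakened. A stand-in illustration
(hypothetical struct, not the V8 definition):

    #include <cstddef>
    #include <cstdint>
    #include <functional>

    enum WriteBarrierKind : uint8_t { kNoWriteBarrier, kMapWriteBarrier,
                                      kPointerWriteBarrier, kFullWriteBarrier };

    struct FieldAccessSketch {
      int offset;
      WriteBarrierKind write_barrier_kind;  // deliberately ignored below
    };

    bool operator==(FieldAccessSketch const& lhs, FieldAccessSketch const& rhs) {
      return lhs.offset == rhs.offset;  // barrier kind intentionally excluded
    }

    size_t hash_value(FieldAccessSketch const& access) {
      return std::hash<int>()(access.offset);  // barrier kind intentionally excluded
    }
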
......@@ -25,8 +25,9 @@ namespace compiler {
class Operator;
struct SimplifiedOperatorGlobalCache;
-enum BaseTaggedness { kUntaggedBase, kTaggedBase };
+enum BaseTaggedness : uint8_t { kUntaggedBase, kTaggedBase };
+
+size_t hash_value(BaseTaggedness);
std::ostream& operator<<(std::ostream&, BaseTaggedness);
......@@ -63,6 +64,7 @@ struct FieldAccess {
MaybeHandle<Name> name; // debugging only.
Type* type; // type of the field.
MachineType machine_type; // machine type of the field.
+  WriteBarrierKind write_barrier_kind;  // write barrier hint.
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
......@@ -86,6 +88,7 @@ struct ElementAccess {
int header_size; // size of the header, without tag.
Type* type; // type of the element.
MachineType machine_type; // machine type of the element.
+  WriteBarrierKind write_barrier_kind;  // write barrier hint.
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
......
......@@ -452,6 +452,33 @@ enum AllocationAlignment {
kSimd128Unaligned
};
+// Supported write barrier modes.
+enum WriteBarrierKind : uint8_t {
+  kNoWriteBarrier,
+  kMapWriteBarrier,
+  kPointerWriteBarrier,
+  kFullWriteBarrier
+};
+
+inline size_t hash_value(WriteBarrierKind kind) {
+  return static_cast<uint8_t>(kind);
+}
+
+inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
+  switch (kind) {
+    case kNoWriteBarrier:
+      return os << "NoWriteBarrier";
+    case kMapWriteBarrier:
+      return os << "MapWriteBarrier";
+    case kPointerWriteBarrier:
+      return os << "PointerWriteBarrier";
+    case kFullWriteBarrier:
+      return os << "FullWriteBarrier";
+  }
+  UNREACHABLE();
+  return os;
+}
// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
// (allocated in the young generation if the object size and type
......
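
Because the relocated enum is explicitly backed by uint8_t, a barrier kind
occupies a single byte, hashes as its raw value (exactly what hash_value above
returns), and its declaration order doubles as the strength order used by the
'<' comparisons in SimplifiedLowering. A quick standalone check (illustrative
only):

    #include <cstdint>
    #include <cstdio>

    enum WriteBarrierKind : uint8_t {
      kNoWriteBarrier, kMapWriteBarrier, kPointerWriteBarrier, kFullWriteBarrier
    };

    static_assert(kNoWriteBarrier < kMapWriteBarrier &&
                      kMapWriteBarrier < kPointerWriteBarrier &&
                      kPointerWriteBarrier < kFullWriteBarrier,
                  "declaration order encodes barrier strength");

    int main() {
      WriteBarrierKind kind = kPointerWriteBarrier;
      // Mirrors hash_value(kind): the hash is just the underlying byte.
      std::printf("kind=%u size=%zu\n", static_cast<unsigned>(kind), sizeof kind);
      return 0;
    }
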
......@@ -1350,7 +1350,7 @@ TEST(LowerStoreField_to_store) {
StoreRepresentation rep = StoreRepresentationOf(store->op());
if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
-    CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+    CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
CHECK_EQ(kMachineReps[i].representation(), rep.representation());
}
......@@ -1370,7 +1370,7 @@ TEST(LowerStoreField_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p1, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
-  CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+  CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
}
......@@ -1415,7 +1415,7 @@ TEST(LowerStoreElement_to_store) {
StoreRepresentation rep = StoreRepresentationOf(store->op());
if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
-    CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+    CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
CHECK_EQ(kMachineReps[i].representation(), rep.representation());
}
......@@ -1435,7 +1435,7 @@ TEST(LowerStoreElement_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p2, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
-  CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+  CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
}
......
......@@ -1504,10 +1504,12 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
CHECK_EQ(boom_value, obj->RawFastDoublePropertyAt(double_field_index));
}
-enum WriteBarrierKind { OLD_TO_OLD_WRITE_BARRIER, OLD_TO_NEW_WRITE_BARRIER };
+enum OldToWriteBarrierKind {
+  OLD_TO_OLD_WRITE_BARRIER,
+  OLD_TO_NEW_WRITE_BARRIER
+};

static void TestWriteBarrierObjectShiftFieldsRight(
-    WriteBarrierKind write_barrier_kind) {
+    OldToWriteBarrierKind write_barrier_kind) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::HandleScope scope(CcTest::isolate());
......