Commit a782e93c authored by mlippautz's avatar mlippautz Committed by Commit bot

Revert of [turbofan] Restore basic write barrier elimination. (patchset #2...

Revert of [turbofan] Restore basic write barrier elimination. (patchset #2 id:20001 of https://codereview.chromium.org/1938993002/ )

Reason for revert:
Breaks write barriers (WBs) that should be there ;)

https://uberchromegw.corp.google.com/i/client.v8/builders/V8%20Linux%20-%20gc%20stress/builds/3305

Will open a repro bug ASAP.

Original issue's description:
> [turbofan] Restore basic write barrier elimination.
>
> Restore the basic write barrier elimination that we used to run as part
> of the simplified lowering phase (in ChangeLowering actually) before, by
> moving the write barrier computation to SimplifiedLowering where we can
> still look at types and consider the heap/isolate, and just update the
> WriteBarrierKind in the FieldAccess/ElementAccess that we later use when
> lowering to a machine Load/Store.
>
> CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux64_tsan_rel
> R=mstarzinger@chromium.org
> BUG=v8:4969,chromium:608636
> LOG=n
>
> Committed: https://crrev.com/7dcb6ad379fbacbc8bdc8e11a6e50d680ffa3f62
> Cr-Commit-Position: refs/heads/master@{#35969}

TBR=mstarzinger@chromium.org,bmeurer@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=v8:4969,chromium:608636

Review-Url: https://codereview.chromium.org/1943743003
Cr-Commit-Position: refs/heads/master@{#35983}
parent 2da181b0
This diff is collapsed.
......@@ -142,6 +142,12 @@ class AccessBuilder final : public AllStatic {
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
// ===========================================================================
// Access to global per-isolate variables (based on external reference).
// Provides access to the backing store of a StatsCounter.
static FieldAccess ForStatsCounter();
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
......
......@@ -9,6 +9,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/conversions-inl.h"
namespace v8 {
namespace internal {
......@@ -36,6 +37,38 @@ Reduction ChangeLowering::Reduce(Node* node) {
return NoChange();
}
namespace {
// Returns the write barrier needed when storing {value} with the given
// machine {representation} into an object addressed with {base_is_tagged}.
// Only tagged stores into tagged bases can require a barrier at all.
WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
MachineRepresentation representation,
Node* value) {
// TODO(bmeurer): Optimize write barriers based on input.
if (base_is_tagged == kTaggedBase &&
representation == MachineRepresentation::kTagged) {
// A heap constant is a known heap pointer, so the cheaper pointer
// barrier suffices.
if (value->opcode() == IrOpcode::kHeapConstant) {
return kPointerWriteBarrier;
} else if (value->opcode() == IrOpcode::kNumberConstant) {
double const number_value = OpParameter<double>(value);
// A Smi-representable number is stored as a Smi and needs no barrier;
// any other number constant ends up as a heap pointer.
if (IsSmiDouble(number_value)) return kNoWriteBarrier;
return kPointerWriteBarrier;
}
// Nothing is known about the value: be conservative.
return kFullWriteBarrier;
}
return kNoWriteBarrier;
}
// Overload that additionally considers the field offset: a store to the map
// slot of a heap object can always use the cheaper map write barrier.
WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
MachineRepresentation representation,
int field_offset, Node* value) {
if (base_is_tagged == kTaggedBase && field_offset == HeapObject::kMapOffset) {
// Write barriers for storing maps are cheaper.
return kMapWriteBarrier;
}
// Otherwise fall back to the purely value-based computation above.
return ComputeWriteBarrierKind(base_is_tagged, representation, value);
}
} // namespace
Reduction ChangeLowering::ReduceLoadField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
......@@ -46,11 +79,14 @@ Reduction ChangeLowering::ReduceLoadField(Node* node) {
Reduction ChangeLowering::ReduceStoreField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
WriteBarrierKind kind = ComputeWriteBarrierKind(
access.base_is_tagged, access.machine_type.representation(),
access.offset, node->InputAt(1));
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation(
access.machine_type.representation(),
access.write_barrier_kind)));
NodeProperties::ChangeOp(node,
machine()->Store(StoreRepresentation(
access.machine_type.representation(), kind)));
return Changed(node);
}
......@@ -88,9 +124,12 @@ Reduction ChangeLowering::ReduceLoadElement(Node* node) {
Reduction ChangeLowering::ReduceStoreElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation(
access.machine_type.representation(),
access.write_barrier_kind)));
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(),
ComputeWriteBarrierKind(access.base_is_tagged,
access.machine_type.representation(),
node->InputAt(2)))));
return Changed(node);
}
......
......@@ -279,9 +279,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
this_storage, this_effect, this_control);
}
FieldAccess field_access = {
kTaggedBase, field_index.offset(), name,
field_type, MachineType::AnyTagged(), kFullWriteBarrier};
FieldAccess field_access = {kTaggedBase, field_index.offset(), name,
field_type, MachineType::AnyTagged()};
if (access_mode == AccessMode::kLoad) {
if (field_type->Is(Type::UntaggedFloat64())) {
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
......@@ -724,8 +723,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
element_type = type_cache_.kSmi;
}
ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
element_type, element_machine_type,
kFullWriteBarrier};
element_type, element_machine_type};
// Access the actual element.
// TODO(bmeurer): Refactor this into separate methods or even a separate
......
......@@ -12,6 +12,22 @@ namespace v8 {
namespace internal {
namespace compiler {
// Streams a human-readable name for the given WriteBarrierKind.
std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
switch (kind) {
case kNoWriteBarrier:
return os << "NoWriteBarrier";
case kMapWriteBarrier:
return os << "MapWriteBarrier";
case kPointerWriteBarrier:
return os << "PointerWriteBarrier";
case kFullWriteBarrier:
return os << "FullWriteBarrier";
}
// All enumerators are handled above; reaching here means a corrupt value.
UNREACHABLE();
return os;
}
bool operator==(StoreRepresentation lhs, StoreRepresentation rhs) {
return lhs.representation() == rhs.representation() &&
lhs.write_barrier_kind() == rhs.write_barrier_kind();
......
......@@ -32,6 +32,16 @@ class OptionalOperator final {
const Operator* const op_;
};
// Supported write barrier modes. NOTE(review): kinds appear to be ordered
// from cheapest (none) to most general (full) — elimination code elsewhere
// compares kinds with '<'; confirm before reordering.
enum WriteBarrierKind {
kNoWriteBarrier,
kMapWriteBarrier,
kPointerWriteBarrier,
kFullWriteBarrier
};

std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
......
......@@ -6,7 +6,6 @@
#include <limits>
#include "src/address-map.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
......@@ -19,7 +18,6 @@
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
#include "src/conversions-inl.h"
#include "src/objects.h"
#include "src/type-cache.h"
......@@ -711,71 +709,6 @@ class RepresentationSelector {
return changer_->Float64OperatorFor(node->opcode());
}
// Computes the cheapest write barrier that is still sound for storing
// {value} into a field with the given representation, using the static
// types of both field and value (and the isolate's roots) to prove that a
// weaker barrier — or none at all — suffices.
WriteBarrierKind WriteBarrierKindFor(
BaseTaggedness base_taggedness,
MachineRepresentation field_representation, Type* field_type,
Node* value) {
if (base_taggedness == kTaggedBase &&
field_representation == MachineRepresentation::kTagged) {
Type* value_type = NodeProperties::GetType(value);
if (field_type->Is(Type::TaggedSigned()) ||
value_type->Is(Type::TaggedSigned())) {
// Write barriers are only for stores of heap objects.
return kNoWriteBarrier;
}
if (field_type->Is(Type::BooleanOrNullOrUndefined()) ||
value_type->Is(Type::BooleanOrNullOrUndefined())) {
// Write barriers are not necessary when storing true, false, null or
// undefined, because these special oddballs are always in the root set.
return kNoWriteBarrier;
}
if (value_type->IsConstant() &&
value_type->AsConstant()->Value()->IsHeapObject()) {
Handle<HeapObject> value_object =
Handle<HeapObject>::cast(value_type->AsConstant()->Value());
if (value_object->IsMap()) {
// Write barriers for storing maps are cheaper.
return kMapWriteBarrier;
}
RootIndexMap root_index_map(jsgraph_->isolate());
int root_index = root_index_map.Lookup(*value_object);
if (root_index != RootIndexMap::kInvalidRootIndex &&
jsgraph_->isolate()->heap()->RootIsImmortalImmovable(root_index)) {
// Write barriers are unnecessary for immortal immovable roots.
return kNoWriteBarrier;
}
}
if (field_type->Is(Type::TaggedPointer()) ||
value_type->Is(Type::TaggedPointer())) {
// Write barriers for heap objects are cheaper.
return kPointerWriteBarrier;
}
NumberMatcher m(value);
if (m.HasValue()) {
if (IsSmiDouble(m.Value())) {
// Storing a smi doesn't need a write barrier.
return kNoWriteBarrier;
}
// The NumberConstant will be represented as HeapNumber.
return kPointerWriteBarrier;
}
// Nothing could be proven about the value: stay conservative.
return kFullWriteBarrier;
}
// Untagged bases/representations never hold traced pointers.
return kNoWriteBarrier;
}
// Overload that also takes the field offset into account: stores into the
// map slot of a heap object can use the cheaper map write barrier.
WriteBarrierKind WriteBarrierKindFor(
BaseTaggedness base_taggedness,
MachineRepresentation field_representation, int field_offset,
Type* field_type, Node* value) {
if (base_taggedness == kTaggedBase &&
field_offset == HeapObject::kMapOffset) {
return kMapWriteBarrier;
}
// Otherwise delegate to the type-based computation above.
return WriteBarrierKindFor(base_taggedness, field_representation,
field_type, value);
}
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
......@@ -1203,16 +1136,6 @@ class RepresentationSelector {
access.machine_type.representation()));
ProcessRemainingInputs(node, 2);
SetOutput(node, MachineRepresentation::kNone);
if (lower()) {
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, access.machine_type.representation(),
access.offset, access.type, node->InputAt(1));
if (write_barrier_kind < access.write_barrier_kind) {
access.write_barrier_kind = write_barrier_kind;
NodeProperties::ChangeOp(
node, jsgraph_->simplified()->StoreField(access));
}
}
break;
}
case IrOpcode::kLoadBuffer: {
......@@ -1278,16 +1201,6 @@ class RepresentationSelector {
access.machine_type.representation())); // value
ProcessRemainingInputs(node, 3);
SetOutput(node, MachineRepresentation::kNone);
if (lower()) {
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, access.machine_type.representation(),
access.type, node->InputAt(2));
if (write_barrier_kind < access.write_barrier_kind) {
access.write_barrier_kind = write_barrier_kind;
NodeProperties::ChangeOp(
node, jsgraph_->simplified()->StoreElement(access));
}
}
break;
}
case IrOpcode::kObjectIsCallable:
......
......@@ -13,10 +13,6 @@ namespace v8 {
namespace internal {
namespace compiler {
// Hashes a BaseTaggedness by its underlying enumerator value.
size_t hash_value(BaseTaggedness base_taggedness) {
return static_cast<uint8_t>(base_taggedness);
}
std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
switch (base_taggedness) {
case kUntaggedBase:
......@@ -88,9 +84,6 @@ BufferAccess const BufferAccessOf(const Operator* op) {
bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
// On purpose we don't include the write barrier kind here, as this method is
// really only relevant for eliminating loads and they don't care about the
// write barrier mode.
return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
lhs.machine_type == rhs.machine_type;
}
......@@ -102,9 +95,6 @@ bool operator!=(FieldAccess const& lhs, FieldAccess const& rhs) {
size_t hash_value(FieldAccess const& access) {
// On purpose we don't include the write barrier kind here, as this method is
// really only relevant for eliminating loads and they don't care about the
// write barrier mode.
return base::hash_combine(access.base_is_tagged, access.offset,
access.machine_type);
}
......@@ -120,15 +110,12 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
}
#endif
access.type->PrintTo(os);
os << ", " << access.machine_type << ", " << access.write_barrier_kind << "]";
os << ", " << access.machine_type << "]";
return os;
}
bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
// On purpose we don't include the write barrier kind here, as this method is
// really only relevant for eliminating loads and they don't care about the
// write barrier mode.
return lhs.base_is_tagged == rhs.base_is_tagged &&
lhs.header_size == rhs.header_size &&
lhs.machine_type == rhs.machine_type;
......@@ -141,9 +128,6 @@ bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs) {
size_t hash_value(ElementAccess const& access) {
// On purpose we don't include the write barrier kind here, as this method is
// really only relevant for eliminating loads and they don't care about the
// write barrier mode.
return base::hash_combine(access.base_is_tagged, access.header_size,
access.machine_type);
}
......@@ -152,7 +136,7 @@ size_t hash_value(ElementAccess const& access) {
std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", ";
access.type->PrintTo(os);
os << ", " << access.machine_type << ", " << access.write_barrier_kind;
os << ", " << access.machine_type;
return os;
}
......
......@@ -25,9 +25,8 @@ namespace compiler {
class Operator;
struct SimplifiedOperatorGlobalCache;
enum BaseTaggedness : uint8_t { kUntaggedBase, kTaggedBase };
size_t hash_value(BaseTaggedness);
enum BaseTaggedness { kUntaggedBase, kTaggedBase };
std::ostream& operator<<(std::ostream&, BaseTaggedness);
......@@ -64,7 +63,6 @@ struct FieldAccess {
MaybeHandle<Name> name; // debugging only.
Type* type; // type of the field.
MachineType machine_type; // machine type of the field.
WriteBarrierKind write_barrier_kind; // write barrier hint.
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
......@@ -88,7 +86,6 @@ struct ElementAccess {
int header_size; // size of the header, without tag.
Type* type; // type of the element.
MachineType machine_type; // machine type of the element.
WriteBarrierKind write_barrier_kind; // write barrier hint.
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
......
......@@ -452,33 +452,6 @@ enum AllocationAlignment {
kSimd128Unaligned
};
// Supported write barrier modes.
enum WriteBarrierKind : uint8_t {
kNoWriteBarrier,
kMapWriteBarrier,
kPointerWriteBarrier,
kFullWriteBarrier
};

// Hashes a WriteBarrierKind by its underlying uint8_t value.
inline size_t hash_value(WriteBarrierKind kind) {
return static_cast<uint8_t>(kind);
}
// Streams a human-readable name for the given WriteBarrierKind.
inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
switch (kind) {
case kNoWriteBarrier:
return os << "NoWriteBarrier";
case kMapWriteBarrier:
return os << "MapWriteBarrier";
case kPointerWriteBarrier:
return os << "PointerWriteBarrier";
case kFullWriteBarrier:
return os << "FullWriteBarrier";
}
// All enumerators are handled above.
UNREACHABLE();
return os;
}
// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
// (allocated in the young generation if the object size and type
......
......@@ -1350,7 +1350,7 @@ TEST(LowerStoreField_to_store) {
StoreRepresentation rep = StoreRepresentationOf(store->op());
if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
CHECK_EQ(kMachineReps[i].representation(), rep.representation());
}
......@@ -1370,7 +1370,7 @@ TEST(LowerStoreField_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p1, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
}
......@@ -1415,7 +1415,7 @@ TEST(LowerStoreElement_to_store) {
StoreRepresentation rep = StoreRepresentationOf(store->op());
if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
CHECK_EQ(kMachineReps[i].representation(), rep.representation());
}
......@@ -1435,7 +1435,7 @@ TEST(LowerStoreElement_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p2, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
}
......
......@@ -1504,12 +1504,10 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
CHECK_EQ(boom_value, obj->RawFastDoublePropertyAt(double_field_index));
}
// Direction of the store exercised by the write-barrier tests below:
// old-space object pointing to an old-space vs. a new-space target.
// NOTE(review): semantics inferred from the enumerator names — confirm
// against TestWriteBarrierObjectShiftFieldsRight.
enum OldToWriteBarrierKind {
OLD_TO_OLD_WRITE_BARRIER,
OLD_TO_NEW_WRITE_BARRIER
};
enum WriteBarrierKind { OLD_TO_OLD_WRITE_BARRIER, OLD_TO_NEW_WRITE_BARRIER };
static void TestWriteBarrierObjectShiftFieldsRight(
OldToWriteBarrierKind write_barrier_kind) {
WriteBarrierKind write_barrier_kind) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::HandleScope scope(CcTest::isolate());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment