Commit 0301534c authored by Ng Zhi An, committed by Commit Bot

Rename LoadKind to MemoryAccessKind

LoadKind is no longer just for loads; we use it for stores as well
(starting with https://crrev.com/c/2473383). Rename it to something more
generic.

Bug: v8:10975,v8:10933
Change-Id: I5e5406ea475e06a83eb2eefe22d4824a99029944
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2481822
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70626}
parent 10b847c7
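For reference, a minimal sketch of the enum after the rename, copied from the machine-operator header hunk further below. The trailing comments are added here for orientation only; they are inferred from GetMemoryAccessKind and the SimdScalarLowering switch in this diff and are not part of the change itself.

enum class MemoryAccessKind {
  kNormal,     // ordinary access; lowered to a plain Load/Store
  kUnaligned,  // target lacks unaligned support for this representation
  kProtected,  // access guarded by the wasm trap handler
};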
@@ -669,7 +669,7 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
       UNIMPLEMENTED();
   }
   // ARM64 supports unaligned loads
-  DCHECK_NE(params.kind, LoadKind::kUnaligned);
+  DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
   Arm64OperandGenerator g(this);
   Node* base = node->InputAt(0);
...
@@ -401,9 +401,9 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
   }
   // IA32 supports unaligned loads.
-  DCHECK_NE(params.kind, LoadKind::kUnaligned);
+  DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
   // Trap handler is not supported on IA32.
-  DCHECK_NE(params.kind, LoadKind::kProtected);
+  DCHECK_NE(params.kind, MemoryAccessKind::kProtected);
   IA32OperandGenerator g(this);
   InstructionOperand outputs[1];
...
@@ -378,8 +378,8 @@ void InstructionSelector::VisitLoadLane(Node* node) {
   DCHECK_GE(5, input_count);
   // x64 supports unaligned loads.
-  DCHECK_NE(params.kind, LoadKind::kUnaligned);
-  if (params.kind == LoadKind::kProtected) {
+  DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
+  if (params.kind == MemoryAccessKind::kProtected) {
     opcode |= MiscField::encode(kMemoryAccessProtected);
   }
   Emit(opcode, 1, outputs, input_count, inputs);
@@ -429,9 +429,9 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
       UNREACHABLE();
   }
   // x64 supports unaligned loads
-  DCHECK_NE(params.kind, LoadKind::kUnaligned);
+  DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
   InstructionCode code = opcode;
-  if (params.kind == LoadKind::kProtected) {
+  if (params.kind == MemoryAccessKind::kProtected) {
     code |= MiscField::encode(kMemoryAccessProtected);
   }
   VisitLoad(node, node, code);
@@ -562,7 +562,7 @@ void InstructionSelector::VisitStoreLane(Node* node) {
   g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
   opcode |= AddressingModeField::encode(addressing_mode);
-  if (params.kind == LoadKind::kProtected) {
+  if (params.kind == MemoryAccessKind::kProtected) {
     opcode |= MiscField::encode(kMemoryAccessProtected);
   }
...
@@ -32,15 +32,15 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
   return os << rep.representation() << ", " << rep.write_barrier_kind();
 }

-size_t hash_value(LoadKind kind) { return static_cast<size_t>(kind); }
+size_t hash_value(MemoryAccessKind kind) { return static_cast<size_t>(kind); }

-std::ostream& operator<<(std::ostream& os, LoadKind kind) {
+std::ostream& operator<<(std::ostream& os, MemoryAccessKind kind) {
   switch (kind) {
-    case LoadKind::kNormal:
+    case MemoryAccessKind::kNormal:
       return os << "kNormal";
-    case LoadKind::kUnaligned:
+    case MemoryAccessKind::kUnaligned:
       return os << "kUnaligned";
-    case LoadKind::kProtected:
+    case MemoryAccessKind::kProtected:
       return os << "kProtected";
   }
   UNREACHABLE();
@@ -802,24 +802,24 @@ struct ProtectedLoadOperator : public Operator1<LoadRepresentation> {
                   1, 1, 1, 1, 0, LoadRepresentation(rep, sem)) {}
 };

-template <LoadKind kind, LoadTransformation type>
+template <MemoryAccessKind kind, LoadTransformation type>
 struct LoadTransformOperator : public Operator1<LoadTransformParameters> {
   LoadTransformOperator()
       : Operator1(IrOpcode::kLoadTransform,
-                  kind == LoadKind::kProtected
+                  kind == MemoryAccessKind::kProtected
                       ? Operator::kNoDeopt | Operator::kNoThrow
                       : Operator::kEliminatable,
                   "LoadTransform", 2, 1, 1, 1, 1, 0,
                   LoadTransformParameters{kind, type}) {}
 };

-template <LoadKind kind, MachineRepresentation rep, MachineSemantic sem,
+template <MemoryAccessKind kind, MachineRepresentation rep, MachineSemantic sem,
           uint8_t laneidx>
 struct LoadLaneOperator : public Operator1<LoadLaneParameters> {
   LoadLaneOperator()
       : Operator1(
             IrOpcode::kLoadLane,
-            kind == LoadKind::kProtected
+            kind == MemoryAccessKind::kProtected
                 ? Operator::kNoDeopt | Operator::kNoThrow
                 : Operator::kEliminatable,
             "LoadLane", 3, 1, 1, 1, 1, 0,
@@ -852,7 +852,7 @@ struct ProtectedStoreOperator : public Operator1<StoreRepresentation> {
                   StoreRepresentation(rep, kNoWriteBarrier)) {}
 };

-template <LoadKind kind, MachineRepresentation rep, uint8_t laneidx>
+template <MemoryAccessKind kind, MachineRepresentation rep, uint8_t laneidx>
 struct StoreLaneOperator : public Operator1<StoreLaneParameters> {
   StoreLaneOperator()
       : Operator1(IrOpcode::kStoreLane,
@@ -1189,11 +1189,12 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
 }

 const Operator* MachineOperatorBuilder::LoadTransform(
-    LoadKind kind, LoadTransformation transform) {
-#define LOAD_TRANSFORM_KIND(TYPE, KIND)                                        \
-  if (kind == LoadKind::k##KIND && transform == LoadTransformation::k##TYPE) { \
-    return GetCachedOperator<LoadTransformOperator<                            \
-        LoadKind::k##KIND, LoadTransformation::k##TYPE>>();                    \
+    MemoryAccessKind kind, LoadTransformation transform) {
+#define LOAD_TRANSFORM_KIND(TYPE, KIND)           \
+  if (kind == MemoryAccessKind::k##KIND &&        \
+      transform == LoadTransformation::k##TYPE) { \
+    return GetCachedOperator<LoadTransformOperator<                 \
+        MemoryAccessKind::k##KIND, LoadTransformation::k##TYPE>>(); \
   }
 #define LOAD_TRANSFORM(TYPE)        \
   LOAD_TRANSFORM_KIND(TYPE, Normal) \
@@ -1206,15 +1207,15 @@ const Operator* MachineOperatorBuilder::LoadTransform(
   UNREACHABLE();
 }

-const Operator* MachineOperatorBuilder::LoadLane(LoadKind kind,
+const Operator* MachineOperatorBuilder::LoadLane(MemoryAccessKind kind,
                                                  LoadRepresentation rep,
                                                  uint8_t laneidx) {
 #define LOAD_LANE_KIND(TYPE, KIND, LANEIDX)                               \
-  if (kind == LoadKind::k##KIND && rep == MachineType::TYPE() &&          \
+  if (kind == MemoryAccessKind::k##KIND && rep == MachineType::TYPE() &&  \
       laneidx == LANEIDX) {                                               \
     return GetCachedOperator<LoadLaneOperator<                            \
-        LoadKind::k##KIND, MachineType::TYPE().representation(),          \
+        MemoryAccessKind::k##KIND, MachineType::TYPE().representation(),  \
         MachineType::TYPE().semantic(), LANEIDX>>();                      \
   }
 #define LOAD_LANE_T(T, LANE) \
@@ -1240,14 +1241,14 @@ const Operator* MachineOperatorBuilder::LoadLane(LoadKind kind,
   UNREACHABLE();
 }

-const Operator* MachineOperatorBuilder::StoreLane(LoadKind kind,
+const Operator* MachineOperatorBuilder::StoreLane(MemoryAccessKind kind,
                                                   MachineRepresentation rep,
                                                   uint8_t laneidx) {
 #define STORE_LANE_KIND(REP, KIND, LANEIDX)                                 \
-  if (kind == LoadKind::k##KIND && rep == MachineRepresentation::REP &&     \
-      laneidx == LANEIDX) {                                                 \
+  if (kind == MemoryAccessKind::k##KIND &&                                  \
+      rep == MachineRepresentation::REP && laneidx == LANEIDX) {            \
     return GetCachedOperator<StoreLaneOperator<                             \
-        LoadKind::k##KIND, MachineRepresentation::REP, LANEIDX>>();         \
+        MemoryAccessKind::k##KIND, MachineRepresentation::REP, LANEIDX>>(); \
   }
 #define STORE_LANE_T(T, LANE) \
...
@@ -49,16 +49,15 @@ using LoadRepresentation = MachineType;
 V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
     V8_WARN_UNUSED_RESULT;

-// TODO(zhin): This is used by StoreLane too, rename this.
-enum class LoadKind {
+enum class MemoryAccessKind {
   kNormal,
   kUnaligned,
   kProtected,
 };

-size_t hash_value(LoadKind);
+size_t hash_value(MemoryAccessKind);

-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadKind);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, MemoryAccessKind);

 enum class LoadTransformation {
   kS128Load8Splat,
@@ -80,7 +79,7 @@ size_t hash_value(LoadTransformation);
 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadTransformation);

 struct LoadTransformParameters {
-  LoadKind kind;
+  MemoryAccessKind kind;
   LoadTransformation transformation;
 };
@@ -93,7 +92,7 @@ V8_EXPORT_PRIVATE LoadTransformParameters const& LoadTransformParametersOf(
     Operator const*) V8_WARN_UNUSED_RESULT;

 struct LoadLaneParameters {
-  LoadKind kind;
+  MemoryAccessKind kind;
   LoadRepresentation rep;
   uint8_t laneidx;
 };
@@ -137,7 +136,7 @@ UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
     Operator const*) V8_WARN_UNUSED_RESULT;

 struct StoreLaneParameters {
-  LoadKind kind;
+  MemoryAccessKind kind;
   MachineRepresentation rep;
   uint8_t laneidx;
 };
@@ -815,10 +814,11 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* PoisonedLoad(LoadRepresentation rep);
   const Operator* ProtectedLoad(LoadRepresentation rep);

-  const Operator* LoadTransform(LoadKind kind, LoadTransformation transform);
+  const Operator* LoadTransform(MemoryAccessKind kind,
+                                LoadTransformation transform);

   // SIMD load: replace a specified lane with [base + index].
-  const Operator* LoadLane(LoadKind kind, LoadRepresentation rep,
+  const Operator* LoadLane(MemoryAccessKind kind, LoadRepresentation rep,
                            uint8_t laneidx);

   // store [base + index], value
@@ -826,7 +826,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* ProtectedStore(MachineRepresentation rep);

   // SIMD store: store a specified lane of value into [base + index].
-  const Operator* StoreLane(LoadKind kind, MachineRepresentation rep,
+  const Operator* StoreLane(MemoryAccessKind kind, MachineRepresentation rep,
                             uint8_t laneidx);

   // unaligned load [base + index]
...
@@ -616,13 +616,13 @@ void SimdScalarLowering::LowerLoadTransformOp(Node* node, SimdType type) {
   const Operator* load_op;
   switch (params.kind) {
-    case LoadKind::kNormal:
+    case MemoryAccessKind::kNormal:
       load_op = machine()->Load(load_rep);
       break;
-    case LoadKind::kUnaligned:
+    case MemoryAccessKind::kUnaligned:
       load_op = machine()->UnalignedLoad(load_rep);
       break;
-    case LoadKind::kProtected:
+    case MemoryAccessKind::kProtected:
       load_op = machine()->ProtectedLoad(load_rep);
       break;
   }
...
@@ -3894,18 +3894,18 @@ LoadTransformation GetLoadTransformation(
   UNREACHABLE();
 }

-LoadKind GetLoadKind(MachineGraph* mcgraph, MachineType memtype,
-                     bool use_trap_handler) {
+MemoryAccessKind GetMemoryAccessKind(MachineGraph* mcgraph, MachineType memtype,
+                                     bool use_trap_handler) {
   if (memtype.representation() == MachineRepresentation::kWord8 ||
       mcgraph->machine()->UnalignedLoadSupported(memtype.representation())) {
     if (use_trap_handler) {
-      return LoadKind::kProtected;
+      return MemoryAccessKind::kProtected;
     }
-    return LoadKind::kNormal;
+    return MemoryAccessKind::kNormal;
   }
   // TODO(eholk): Support unaligned loads with trap handlers.
   DCHECK(!use_trap_handler);
-  return LoadKind::kUnaligned;
+  return MemoryAccessKind::kUnaligned;
 }
 }  // namespace
@@ -3994,13 +3994,14 @@ Node* WasmGraphBuilder::LoadLane(MachineType memtype, Node* value, Node* index,
   index =
       BoundsCheckMem(access_size, index, offset, position, kCanOmitBoundsCheck);

-  LoadKind load_kind = GetLoadKind(mcgraph(), memtype, use_trap_handler());
+  MemoryAccessKind load_kind =
+      GetMemoryAccessKind(mcgraph(), memtype, use_trap_handler());

   load = SetEffect(graph()->NewNode(
       mcgraph()->machine()->LoadLane(load_kind, memtype, laneidx),
       MemBuffer(offset), index, value, effect(), control()));

-  if (load_kind == LoadKind::kProtected) {
+  if (load_kind == MemoryAccessKind::kProtected) {
     SetSourcePosition(load, position);
   }
@@ -4029,7 +4030,7 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
   // therefore we divide them into separate "load" and "operation" nodes.
   load = LoadTransformBigEndian(type, memtype, transform, index, offset,
                                 alignment, position);
-  USE(GetLoadKind);
+  USE(GetMemoryAccessKind);
 #else
   // Wasm semantics throw on OOB. Introduce explicit bounds check and
   // conditioning when not using the trap handler.
@@ -4042,13 +4043,14 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
       BoundsCheckMem(access_size, index, offset, position, kCanOmitBoundsCheck);

   LoadTransformation transformation = GetLoadTransformation(memtype, transform);
-  LoadKind load_kind = GetLoadKind(mcgraph(), memtype, use_trap_handler());
+  MemoryAccessKind load_kind =
+      GetMemoryAccessKind(mcgraph(), memtype, use_trap_handler());

   load = SetEffect(graph()->NewNode(
       mcgraph()->machine()->LoadTransform(load_kind, transformation),
       MemBuffer(capped_offset), index, effect(), control()));

-  if (load_kind == LoadKind::kProtected) {
+  if (load_kind == MemoryAccessKind::kProtected) {
     SetSourcePosition(load, position);
   }
 #endif
@@ -4122,7 +4124,8 @@ Node* WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
                          position, kCanOmitBoundsCheck);

   MachineType memtype = MachineType(mem_rep, MachineSemantic::kNone);
-  LoadKind load_kind = GetLoadKind(mcgraph(), memtype, use_trap_handler());
+  MemoryAccessKind load_kind =
+      GetMemoryAccessKind(mcgraph(), memtype, use_trap_handler());

   // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
   uintptr_t capped_offset = static_cast<uintptr_t>(offset);
@@ -4131,7 +4134,7 @@ Node* WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
       mcgraph()->machine()->StoreLane(load_kind, mem_rep, laneidx),
       MemBuffer(capped_offset), index, val, effect(), control()));

-  if (load_kind == LoadKind::kProtected) {
+  if (load_kind == MemoryAccessKind::kProtected) {
     SetSourcePosition(store, position);
   }
...