Commit e301d71f authored by Georg Neis, committed by V8 LUCI CQ

[compiler] Teach InstructionScheduler about protected memory accesses

Because these instructions can trap, we don't want them to be reordered
as freely as unprotected accesses.

As part of this, make explicit which opcodes support a MemoryAccessMode.

Bug: v8:12018
Change-Id: I9db3053d7d62ffce6d3c95d62adce71ae40dae62
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3172770
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77031}
parent bb5fa039
......@@ -296,23 +296,50 @@ static_assert(
"All addressing modes must fit in the 5-bit AddressingModeField.");
using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
using MiscField = base::BitField<int, 22, 10>;
// {MiscField} is used for a variety of things, depending on the opcode.
// TODO(turbofan): There should be an abstraction that ensures safe encoding and
// decoding. {HasMemoryAccessMode} and its uses are a small step in that
// direction.
// LaneSizeField and AccessModeField are helper types to encode/decode a lane
// size, an access mode, or both inside the overlapping MiscField.
using LaneSizeField = base::BitField<int, 22, 8>;
using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
// TODO(turbofan): {HasMemoryAccessMode} is currently only used to guard
// decoding (in CodeGenerator and InstructionScheduler). Encoding (in
// InstructionSelector) is not yet guarded. There are in fact instructions for
// which InstructionSelector does set a MemoryAccessMode but CodeGenerator
// doesn't care to consume it (e.g. kArm64LdrDecompressTaggedSigned). This is
// scary. {HasMemoryAccessMode} does not include these instructions, so they can
// be easily found by guarding encoding.
inline bool HasMemoryAccessMode(ArchOpcode opcode) {
switch (opcode) {
#define CASE(Name) \
case k##Name: \
return true;
TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
#undef CASE
default:
return false;
}
}
using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
// AtomicWidthField overlaps with MiscField and is used for the various Atomic
// opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
// architectures are assumed to be 32bit wide.
using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
// AtomicMemoryOrderField overlaps with MiscField and is used for the various
// Atomic opcodes. This field is not used on all architectures. It is used on
// architectures where the codegen for kSeqCst and kAcqRel differ only by
// emitting fences.
using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
using MiscField = base::BitField<int, 22, 10>;
// This static assertion serves as an early warning if we are about to exhaust
// the available opcode space. If we are about to exhaust it, we should start
......
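The overlap between MiscField, LaneSizeField and AccessModeField is easier to see in a standalone sketch. Below is a minimal, self-contained imitation of base::BitField semantics (everything with a "Sketch" suffix is illustrative, not V8 code); the shift/size pairs match the declarations in the hunk above, and the final assertion shows why decoding AccessModeField is only safe for opcodes that actually carry a memory access mode.

// Minimal imitation of base::BitField, for illustration only.
#include <cstdint>

enum MemoryAccessModeSketch : uint32_t {
  kMemoryAccessDirectSketch = 0,     // assumed default mode (name is illustrative)
  kMemoryAccessProtectedSketch = 1,  // stands in for kMemoryAccessProtected
};

template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

// Same bit positions as the fields declared above.
using MiscFieldSketch = BitFieldSketch<int, 22, 10>;
using AccessModeFieldSketch = BitFieldSketch<MemoryAccessModeSketch, 30, 2>;

// Round trip: the instruction selector ORs the encoded mode into the
// instruction code; the scheduler and code generator decode it back out.
constexpr uint32_t protected_op =
    AccessModeFieldSketch::encode(kMemoryAccessProtectedSketch);
static_assert(AccessModeFieldSketch::decode(protected_op) ==
                  kMemoryAccessProtectedSketch,
              "access mode survives the round trip");

// The hazard the TODO above describes: bits 30..31 also belong to the 10-bit
// MiscField, so "decoding" an access mode from an opcode whose MiscField means
// something else yields an arbitrary value. Hence the HasMemoryAccessMode()
// guard before any decode.
constexpr uint32_t misc_op = MiscFieldSketch::encode(0x3FF);
static_assert(AccessModeFieldSketch::decode(misc_op) !=
                  kMemoryAccessDirectSketch,
              "misc bits alias the access-mode bits");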
......@@ -167,7 +167,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
pending_loads_.push_back(new_node);
} else if (instr->IsDeoptimizeCall() || instr->IsTrap()) {
} else if (instr->IsDeoptimizeCall() || CanTrap(instr)) {
// Ensure that deopts or traps are not reordered with respect to
// side-effect instructions.
if (last_side_effect_instr_ != nullptr) {
......@@ -176,7 +176,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
}
// Update last deoptimization or trap point.
if (instr->IsDeoptimizeCall() || instr->IsTrap()) {
if (instr->IsDeoptimizeCall() || CanTrap(instr)) {
last_deopt_or_trap_ = new_node;
}
......
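The effect of switching from IsTrap() to CanTrap() in AddInstruction can be sketched with a toy model (illustrative only; ToyInstr and ToyScheduler are not V8 classes, and the real AddInstruction also tracks pending loads and side-effect edges omitted here): a protected memory access is now ordered after the last side-effecting instruction and itself becomes the new last deopt/trap point, so later instructions cannot float above it.

#include <vector>

struct ToyInstr {
  bool is_deopt = false;
  bool is_trap = false;
  bool is_protected_access = false;           // memory access that may fault
  std::vector<const ToyInstr*> predecessors;  // must be scheduled after these
};

struct ToyScheduler {
  const ToyInstr* last_side_effect = nullptr;
  const ToyInstr* last_deopt_or_trap = nullptr;

  // Mirrors the new CanTrap(): explicit traps and protected accesses.
  static bool CanTrap(const ToyInstr& i) {
    return i.is_trap || i.is_protected_access;
  }

  void Add(ToyInstr& i) {
    if (i.is_deopt || CanTrap(i)) {
      // Never let a potentially trapping instruction float above a
      // side-effecting one...
      if (last_side_effect != nullptr) {
        i.predecessors.push_back(last_side_effect);
      }
      // ...and remember it as the latest point that later deopt/trap-sensitive
      // instructions must respect.
      last_deopt_or_trap = &i;
    }
  }
};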
......@@ -169,6 +169,12 @@ class InstructionScheduler final : public ZoneObject {
return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
}
bool CanTrap(const Instruction* instr) const {
return instr->IsTrap() ||
(instr->HasMemoryAccessMode() &&
instr->memory_access_mode() == kMemoryAccessProtected);
}
// The scheduler will not move the following instructions before the last
// deopt/trap check:
// * loads (this is conservative)
......@@ -184,7 +190,7 @@ class InstructionScheduler final : public ZoneObject {
// trap point we encountered.
bool DependsOnDeoptOrTrap(const Instruction* instr) const {
return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
instr->IsTrap() || HasSideEffect(instr) || IsLoadOperation(instr);
CanTrap(instr) || HasSideEffect(instr) || IsLoadOperation(instr);
}
// Identify nops used as a definition point for live-in registers at
......
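Spelled out as a plain predicate (a simplification of the member functions above, with the instruction's properties passed in as booleans), DependsOnDeoptOrTrap now keeps protected accesses, not just explicit traps, behind the last deopt/trap point:

// Simplified restatement; parameter names are illustrative.
bool DependsOnDeoptOrTrapSketch(bool may_need_deopt_or_trap_check,
                                bool is_deopt, bool is_trap,
                                bool is_protected_access,
                                bool has_side_effect, bool is_load) {
  // New in this change: a protected access counts as a potential trap.
  const bool can_trap = is_trap || is_protected_access;
  return may_need_deopt_or_trap_check || is_deopt || can_trap ||
         has_side_effect || is_load;
}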
......@@ -882,6 +882,13 @@ class V8_EXPORT_PRIVATE Instruction final {
return FlagsConditionField::decode(opcode());
}
int misc() const { return MiscField::decode(opcode()); }
bool HasMemoryAccessMode() const {
return compiler::HasMemoryAccessMode(arch_opcode());
}
MemoryAccessMode memory_access_mode() const {
DCHECK(HasMemoryAccessMode());
return AccessModeField::decode(opcode());
}
static Instruction* New(Zone* zone, InstructionCode opcode) {
return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
......
......@@ -693,7 +693,7 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
int pc) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
const MemoryAccessMode access_mode = instr->memory_access_mode();
if (access_mode == kMemoryAccessProtected) {
zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
}
......@@ -703,7 +703,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
DCHECK_NE(kMemoryAccessProtected, AccessModeField::decode(opcode));
DCHECK_NE(kMemoryAccessProtected, instr->memory_access_mode());
}
#endif // V8_ENABLE_WEBASSEMBLY
......
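For context on what EmitOOLTrapIfNeeded feeds into: for each protected access the code generator records the code offset, so that a hardware fault at that pc can be recognized as a Wasm out-of-bounds trap instead of a crash. A rough, hypothetical sketch of that bookkeeping follows (this is not V8's actual trap-handler API, only the shape of the idea):

#include <cstdint>
#include <unordered_set>

// Hypothetical registry of protected-instruction code offsets.
struct ProtectedPcRegistry {
  std::unordered_set<uintptr_t> protected_pcs;

  // Called per protected access during code emission (analogous in spirit to
  // allocating a WasmProtectedInstructionTrap for a pc, as in the hunk above).
  void Register(uintptr_t pc) { protected_pcs.insert(pc); }

  // Consulted by the fault handler: only faults at registered pcs are turned
  // into Wasm traps; anything else remains a genuine crash.
  bool IsProtected(uintptr_t pc) const {
    return protected_pcs.count(pc) != 0;
  }
};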
......@@ -1472,13 +1472,6 @@
'regress/regress-779407': [SKIP],
}], # variant == experimental_regexp
##############################################################################
['variant == instruction_scheduling or variant == stress_instruction_scheduling', {
# BUG(12018): These tests currently fail with --turbo-instruction-scheduling.
'regress/wasm/regress-1231950': [SKIP],
'regress/wasm/regress-1242300': [SKIP],
}], # variant == instruction_scheduling or variant == stress_instruction_scheduling
################################################################################
['single_generation', {
# These tests rely on allocation site tracking which only works in the young generation.
......