Commit 2778b460 authored by Tobias Tebbi, committed by Commit Bot

Reland "[turbofan] disable indirect jumps in Turbofan generated switches"

This is a reland of 957ac364.

To avoid a race condition that TSAN found when FLAG_turbo_disable_switch_jump_table
was accessed in the InstructionSelector, the flag is now threaded through the CompilationInfo.
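
In outline, this is the usual "sample the flag once on the main thread" pattern: the CompilationInfo constructor reads the global flag and stores it as an immutable bit, and the backend consults only that bit. A minimal standalone sketch of the idea (hypothetical names, heavily simplified from the real CompilationInfo/InstructionSelector shown in the diff below):

  #include <cstdint>

  // Hypothetical stand-in for CompilationInfo: the mutable global flag is
  // sampled exactly once, on the main thread, and copied into a private bit.
  class CompilationInfoSketch {
   public:
    explicit CompilationInfoSketch(bool disable_switch_jump_table) {
      if (!disable_switch_jump_table) flags_ |= kSwitchJumpTableEnabled;
    }
    bool switch_jump_table_enabled() const {
      return (flags_ & kSwitchJumpTableEnabled) != 0;
    }

   private:
    static constexpr uint32_t kSwitchJumpTableEnabled = 1u << 12;
    uint32_t flags_ = 0;
  };

  // Hypothetical stand-in for the backend: concurrent compile jobs read the
  // copied bit, never the global FLAG_* variable, so TSAN sees no race.
  bool ShouldEmitJumpTable(const CompilationInfoSketch& info) {
    return info.switch_jump_table_enabled();
  }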

Original change's description:
> [turbofan] disable indirect jumps in Turbofan generated switches
>
> Bug:
> Change-Id: I326bf518f895e7c030376210e7797f3dd4a9ae1f
> Reviewed-on: https://chromium-review.googlesource.com/873643
> Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
> Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#50984}

Change-Id: I76c2804f140cc116e30881bfd05365a09240e605
Reviewed-on: https://chromium-review.googlesource.com/895643
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51014}
parent 1efdab82
src/compilation-info.cc
@@ -47,6 +47,7 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
if (!FLAG_turbo_disable_switch_jump_table) SetFlag(kSwitchJumpTableEnabled);
// Collect source positions for optimized code when profiling or if debugger
// is active, to be able to get more precise source positions at the price of
......
src/compilation-info.h
@@ -51,6 +51,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
kBailoutOnUninitialized = 1 << 9,
kLoopPeelingEnabled = 1 << 10,
kUntrustedCodeMitigations = 1 << 11,
kSwitchJumpTableEnabled = 1 << 12,
};
// TODO(mtrofin): investigate if this might be generalized outside wasm, with
@@ -169,6 +170,10 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
return GetFlag(kUntrustedCodeMitigations);
}
bool switch_jump_table_enabled() const {
return GetFlag(kSwitchJumpTableEnabled);
}
// Code getters and setters.
void SetCode(Handle<Code> code) { code_ = code; }
......
src/compiler/arm/instruction-selector-arm.cc
@@ -1980,24 +1980,26 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
index_operand, value_operand, g.TempImmediate(sw.min_value));
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
index_operand, value_operand, g.TempImmediate(sw.min_value));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
......
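For intuition about the heuristic this guard now wraps: the selector weighs code-size cost against time cost at a fixed 3:1 ratio before choosing a jump table. As an illustrative calculation (numbers invented for the example, not taken from the change), a switch with case_count = 8 and value_range = 8 on ARM gives table_space_cost + 3 * table_time_cost = (4 + 8) + 3 * 3 = 21 against lookup_space_cost + 3 * lookup_time_cost = (3 + 2 * 8) + 3 * 8 = 43, so a table switch would normally be chosen; under kDisableSwitchJumpTable the whole comparison is skipped and the conditional-jump sequence is emitted instead.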
src/compiler/arm64/instruction-selector-arm64.cc
@@ -2361,24 +2361,26 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kArm64Sub32, index_operand, value_operand,
g.TempImmediate(sw.min_value));
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kArm64Sub32, index_operand, value_operand,
g.TempImmediate(sw.min_value));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
......
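The same mechanical wrapping repeats in each backend below; only the cost constants and the instruction used to bias the switch value into a table index differ (kArmSub and kArm64Sub32 above, then kIA32Lea, kMipsSub, kMips64Sub, kPPC_Sub, kS390_Lay, and kX64Lea32).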
src/compiler/ia32/instruction-selector-ia32.cc
@@ -1431,24 +1431,26 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
value_operand, g.TempImmediate(-sw.min_value));
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
value_operand, g.TempImmediate(-sw.min_value));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
......
src/compiler/instruction-selector.cc
@@ -24,6 +24,7 @@ InstructionSelector::InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table,
SourcePositionMode source_position_mode, Features features,
EnableScheduling enable_scheduling,
EnableSerialization enable_serialization)
@@ -45,6 +46,7 @@ InstructionSelector::InstructionSelector(
scheduler_(nullptr),
enable_scheduling_(enable_scheduling),
enable_serialization_(enable_serialization),
enable_switch_jump_table_(enable_switch_jump_table),
frame_(frame),
instruction_selection_failed_(false) {
instructions_.reserve(node_count);
......
src/compiler/instruction-selector.h
@@ -51,11 +51,16 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
enum EnableScheduling { kDisableScheduling, kEnableScheduling };
enum EnableSerialization { kDisableSerialization, kEnableSerialization };
enum EnableSwitchJumpTable {
kDisableSwitchJumpTable,
kEnableSwitchJumpTable
};
InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
@@ -445,6 +450,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
InstructionScheduler* scheduler_;
EnableScheduling enable_scheduling_;
EnableSerialization enable_serialization_;
EnableSwitchJumpTable enable_switch_jump_table_;
Frame* frame_;
bool instruction_selection_failed_;
};
......
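A small API note: like the existing EnableScheduling and EnableSerialization options next to it, the new setting is a two-value enum rather than a bare bool, so call sites read as InstructionSelector::kEnableSwitchJumpTable instead of an anonymous true/false argument.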
src/compiler/mips/instruction-selector-mips.cc
@@ -1608,24 +1608,26 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 9 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 2 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kMipsSub, index_operand, value_operand,
g.TempImmediate(sw.min_value));
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 9 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 2 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kMipsSub, index_operand, value_operand,
g.TempImmediate(sw.min_value));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
......
src/compiler/mips64/instruction-selector-mips64.cc
@@ -2216,24 +2216,26 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 10 + 2 * sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 2 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kMips64Sub, index_operand, value_operand,
g.TempImmediate(sw.min_value));
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 10 + 2 * sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 2 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kMips64Sub, index_operand, value_operand,
g.TempImmediate(sw.min_value));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
......
src/compiler/pipeline.cc
@@ -1525,6 +1525,9 @@ struct InstructionSelectionPhase {
InstructionSelector selector(
temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
data->schedule(), data->source_positions(), data->frame(),
data->info()->switch_jump_table_enabled()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,
......
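This pipeline hunk is the single point where the flag crosses from the front end into the backend: InstructionSelectionPhase translates CompilationInfo::switch_jump_table_enabled() into the selector's enum, so every architecture-specific VisitSwitch sees one immutable setting for the whole compilation job.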
src/compiler/ppc/instruction-selector-ppc.cc
@@ -1762,24 +1762,26 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kPPC_Sub, index_operand, value_operand,
g.TempImmediate(sw.min_value));
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kPPC_Sub, index_operand, value_operand,
g.TempImmediate(sw.min_value));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
......
src/compiler/s390/instruction-selector-s390.cc
@@ -2138,22 +2138,23 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
value_operand, g.TempImmediate(-sw.min_value));
}
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
value_operand, g.TempImmediate(-sw.min_value));
}
#if V8_TARGET_ARCH_S390X
InstructionOperand index_operand_zero_ext = g.TempRegister();
Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
@@ -2162,6 +2163,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
}
// Generate a sequence of conditional jumps.
return EmitLookupSwitch(sw, value_operand);
......
src/compiler/x64/instruction-selector-x64.cc
@@ -1959,27 +1959,30 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = g.TempRegister();
if (sw.min_value) {
// The leal automatically zero extends, so result is a valid 64-bit index.
Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
value_operand, g.TempImmediate(-sw.min_value));
} else {
// Zero extend, because we use it as 64-bit index into the jump table.
Emit(kX64Movl, index_operand, value_operand);
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
size_t lookup_time_cost = sw.case_count;
if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value > std::numeric_limits<int32_t>::min() &&
sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = g.TempRegister();
if (sw.min_value) {
// The leal automatically zero extends, so result is a valid 64-bit
// index.
Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
value_operand, g.TempImmediate(-sw.min_value));
} else {
// Zero extend, because we use it as 64-bit index into the jump table.
Emit(kX64Movl, index_operand, value_operand);
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
......
src/flag-definitions.h
@@ -490,6 +490,10 @@ DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
"Enable mitigations for executing untrusted code")
#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
DEFINE_BOOL(turbo_disable_switch_jump_table, false,
"do not emit jump-tables in Turbofan")
DEFINE_IMPLICATION(untrusted_code_mitigations, turbo_disable_switch_jump_table)
// Flags to help platform porters
DEFINE_BOOL(minimal, false,
"simplifies execution model to make porting "
......
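DEFINE_IMPLICATION(a, b) makes enabling flag a also enable flag b during flag processing, so builds running with untrusted-code mitigations get the jump-table disable automatically. Conceptually (an illustrative sketch, not the actual macro expansion):

  // Illustrative pseudocode for the implication declared above: turning on
  // the untrusted-code mitigations also disables TurboFan jump tables.
  if (FLAG_untrusted_code_mitigations) {
    FLAG_turbo_disable_switch_jump_table = true;
  }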
test/unittests/compiler/instruction-selector-unittest.cc
@@ -43,6 +43,7 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
SourcePositionTable source_position_table(graph());
InstructionSelector selector(test_->zone(), node_count, &linkage, &sequence,
schedule, &source_position_table, nullptr,
InstructionSelector::kEnableSwitchJumpTable,
source_position_mode, features,
InstructionSelector::kDisableScheduling);
selector.SelectInstructions();
......
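Note that the unit-test harness passes kEnableSwitchJumpTable unconditionally, so the existing instruction-selection tests keep exercising the jump-table path regardless of how the runtime flag defaults.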