Commit 0cfbaff4 authored by Jaroslav Sevcik, committed by Commit Bot

Revert "[turbofan] disable indirect jumps in Turbofan generated switches"

This reverts commit 957ac364.

Reason for revert: Breaks roll (crbug.com/v8/7388)

Original change's description:
> [turbofan] disable indirect jumps in Turbofan generated switches
> 
> Bug: 
> Change-Id: I326bf518f895e7c030376210e7797f3dd4a9ae1f
> Reviewed-on: https://chromium-review.googlesource.com/873643
> Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
> Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#50984}

TBR=jarin@chromium.org,tebbi@chromium.org

Change-Id: Id2546e722179e6d8f2f102ce02fb18d696a79764
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/894385
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50995}
parent 35ca0a01
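Context for the diff below: every instruction-selector hunk removes the same guard, so the table-switch heuristic is no longer wrapped in if (!FLAG_turbo_disable_switch_jump_table). The heuristic itself is untouched. As a standalone sketch (hypothetical helper name and simplified parameters, not V8's actual API; the constants vary per architecture, e.g. table_space_cost is 9 + sw.value_range on MIPS and 10 + 2 * sw.value_range on MIPS64, and IA32/x64 require case_count > 4), it reads roughly like this:

#include <cstddef>
#include <cstdint>
#include <limits>

// Sketch of the space/time cost model from the hunks below (ARM constants).
// A jump table costs a small fixed header plus one entry per value in the
// switched range, and a constant ~3 instructions of time; a compare chain
// costs about two instructions of space per case plus a small constant, and
// one compare/branch of time per case.
bool ShouldEmitTableSwitch(size_t case_count, size_t value_range,
                           int32_t min_value) {
  static const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         // min_value is later subtracted (or negated for the lea variants),
         // so INT32_MIN is excluded, likely to avoid overflow.
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}

In the reverted change this whole block was skipped whenever the flag was set (and --untrusted-code-mitigations implied it); the revert restores the unconditional heuristic.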
@@ -1980,26 +1980,24 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
 
   // Emit either ArchTableSwitch or ArchLookupSwitch.
-  if (!FLAG_turbo_disable_switch_jump_table) {
-    static const size_t kMaxTableSwitchValueRange = 2 << 16;
-    size_t table_space_cost = 4 + sw.value_range;
-    size_t table_time_cost = 3;
-    size_t lookup_space_cost = 3 + 2 * sw.case_count;
-    size_t lookup_time_cost = sw.case_count;
-    if (sw.case_count > 0 &&
-        table_space_cost + 3 * table_time_cost <=
-            lookup_space_cost + 3 * lookup_time_cost &&
-        sw.min_value > std::numeric_limits<int32_t>::min() &&
-        sw.value_range <= kMaxTableSwitchValueRange) {
-      InstructionOperand index_operand = value_operand;
-      if (sw.min_value) {
-        index_operand = g.TempRegister();
-        Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
-             index_operand, value_operand, g.TempImmediate(sw.min_value));
-      }
-      // Generate a table lookup.
-      return EmitTableSwitch(sw, index_operand);
-    }
+  static const size_t kMaxTableSwitchValueRange = 2 << 16;
+  size_t table_space_cost = 4 + sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
+  if (sw.case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min() &&
+      sw.value_range <= kMaxTableSwitchValueRange) {
+    InstructionOperand index_operand = value_operand;
+    if (sw.min_value) {
+      index_operand = g.TempRegister();
+      Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
+           index_operand, value_operand, g.TempImmediate(sw.min_value));
+    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
   }
 
   // Generate a sequence of conditional jumps.
@@ -2361,26 +2361,24 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
 
   // Emit either ArchTableSwitch or ArchLookupSwitch.
-  if (!FLAG_turbo_disable_switch_jump_table) {
-    static const size_t kMaxTableSwitchValueRange = 2 << 16;
-    size_t table_space_cost = 4 + sw.value_range;
-    size_t table_time_cost = 3;
-    size_t lookup_space_cost = 3 + 2 * sw.case_count;
-    size_t lookup_time_cost = sw.case_count;
-    if (sw.case_count > 0 &&
-        table_space_cost + 3 * table_time_cost <=
-            lookup_space_cost + 3 * lookup_time_cost &&
-        sw.min_value > std::numeric_limits<int32_t>::min() &&
-        sw.value_range <= kMaxTableSwitchValueRange) {
-      InstructionOperand index_operand = value_operand;
-      if (sw.min_value) {
-        index_operand = g.TempRegister();
-        Emit(kArm64Sub32, index_operand, value_operand,
-             g.TempImmediate(sw.min_value));
-      }
-      // Generate a table lookup.
-      return EmitTableSwitch(sw, index_operand);
-    }
+  static const size_t kMaxTableSwitchValueRange = 2 << 16;
+  size_t table_space_cost = 4 + sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
+  if (sw.case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min() &&
+      sw.value_range <= kMaxTableSwitchValueRange) {
+    InstructionOperand index_operand = value_operand;
+    if (sw.min_value) {
+      index_operand = g.TempRegister();
+      Emit(kArm64Sub32, index_operand, value_operand,
+           g.TempImmediate(sw.min_value));
+    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
   }
 
   // Generate a sequence of conditional jumps.
@@ -1431,26 +1431,24 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
 
   // Emit either ArchTableSwitch or ArchLookupSwitch.
-  if (!FLAG_turbo_disable_switch_jump_table) {
-    static const size_t kMaxTableSwitchValueRange = 2 << 16;
-    size_t table_space_cost = 4 + sw.value_range;
-    size_t table_time_cost = 3;
-    size_t lookup_space_cost = 3 + 2 * sw.case_count;
-    size_t lookup_time_cost = sw.case_count;
-    if (sw.case_count > 4 &&
-        table_space_cost + 3 * table_time_cost <=
-            lookup_space_cost + 3 * lookup_time_cost &&
-        sw.min_value > std::numeric_limits<int32_t>::min() &&
-        sw.value_range <= kMaxTableSwitchValueRange) {
-      InstructionOperand index_operand = value_operand;
-      if (sw.min_value) {
-        index_operand = g.TempRegister();
-        Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
-             value_operand, g.TempImmediate(-sw.min_value));
-      }
-      // Generate a table lookup.
-      return EmitTableSwitch(sw, index_operand);
-    }
+  static const size_t kMaxTableSwitchValueRange = 2 << 16;
+  size_t table_space_cost = 4 + sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
+  if (sw.case_count > 4 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min() &&
+      sw.value_range <= kMaxTableSwitchValueRange) {
+    InstructionOperand index_operand = value_operand;
+    if (sw.min_value) {
+      index_operand = g.TempRegister();
+      Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
+           value_operand, g.TempImmediate(-sw.min_value));
+    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
   }
 
   // Generate a sequence of conditional jumps.
@@ -1608,26 +1608,24 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
 
   // Emit either ArchTableSwitch or ArchLookupSwitch.
-  if (!FLAG_turbo_disable_switch_jump_table) {
-    static const size_t kMaxTableSwitchValueRange = 2 << 16;
-    size_t table_space_cost = 9 + sw.value_range;
-    size_t table_time_cost = 3;
-    size_t lookup_space_cost = 2 + 2 * sw.case_count;
-    size_t lookup_time_cost = sw.case_count;
-    if (sw.case_count > 0 &&
-        table_space_cost + 3 * table_time_cost <=
-            lookup_space_cost + 3 * lookup_time_cost &&
-        sw.min_value > std::numeric_limits<int32_t>::min() &&
-        sw.value_range <= kMaxTableSwitchValueRange) {
-      InstructionOperand index_operand = value_operand;
-      if (sw.min_value) {
-        index_operand = g.TempRegister();
-        Emit(kMipsSub, index_operand, value_operand,
-             g.TempImmediate(sw.min_value));
-      }
-      // Generate a table lookup.
-      return EmitTableSwitch(sw, index_operand);
-    }
+  static const size_t kMaxTableSwitchValueRange = 2 << 16;
+  size_t table_space_cost = 9 + sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 2 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
+  if (sw.case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min() &&
+      sw.value_range <= kMaxTableSwitchValueRange) {
+    InstructionOperand index_operand = value_operand;
+    if (sw.min_value) {
+      index_operand = g.TempRegister();
+      Emit(kMipsSub, index_operand, value_operand,
+           g.TempImmediate(sw.min_value));
+    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
   }
 
   // Generate a sequence of conditional jumps.
@@ -2216,26 +2216,24 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
 
   // Emit either ArchTableSwitch or ArchLookupSwitch.
-  if (!FLAG_turbo_disable_switch_jump_table) {
-    static const size_t kMaxTableSwitchValueRange = 2 << 16;
-    size_t table_space_cost = 10 + 2 * sw.value_range;
-    size_t table_time_cost = 3;
-    size_t lookup_space_cost = 2 + 2 * sw.case_count;
-    size_t lookup_time_cost = sw.case_count;
-    if (sw.case_count > 0 &&
-        table_space_cost + 3 * table_time_cost <=
-            lookup_space_cost + 3 * lookup_time_cost &&
-        sw.min_value > std::numeric_limits<int32_t>::min() &&
-        sw.value_range <= kMaxTableSwitchValueRange) {
-      InstructionOperand index_operand = value_operand;
-      if (sw.min_value) {
-        index_operand = g.TempRegister();
-        Emit(kMips64Sub, index_operand, value_operand,
-             g.TempImmediate(sw.min_value));
-      }
-      // Generate a table lookup.
-      return EmitTableSwitch(sw, index_operand);
-    }
+  static const size_t kMaxTableSwitchValueRange = 2 << 16;
+  size_t table_space_cost = 10 + 2 * sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 2 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
+  if (sw.case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min() &&
+      sw.value_range <= kMaxTableSwitchValueRange) {
+    InstructionOperand index_operand = value_operand;
+    if (sw.min_value) {
+      index_operand = g.TempRegister();
+      Emit(kMips64Sub, index_operand, value_operand,
+           g.TempImmediate(sw.min_value));
+    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
   }
 
   // Generate a sequence of conditional jumps.
@@ -1762,26 +1762,24 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
 
   // Emit either ArchTableSwitch or ArchLookupSwitch.
-  if (!FLAG_turbo_disable_switch_jump_table) {
-    static const size_t kMaxTableSwitchValueRange = 2 << 16;
-    size_t table_space_cost = 4 + sw.value_range;
-    size_t table_time_cost = 3;
-    size_t lookup_space_cost = 3 + 2 * sw.case_count;
-    size_t lookup_time_cost = sw.case_count;
-    if (sw.case_count > 0 &&
-        table_space_cost + 3 * table_time_cost <=
-            lookup_space_cost + 3 * lookup_time_cost &&
-        sw.min_value > std::numeric_limits<int32_t>::min() &&
-        sw.value_range <= kMaxTableSwitchValueRange) {
-      InstructionOperand index_operand = value_operand;
-      if (sw.min_value) {
-        index_operand = g.TempRegister();
-        Emit(kPPC_Sub, index_operand, value_operand,
-             g.TempImmediate(sw.min_value));
-      }
-      // Generate a table lookup.
-      return EmitTableSwitch(sw, index_operand);
-    }
+  static const size_t kMaxTableSwitchValueRange = 2 << 16;
+  size_t table_space_cost = 4 + sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
+  if (sw.case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min() &&
+      sw.value_range <= kMaxTableSwitchValueRange) {
+    InstructionOperand index_operand = value_operand;
+    if (sw.min_value) {
+      index_operand = g.TempRegister();
+      Emit(kPPC_Sub, index_operand, value_operand,
+           g.TempImmediate(sw.min_value));
+    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
   }
 
   // Generate a sequence of conditional jumps.
@@ -2138,23 +2138,22 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
 
   // Emit either ArchTableSwitch or ArchLookupSwitch.
-  if (!FLAG_turbo_disable_switch_jump_table) {
-    static const size_t kMaxTableSwitchValueRange = 2 << 16;
-    size_t table_space_cost = 4 + sw.value_range;
-    size_t table_time_cost = 3;
-    size_t lookup_space_cost = 3 + 2 * sw.case_count;
-    size_t lookup_time_cost = sw.case_count;
-    if (sw.case_count > 0 &&
-        table_space_cost + 3 * table_time_cost <=
-            lookup_space_cost + 3 * lookup_time_cost &&
-        sw.min_value > std::numeric_limits<int32_t>::min() &&
-        sw.value_range <= kMaxTableSwitchValueRange) {
-      InstructionOperand index_operand = value_operand;
-      if (sw.min_value) {
-        index_operand = g.TempRegister();
-        Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
-             value_operand, g.TempImmediate(-sw.min_value));
-      }
+  static const size_t kMaxTableSwitchValueRange = 2 << 16;
+  size_t table_space_cost = 4 + sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
+  if (sw.case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min() &&
+      sw.value_range <= kMaxTableSwitchValueRange) {
+    InstructionOperand index_operand = value_operand;
+    if (sw.min_value) {
+      index_operand = g.TempRegister();
+      Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
+           value_operand, g.TempImmediate(-sw.min_value));
+    }
 #if V8_TARGET_ARCH_S390X
     InstructionOperand index_operand_zero_ext = g.TempRegister();
     Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
@@ -2163,7 +2162,6 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
-      // Generate a table lookup.
-      return EmitTableSwitch(sw, index_operand);
-    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
   }
 
   // Generate a sequence of conditional jumps.
   return EmitLookupSwitch(sw, value_operand);
@@ -1959,30 +1959,27 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
 
   // Emit either ArchTableSwitch or ArchLookupSwitch.
-  if (!FLAG_turbo_disable_switch_jump_table) {
-    static const size_t kMaxTableSwitchValueRange = 2 << 16;
-    size_t table_space_cost = 4 + sw.value_range;
-    size_t table_time_cost = 3;
-    size_t lookup_space_cost = 3 + 2 * sw.case_count;
-    size_t lookup_time_cost = sw.case_count;
-    if (sw.case_count > 4 &&
-        table_space_cost + 3 * table_time_cost <=
-            lookup_space_cost + 3 * lookup_time_cost &&
-        sw.min_value > std::numeric_limits<int32_t>::min() &&
-        sw.value_range <= kMaxTableSwitchValueRange) {
-      InstructionOperand index_operand = g.TempRegister();
-      if (sw.min_value) {
-        // The leal automatically zero extends, so result is a valid 64-bit
-        // index.
-        Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
-             value_operand, g.TempImmediate(-sw.min_value));
-      } else {
-        // Zero extend, because we use it as 64-bit index into the jump table.
-        Emit(kX64Movl, index_operand, value_operand);
-      }
-      // Generate a table lookup.
-      return EmitTableSwitch(sw, index_operand);
-    }
+  static const size_t kMaxTableSwitchValueRange = 2 << 16;
+  size_t table_space_cost = 4 + sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
+  if (sw.case_count > 4 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min() &&
+      sw.value_range <= kMaxTableSwitchValueRange) {
+    InstructionOperand index_operand = g.TempRegister();
+    if (sw.min_value) {
+      // The leal automatically zero extends, so result is a valid 64-bit index.
+      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
+           value_operand, g.TempImmediate(-sw.min_value));
+    } else {
+      // Zero extend, because we use it as 64-bit index into the jump table.
+      Emit(kX64Movl, index_operand, value_operand);
+    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
   }
 
   // Generate a sequence of conditional jumps.
@@ -490,10 +490,6 @@ DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
             "Enable mitigations for executing untrusted code")
 #undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
 
-DEFINE_BOOL(turbo_disable_switch_jump_table, false,
-            "do not emit jump-tables in Turbofan")
-DEFINE_IMPLICATION(untrusted_code_mitigations, turbo_disable_switch_jump_table)
-
 // Flags to help platform porters
 DEFINE_BOOL(minimal, false,
             "simplifies execution model to make porting "
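The flag hunk above also drops the flag definition and its implication. As I understand the V8 flag macros (an assumption, not stated in this commit), DEFINE_IMPLICATION(a, b) forces b on whenever a is set, so the removed lines meant that --untrusted-code-mitigations automatically disabled jump tables. A minimal sketch of that relationship with a hypothetical helper, not the macros' real expansion:

// Hypothetical illustration of the removed DEFINE_IMPLICATION: the effective
// value of turbo_disable_switch_jump_table under the old flag definitions.
bool EffectiveDisableSwitchJumpTable(bool untrusted_code_mitigations,
                                     bool turbo_disable_switch_jump_table) {
  // Setting --untrusted-code-mitigations also turned the jump-table-disabling
  // flag on; either flag alone was enough to disable jump tables.
  return turbo_disable_switch_jump_table || untrusted_code_mitigations;
}

With the flag gone, VisitSwitch always runs the table-vs-lookup heuristic and may emit ArchTableSwitch regardless of --untrusted-code-mitigations.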