Commit af8ff984 authored by Jakob Kummerow, committed by Commit Bot

[ubsan] Fix numerical overflows in the compiler

Mostly signed integer overflows, and a few cases of double
division by zero (which is defined by IEEE-754 to return
Infinity (or NaN for 0/0) but is UB in C++).

Bug: v8:3770
Change-Id: I8007987594ff534ca697c1c3247215a72a001343
Reviewed-on: https://chromium-review.googlesource.com/c/1403132
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58693}
parent 566a885d
......@@ -3106,6 +3106,7 @@ v8_component("v8_libbase") {
"src/base/once.cc",
"src/base/once.h",
"src/base/optional.h",
"src/base/overflowing-math.h",
"src/base/page-allocator.cc",
"src/base/page-allocator.h",
"src/base/platform/condition-variable.cc",
......
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_OVERFLOWING_MATH_H_
#define V8_BASE_OVERFLOWING_MATH_H_
#include <stdint.h>

#include <cmath>
#include <limits>
#include <type_traits>

#include "src/base/macros.h"
namespace v8 {
namespace base {
// Helpers for performing overflowing arithmetic operations without relying
// on C++ undefined behavior.
// Compile-time guard: these helpers are only meaningful for signed integral
// types (unsigned arithmetic already wraps with defined behavior in C++).
#define ASSERT_SIGNED_INTEGER_TYPE(Type) \
static_assert(std::is_integral<Type>::value && std::is_signed<Type>::value, \
"use this for signed integer types");
// Expands to Name##WithWraparound(a, b): performs `a OP b` on the operands'
// unsigned counterparts -- where overflow wraps modulo 2^N instead of being
// undefined behavior -- and casts the resulting bit pattern back to the
// signed type, yielding two's-complement wraparound semantics.
// (No comments may appear inside the macro body: a // comment would swallow
// the trailing line-continuation backslashes.)
#define OP_WITH_WRAPAROUND(Name, OP) \
template <typename signed_type> \
inline signed_type Name##WithWraparound(signed_type a, signed_type b) { \
ASSERT_SIGNED_INTEGER_TYPE(signed_type); \
typedef typename std::make_unsigned<signed_type>::type unsigned_type; \
unsigned_type a_unsigned = static_cast<unsigned_type>(a); \
unsigned_type b_unsigned = static_cast<unsigned_type>(b); \
unsigned_type result = a_unsigned OP b_unsigned; \
return static_cast<signed_type>(result); \
}
OP_WITH_WRAPAROUND(Add, +)  // AddWithWraparound(a, b)
OP_WITH_WRAPAROUND(Sub, -)  // SubWithWraparound(a, b)
OP_WITH_WRAPAROUND(Mul, *)  // MulWithWraparound(a, b); int16_t specialized below
// 16-bit integers are special due to C++'s implicit conversion rules.
// See https://bugs.llvm.org/show_bug.cgi?id=25580.
template <>
inline int16_t MulWithWraparound(int16_t a, int16_t b) {
  // Widen to 32 bits up front: uint16_t operands would be promoted to
  // (signed) int, whose product can overflow -- which is UB. The low 16 bits
  // of the 32-bit product are the wrapped result.
  uint32_t product = static_cast<uint32_t>(a) * static_cast<uint32_t>(b);
  return static_cast<int16_t>(static_cast<uint16_t>(product));
}
#undef OP_WITH_WRAPAROUND
// Returns -a, except that negating the minimum value (which has no positive
// counterpart in two's complement) wraps back to itself instead of being UB.
template <typename signed_type>
inline signed_type NegateWithWraparound(signed_type a) {
  static_assert(std::is_integral<signed_type>::value &&
                    std::is_signed<signed_type>::value,
                "use this for signed integer types");
  const signed_type kMin = std::numeric_limits<signed_type>::min();
  return a == kMin ? a : -a;
}
// Returns a << b with hardware-style semantics: the shift count is taken
// modulo the bit width (shifting by >= width is UB in C++), and the shift is
// carried out on the unsigned representation so bits may fall off the top
// without triggering signed-overflow UB.
template <typename signed_type>
inline signed_type ShlWithWraparound(signed_type a, signed_type b) {
  static_assert(std::is_integral<signed_type>::value &&
                    std::is_signed<signed_type>::value,
                "use this for signed integer types");
  typedef typename std::make_unsigned<signed_type>::type unsigned_type;
  constexpr unsigned_type kShiftCountMask = (sizeof(signed_type) * 8) - 1;
  unsigned_type value = static_cast<unsigned_type>(a);
  return static_cast<signed_type>(value << (b & kShiftCountMask));
}
#undef ASSERT_SIGNED_INTEGER_TYPE
// Returns the quotient x/y with IEEE-754 semantics even when y == 0
// (plain C++ division by zero is UB, even for floating-point types):
//   0/0 and NaN/0 -> NaN; otherwise x/±0 -> ±infinity with the sign given
//   by the XOR of the operands' signs.
template <typename T>
inline T Divide(T x, T y) {
  // Non-zero divisor: ordinary division is well-defined.
  if (y != 0) return x / y;
  if (x == 0 || std::isnan(x)) return std::numeric_limits<T>::quiet_NaN();
  // y is ±0 here; std::signbit distinguishes -0 from +0.
  const T kInf = std::numeric_limits<T>::infinity();
  return ((x < 0) != std::signbit(y)) ? -kInf : kInf;
}
// Returns 1.0f / a, delegating to Divide() so that a == 0 yields the
// IEEE-754 infinity (sign taken from a) instead of C++ UB.
inline float Recip(float a) { return Divide(1.0f, a); }
// Returns 1/sqrt(a), mapping a == ±0 to the correspondingly-signed infinity
// instead of performing a division by zero (which is UB in C++).
// Negative or NaN inputs propagate NaN through std::sqrt.
inline float RecipSqrt(float a) {
  if (a != 0) return 1.0f / std::sqrt(a);
  const float kInf = std::numeric_limits<float>::infinity();
  return std::signbit(a) ? -kInf : kInf;
}
} // namespace base
} // namespace v8
#endif // V8_BASE_OVERFLOWING_MATH_H_
......@@ -5,6 +5,7 @@
#include "src/compiler/backend/code-generator.h"
#include "src/assembler-inl.h"
#include "src/base/overflowing-math.h"
#include "src/callable.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/gap-resolver.h"
......@@ -1667,7 +1668,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (constant_summand > 0) {
__ add(i.OutputRegister(), Immediate(constant_summand));
} else if (constant_summand < 0) {
__ sub(i.OutputRegister(), Immediate(-constant_summand));
__ sub(i.OutputRegister(),
Immediate(base::NegateWithWraparound(constant_summand)));
}
} else if (mode == kMode_MR1) {
if (i.InputRegister(1) == i.OutputRegister()) {
......
......@@ -6,6 +6,7 @@
#include <limits>
#include "src/base/overflowing-math.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
......@@ -2046,7 +2047,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (constant_summand > 0) {
__ addl(i.OutputRegister(), Immediate(constant_summand));
} else {
__ subl(i.OutputRegister(), Immediate(-constant_summand));
__ subl(i.OutputRegister(),
Immediate(base::NegateWithWraparound(constant_summand)));
}
} else if (mode == kMode_MR1) {
if (i.InputRegister(1) == i.OutputRegister()) {
......
......@@ -5,6 +5,7 @@
#include <algorithm>
#include "src/base/adapters.h"
#include "src/base/overflowing-math.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
......@@ -939,7 +940,7 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
// by negating the value.
Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(-m.right().Value()));
g.TempImmediate(base::NegateWithWraparound(m.right().Value())));
} else {
VisitBinop(this, node, kX64Sub32);
}
......
......@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-graph.h"
......@@ -220,7 +221,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x * 1 => x
if (m.IsFoldable()) { // K * K => K
return ReplaceInt32(m.left().Value() * m.right().Value());
return ReplaceInt32(
base::MulWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.right().Is(-1)) { // x * -1 => 0 - x
node->ReplaceInput(0, Int32Constant(0));
......@@ -435,7 +437,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceFloat64(m.left().Value() - m.left().Value());
}
if (m.IsFoldable()) { // K / K => K
return ReplaceFloat64(m.left().Value() / m.right().Value());
return ReplaceFloat64(
base::Divide(m.left().Value(), m.right().Value()));
}
if (allow_signalling_nan_ && m.right().Is(-1)) { // x / -1.0 => -x
node->RemoveInput(1);
......@@ -724,8 +727,8 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x
if (m.IsFoldable()) { // K + K => K
return ReplaceUint32(bit_cast<uint32_t>(m.left().Value()) +
bit_cast<uint32_t>(m.right().Value()));
return ReplaceInt32(
base::AddWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.left().IsInt32Sub()) {
Int32BinopMatcher mleft(m.left().node());
......@@ -754,8 +757,8 @@ Reduction MachineOperatorReducer::ReduceInt64Add(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => 0
if (m.IsFoldable()) {
return Replace(Uint64Constant(bit_cast<uint64_t>(m.left().Value()) +
bit_cast<uint64_t>(m.right().Value())));
return ReplaceInt64(
base::AddWithWraparound(m.left().Value(), m.right().Value()));
}
return NoChange();
}
......@@ -765,12 +768,13 @@ Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
if (m.IsFoldable()) { // K - K => K
return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
static_cast<uint32_t>(m.right().Value()));
return ReplaceInt32(
base::SubWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
if (m.right().HasValue()) { // x - K => x + -K
node->ReplaceInput(1, Int32Constant(-m.right().Value()));
node->ReplaceInput(
1, Int32Constant(base::NegateWithWraparound(m.right().Value())));
NodeProperties::ChangeOp(node, machine()->Int32Add());
Reduction const reduction = ReduceInt32Add(node);
return reduction.Changed() ? reduction : Changed(node);
......@@ -783,12 +787,13 @@ Reduction MachineOperatorReducer::ReduceInt64Sub(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
if (m.IsFoldable()) { // K - K => K
return Replace(Uint64Constant(bit_cast<uint64_t>(m.left().Value()) -
bit_cast<uint64_t>(m.right().Value())));
return ReplaceInt64(
base::SubWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.LeftEqualsRight()) return Replace(Int64Constant(0)); // x - x => 0
if (m.right().HasValue()) { // x - K => x + -K
node->ReplaceInput(1, Int64Constant(-m.right().Value()));
node->ReplaceInput(
1, Int64Constant(base::NegateWithWraparound(m.right().Value())));
NodeProperties::ChangeOp(node, machine()->Int64Add());
Reduction const reduction = ReduceInt64Add(node);
return reduction.Changed() ? reduction : Changed(node);
......@@ -1195,6 +1200,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
}
if (m.right().IsNegativePowerOf2()) {
int32_t const mask = m.right().Value();
int32_t const neg_mask = base::NegateWithWraparound(mask);
if (m.left().IsWord32Shl()) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() &&
......@@ -1216,7 +1222,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
}
if (mleft.left().IsInt32Mul()) {
Int32BinopMatcher mleftleft(mleft.left().node());
if (mleftleft.right().IsMultipleOf(-mask)) {
if (mleftleft.right().IsMultipleOf(neg_mask)) {
// (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
node->ReplaceInput(0,
Word32And(mleft.right().node(), m.right().node()));
......@@ -1228,7 +1234,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
}
if (mleft.right().IsInt32Mul()) {
Int32BinopMatcher mleftright(mleft.right().node());
if (mleftright.right().IsMultipleOf(-mask)) {
if (mleftright.right().IsMultipleOf(neg_mask)) {
// (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
node->ReplaceInput(0,
Word32And(mleft.left().node(), m.right().node()));
......@@ -1264,7 +1270,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
}
} else if (m.left().IsInt32Mul()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().IsMultipleOf(-mask)) {
if (mleft.right().IsMultipleOf(neg_mask)) {
// (x * (K << L)) & (-1 << L) => x * (K << L)
return Replace(mleft.node());
}
......
......@@ -131,7 +131,8 @@ struct IntMatcher final : public ValueMatcher<T, kOpcode> {
}
bool IsNegativePowerOf2() const {
return this->HasValue() && this->Value() < 0 &&
(-this->Value() & (-this->Value() - 1)) == 0;
((this->Value() == kMinInt) ||
(-this->Value() & (-this->Value() - 1)) == 0);
}
bool IsNegative() const { return this->HasValue() && this->Value() < 0; }
};
......
......@@ -2848,7 +2848,7 @@ Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
Int32Matcher m(right);
if (m.HasValue()) {
return Binop(wasm::kExprI32Ror, left,
mcgraph()->Int32Constant(32 - m.Value()));
mcgraph()->Int32Constant(32 - (m.Value() & 0x1F)));
} else {
return Binop(wasm::kExprI32Ror, left,
Binop(wasm::kExprI32Sub, mcgraph()->Int32Constant(32), right));
......@@ -2861,7 +2861,7 @@ Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
Int64Matcher m(right);
if (m.HasValue()) {
return Binop(wasm::kExprI64Ror, left,
mcgraph()->Int64Constant(64 - m.Value()));
mcgraph()->Int64Constant(64 - (m.Value() & 0x3F)));
} else {
return Binop(wasm::kExprI64Ror, left,
Binop(wasm::kExprI64Sub, mcgraph()->Int64Constant(64), right));
......
......@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "test/cctest/compiler/codegen-tester.h"
#include "src/base/overflowing-math.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
......@@ -419,11 +421,12 @@ TEST(RunInt32Constants) {
TEST(RunSmiConstants) {
for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) {
for (int32_t i = 1; i < Smi::kMaxValue && i != 0;
i = base::ShlWithWraparound(i, 1)) {
RunSmiConstant(i);
RunSmiConstant(3 * i);
RunSmiConstant(5 * i);
RunSmiConstant(-i);
RunSmiConstant(base::MulWithWraparound(3, i));
RunSmiConstant(base::MulWithWraparound(5, i));
RunSmiConstant(base::NegateWithWraparound(i));
RunSmiConstant(i | 1);
RunSmiConstant(i | 3);
}
......@@ -444,9 +447,10 @@ TEST(RunNumberConstants) {
FOR_INT32_INPUTS(i) { RunNumberConstant(*i); }
}
for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) {
for (int32_t i = 1; i < Smi::kMaxValue && i != 0;
i = base::ShlWithWraparound(i, 1)) {
RunNumberConstant(i);
RunNumberConstant(-i);
RunNumberConstant(base::NegateWithWraparound(i));
RunNumberConstant(i | 1);
RunNumberConstant(i | 3);
}
......@@ -576,6 +580,20 @@ TEST(RunBinopTester) {
#if V8_TARGET_ARCH_64_BIT
// TODO(ahaas): run int64 tests on all platforms when supported.
namespace {
// Wraparound-safe a + b + c + d for the Int64Add tests.
int64_t Add4(int64_t a, int64_t b, int64_t c, int64_t d) {
  // Accumulate in the unsigned domain, where overflow wraps modulo 2^64 and
  // is well-defined; the resulting bit pattern matches two's-complement
  // addition, avoiding signed-overflow UB.
  uint64_t sum = static_cast<uint64_t>(a);
  sum += static_cast<uint64_t>(b);
  sum += static_cast<uint64_t>(c);
  sum += static_cast<uint64_t>(d);
  return static_cast<int64_t>(sum);
}
// Wraparound-safe a + b + c, implemented via Add4 with a zero fourth addend.
int64_t Add3(int64_t a, int64_t b, int64_t c) { return Add4(a, b, c, 0); }
} // namespace
TEST(RunBufferedRawMachineAssemblerTesterTester) {
{
BufferedRawMachineAssemblerTester<int64_t> m;
......@@ -593,8 +611,8 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Return(m.Int64Add(m.Parameter(0), m.Parameter(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
CHECK_EQ(*i + *j, m.Call(*i, *j));
CHECK_EQ(*j + *i, m.Call(*j, *i));
CHECK_EQ(base::AddWithWraparound(*i, *j), m.Call(*i, *j));
CHECK_EQ(base::AddWithWraparound(*j, *i), m.Call(*j, *i));
}
}
}
......@@ -605,9 +623,9 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Int64Add(m.Int64Add(m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
CHECK_EQ(*i + *i + *j, m.Call(*i, *i, *j));
CHECK_EQ(*i + *j + *i, m.Call(*i, *j, *i));
CHECK_EQ(*j + *i + *i, m.Call(*j, *i, *i));
CHECK_EQ(Add3(*i, *i, *j), m.Call(*i, *i, *j));
CHECK_EQ(Add3(*i, *j, *i), m.Call(*i, *j, *i));
CHECK_EQ(Add3(*j, *i, *i), m.Call(*j, *i, *i));
}
}
}
......@@ -620,10 +638,10 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
m.Parameter(3)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
CHECK_EQ(*i + *i + *i + *j, m.Call(*i, *i, *i, *j));
CHECK_EQ(*i + *i + *j + *i, m.Call(*i, *i, *j, *i));
CHECK_EQ(*i + *j + *i + *i, m.Call(*i, *j, *i, *i));
CHECK_EQ(*j + *i + *i + *i, m.Call(*j, *i, *i, *i));
CHECK_EQ(Add4(*i, *i, *i, *j), m.Call(*i, *i, *i, *j));
CHECK_EQ(Add4(*i, *i, *j, *i), m.Call(*i, *i, *j, *i));
CHECK_EQ(Add4(*i, *j, *i, *i), m.Call(*i, *j, *i, *i));
CHECK_EQ(Add4(*j, *i, *i, *i), m.Call(*j, *i, *i, *i));
}
}
}
......@@ -659,10 +677,10 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
m.Call(*i, *j);
CHECK_EQ(*i + *j, result);
CHECK_EQ(base::AddWithWraparound(*i, *j), result);
m.Call(*j, *i);
CHECK_EQ(*j + *i, result);
CHECK_EQ(base::AddWithWraparound(*j, *i), result);
}
}
}
......@@ -678,13 +696,13 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
m.Call(*i, *i, *j);
CHECK_EQ(*i + *i + *j, result);
CHECK_EQ(Add3(*i, *i, *j), result);
m.Call(*i, *j, *i);
CHECK_EQ(*i + *j + *i, result);
CHECK_EQ(Add3(*i, *j, *i), result);
m.Call(*j, *i, *i);
CHECK_EQ(*j + *i + *i, result);
CHECK_EQ(Add3(*j, *i, *i), result);
}
}
}
......@@ -703,16 +721,16 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
m.Call(*i, *i, *i, *j);
CHECK_EQ(*i + *i + *i + *j, result);
CHECK_EQ(Add4(*i, *i, *i, *j), result);
m.Call(*i, *i, *j, *i);
CHECK_EQ(*i + *i + *j + *i, result);
CHECK_EQ(Add4(*i, *i, *j, *i), result);
m.Call(*i, *j, *i, *i);
CHECK_EQ(*i + *j + *i + *i, result);
CHECK_EQ(Add4(*i, *j, *i, *i), result);
m.Call(*j, *i, *i, *i);
CHECK_EQ(*j + *i + *i + *i, result);
CHECK_EQ(Add4(*j, *i, *i, *i), result);
}
}
}
......
......@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/overflowing-math.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
......@@ -501,7 +502,8 @@ TEST(BranchCombineInt32AddLessThanZero) {
FOR_INT32_INPUTS(j) {
int32_t a = *i;
int32_t b = *j;
int32_t expect = (a + b < 0) ? t_constant : f_constant;
int32_t expect =
(base::AddWithWraparound(a, b) < 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
}
......@@ -529,7 +531,8 @@ TEST(BranchCombineInt32AddGreaterThanOrEqualZero) {
FOR_INT32_INPUTS(j) {
int32_t a = *i;
int32_t b = *j;
int32_t expect = (a + b >= 0) ? t_constant : f_constant;
int32_t expect =
(base::AddWithWraparound(a, b) >= 0) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
}
......@@ -557,7 +560,8 @@ TEST(BranchCombineInt32ZeroGreaterThanAdd) {
FOR_INT32_INPUTS(j) {
int32_t a = *i;
int32_t b = *j;
int32_t expect = (0 > a + b) ? t_constant : f_constant;
int32_t expect =
(0 > base::AddWithWraparound(a, b)) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
}
......@@ -585,7 +589,8 @@ TEST(BranchCombineInt32ZeroLessThanOrEqualAdd) {
FOR_INT32_INPUTS(j) {
int32_t a = *i;
int32_t b = *j;
int32_t expect = (0 <= a + b) ? t_constant : f_constant;
int32_t expect =
(0 <= base::AddWithWraparound(a, b)) ? t_constant : f_constant;
CHECK_EQ(expect, m.Call(a, b));
}
}
......@@ -609,8 +614,8 @@ TEST(BranchCombineUint32AddLessThanOrEqualZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t a = *i;
uint32_t b = *j;
int32_t expect = (a + b <= 0) ? t_constant : f_constant;
......@@ -637,8 +642,8 @@ TEST(BranchCombineUint32AddGreaterThanZero) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t a = *i;
uint32_t b = *j;
int32_t expect = (a + b > 0) ? t_constant : f_constant;
......@@ -665,8 +670,8 @@ TEST(BranchCombineUint32ZeroGreaterThanOrEqualAdd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t a = *i;
uint32_t b = *j;
int32_t expect = (0 >= a + b) ? t_constant : f_constant;
......@@ -693,8 +698,8 @@ TEST(BranchCombineUint32ZeroLessThanAdd) {
m.Bind(&blockb);
m.Return(m.Int32Constant(f_constant));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t a = *i;
uint32_t b = *j;
int32_t expect = (0 < a + b) ? t_constant : f_constant;
......
......@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
#include "src/compiler/js-graph.h"
......@@ -505,7 +506,7 @@ TEST(ReduceInt32Add) {
FOR_INT32_INPUTS(pl) {
FOR_INT32_INPUTS(pr) {
int32_t x = *pl, y = *pr;
R.CheckFoldBinop<int32_t>(x + y, x, y); // TODO(titzer): signed overflow
R.CheckFoldBinop<int32_t>(base::AddWithWraparound(x, y), x, y);
}
}
......@@ -526,7 +527,7 @@ TEST(ReduceInt64Add) {
FOR_INT64_INPUTS(pl) {
FOR_INT64_INPUTS(pr) {
int64_t x = *pl, y = *pr;
R.CheckFoldBinop<int64_t>(x + y, x, y);
R.CheckFoldBinop<int64_t>(base::AddWithWraparound(x, y), x, y);
}
}
......@@ -545,7 +546,7 @@ TEST(ReduceInt32Sub) {
FOR_INT32_INPUTS(pl) {
FOR_INT32_INPUTS(pr) {
int32_t x = *pl, y = *pr;
R.CheckFoldBinop<int32_t>(x - y, x, y);
R.CheckFoldBinop<int32_t>(base::SubWithWraparound(x, y), x, y);
}
}
......@@ -564,7 +565,7 @@ TEST(ReduceInt64Sub) {
FOR_INT64_INPUTS(pl) {
FOR_INT64_INPUTS(pr) {
int64_t x = *pl, y = *pr;
R.CheckFoldBinop<int64_t>(x - y, x, y);
R.CheckFoldBinop<int64_t>(base::SubWithWraparound(x, y), x, y);
}
}
......@@ -589,7 +590,7 @@ TEST(ReduceInt32Mul) {
FOR_INT32_INPUTS(pl) {
FOR_INT32_INPUTS(pr) {
int32_t x = *pl, y = *pr;
R.CheckFoldBinop<int32_t>(x * y, x, y); // TODO(titzer): signed overflow
R.CheckFoldBinop<int32_t>(base::MulWithWraparound(x, y), x, y);
}
}
......@@ -628,7 +629,8 @@ TEST(ReduceInt32Div) {
FOR_INT32_INPUTS(pr) {
int32_t x = *pl, y = *pr;
if (y == 0) continue; // TODO(titzer): test / 0
int32_t r = y == -1 ? -x : x / y; // INT_MIN / -1 may explode in C
int32_t r = y == -1 ? base::NegateWithWraparound(x)
: x / y; // INT_MIN / -1 may explode in C
R.CheckFoldBinop<int32_t>(r, x, y);
}
}
......
......@@ -7,6 +7,7 @@
#include <limits>
#include "src/base/bits.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
#include "src/objects-inl.h"
......@@ -90,7 +91,8 @@ void RunLoadStoreFloat32Offset(TestAlignment t) {
float p2 = 0.0f; // and stores directly into this location.
FOR_INT32_INPUTS(i) {
int32_t magic = 0x2342AABB + *i * 3;
int32_t magic =
base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(*i, 3));
RawMachineAssemblerTester<int32_t> m;
int32_t offset = *i;
byte* from = reinterpret_cast<byte*>(&p1) - offset;
......@@ -127,7 +129,8 @@ void RunLoadStoreFloat64Offset(TestAlignment t) {
double p2 = 0; // and stores directly into this location.
FOR_INT32_INPUTS(i) {
int32_t magic = 0x2342AABB + *i * 3;
int32_t magic =
base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(*i, 3));
RawMachineAssemblerTester<int32_t> m;
int32_t offset = *i;
byte* from = reinterpret_cast<byte*>(&p1) - offset;
......
......@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
#include "src/boxed-float.h"
#include "src/codegen.h"
......@@ -2058,7 +2059,7 @@ TEST(RunInt32MulP) {
bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int expected = static_cast<int32_t>(*i * *j);
int expected = base::MulWithWraparound(*i, *j);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
......@@ -2125,7 +2126,8 @@ TEST(RunInt32MulAndInt32AddP) {
m.Int32Mul(m.Parameter(0), m.Int32Constant(p1))));
FOR_INT32_INPUTS(k) {
int32_t p2 = *k;
int expected = p0 + static_cast<int32_t>(p1 * p2);
int expected =
base::AddWithWraparound(p0, base::MulWithWraparound(p1, p2));
CHECK_EQ(expected, m.Call(p2));
}
}
......@@ -2142,7 +2144,8 @@ TEST(RunInt32MulAndInt32AddP) {
int32_t p0 = *i;
int32_t p1 = *j;
int32_t p2 = *k;
int expected = p0 + static_cast<int32_t>(p1 * p2);
int expected =
base::AddWithWraparound(p0, base::MulWithWraparound(p1, p2));
CHECK_EQ(expected, m.Call(p0, p1, p2));
}
}
......@@ -2159,7 +2162,8 @@ TEST(RunInt32MulAndInt32AddP) {
int32_t p0 = *i;
int32_t p1 = *j;
int32_t p2 = *k;
int expected = static_cast<int32_t>(p0 * p1) + p2;
int expected =
base::AddWithWraparound(base::MulWithWraparound(p0, p1), p2);
CHECK_EQ(expected, m.Call(p0, p1, p2));
}
}
......@@ -2175,7 +2179,8 @@ TEST(RunInt32MulAndInt32AddP) {
FOR_INT32_INPUTS(k) {
int32_t p0 = *j;
int32_t p1 = *k;
int expected = *i + static_cast<int32_t>(p0 * p1);
int expected =
base::AddWithWraparound(*i, base::MulWithWraparound(p0, p1));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
......@@ -2187,24 +2192,24 @@ TEST(RunInt32MulAndInt32AddP) {
TEST(RunInt32MulAndInt32SubP) {
{
RawMachineAssemblerTester<int32_t> m(
MachineType::Uint32(), MachineType::Int32(), MachineType::Int32());
MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_INT32_INPUTS(k) {
uint32_t p0 = *i;
int32_t p0 = *i;
int32_t p1 = *j;
int32_t p2 = *k;
// Use uint32_t because signed overflow is UB in C.
int expected = p0 - static_cast<uint32_t>(p1 * p2);
int expected =
base::SubWithWraparound(p0, base::MulWithWraparound(p1, p2));
CHECK_EQ(expected, m.Call(p0, p1, p2));
}
}
}
}
{
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(i) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
bt.AddReturn(
......@@ -2213,8 +2218,8 @@ TEST(RunInt32MulAndInt32SubP) {
FOR_INT32_INPUTS(k) {
int32_t p0 = *j;
int32_t p1 = *k;
// Use uint32_t because signed overflow is UB in C.
int expected = *i - static_cast<uint32_t>(p0 * p1);
int expected =
base::SubWithWraparound(*i, base::MulWithWraparound(p0, p1));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
......@@ -2262,7 +2267,8 @@ TEST(RunInt32DivP) {
int p0 = *i;
int p1 = *j;
if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
int expected = static_cast<int32_t>(p0 + (p0 / p1));
int expected =
static_cast<int32_t>(base::AddWithWraparound(p0, (p0 / p1)));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
......@@ -2330,7 +2336,8 @@ TEST(RunInt32ModP) {
int p0 = *i;
int p1 = *j;
if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
int expected = static_cast<int32_t>(p0 + (p0 % p1));
int expected =
static_cast<int32_t>(base::AddWithWraparound(p0, (p0 % p1)));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
......@@ -3463,7 +3470,7 @@ TEST(RunInt32NegP) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(m.Int32Neg(m.Parameter(0)));
FOR_INT32_INPUTS(i) {
int expected = -*i;
int expected = base::NegateWithWraparound(*i);
CHECK_EQ(expected, m.Call(*i));
}
}
......@@ -3676,7 +3683,9 @@ TEST(RunFloat32Div) {
m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i / *j, m.Call(*i, *j)); }
FOR_FLOAT32_INPUTS(j) {
CHECK_FLOAT_EQ(base::Divide(*i, *j), m.Call(*i, *j));
}
}
}
......@@ -3725,7 +3734,9 @@ TEST(RunFloat64Div) {
m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(*i / *j, m.Call(*i, *j)); }
FOR_FLOAT64_INPUTS(j) {
CHECK_DOUBLE_EQ(base::Divide(*i, *j), m.Call(*i, *j));
}
}
}
......@@ -4056,7 +4067,9 @@ TEST(RunFloat32DivP) {
bt.AddReturn(m.Float32Div(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(*pl / *pr, bt.call(*pl, *pr)); }
FOR_FLOAT32_INPUTS(pr) {
CHECK_FLOAT_EQ(base::Divide(*pl, *pr), bt.call(*pl, *pr));
}
}
}
......@@ -4068,7 +4081,9 @@ TEST(RunFloat64DivP) {
bt.AddReturn(m.Float64Div(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) { CHECK_DOUBLE_EQ(*pl / *pr, bt.call(*pl, *pr)); }
FOR_FLOAT64_INPUTS(pr) {
CHECK_DOUBLE_EQ(base::Divide(*pl, *pr), bt.call(*pl, *pr));
}
}
}
......@@ -5250,7 +5265,7 @@ TEST(RunSpillConstantsAndParameters) {
Node* accs[kInputSize];
Node* acc = m.Int32Constant(0);
for (int i = 0; i < kInputSize; i++) {
csts[i] = m.Int32Constant(static_cast<int32_t>(kBase + i));
csts[i] = m.Int32Constant(base::AddWithWraparound(kBase, i));
}
for (int i = 0; i < kInputSize; i++) {
acc = m.Int32Add(acc, csts[i]);
......@@ -5262,9 +5277,9 @@ TEST(RunSpillConstantsAndParameters) {
m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1))));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected = *i + *j;
int32_t expected = base::AddWithWraparound(*i, *j);
for (int k = 0; k < kInputSize; k++) {
expected += kBase + k;
expected = base::AddWithWraparound(expected, kBase + k);
}
CHECK_EQ(expected, m.Call(*i, *j));
expected = 0;
......@@ -6238,17 +6253,15 @@ int32_t foo0() { return kMagicFoo0; }
int32_t foo1(int32_t x) { return x; }
int32_t foo2(int32_t x, int32_t y) { return base::SubWithWraparound(x, y); }
int32_t foo2(int32_t x, int32_t y) { return x - y; }
int32_t foo8(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f,
int32_t g, int32_t h) {
uint32_t foo8(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
uint32_t f, uint32_t g, uint32_t h) {
return a + b + c + d + e + f + g + h;
}
int32_t foo9(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f,
int32_t g, int32_t h, int32_t i) {
uint32_t foo9(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
uint32_t f, uint32_t g, uint32_t h, uint32_t i) {
return a + b + c + d + e + f + g + h + i;
}
......@@ -6289,7 +6302,7 @@ TEST(RunCallCFunction2) {
int32_t const x = *i;
FOR_INT32_INPUTS(j) {
int32_t const y = *j;
CHECK_EQ(x - y, m.Call(x, y));
CHECK_EQ(base::SubWithWraparound(x, y), m.Call(x, y));
}
}
}
......@@ -6307,7 +6320,7 @@ TEST(RunCallCFunction8) {
function, param, param, param, param, param, param, param, param));
FOR_INT32_INPUTS(i) {
int32_t const x = *i;
CHECK_EQ(x * 8, m.Call(x));
CHECK_EQ(base::MulWithWraparound(x, 8), m.Call(x));
}
}
......@@ -6331,7 +6344,8 @@ TEST(RunCallCFunction9) {
m.Int32Add(param, m.Int32Constant(8))));
FOR_INT32_INPUTS(i) {
int32_t const x = *i;
CHECK_EQ(x * 9 + 36, m.Call(x));
CHECK_EQ(base::AddWithWraparound(base::MulWithWraparound(x, 9), 36),
m.Call(x));
}
}
#endif // USE_SIMULATOR
......
......@@ -5,6 +5,7 @@
#include <vector>
#include "src/assembler.h"
#include "src/base/overflowing-math.h"
#include "src/codegen.h"
#include "src/compiler/linkage.h"
#include "src/compiler/raw-machine-assembler.h"
......@@ -1101,7 +1102,8 @@ void MixedParamTest(int start) {
CHECK_NOT_NULL(konst);
inputs[input_count++] = konst;
constant += 0x1010101010101010;
const int64_t kIncrement = 0x1010101010101010;
constant = base::AddWithWraparound(constant, kIncrement);
}
Node* call = raw.CallN(desc, input_count, inputs);
......
......@@ -6,6 +6,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/typer.h"
#include "src/conversions-inl.h"
......@@ -978,7 +979,8 @@ TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndLoad) {
TEST_F(MachineOperatorReducerTest, Word32ShrWithWord32And) {
Node* const p0 = Parameter(0);
TRACED_FORRANGE(int32_t, shift, 1, 31) {
uint32_t mask = (1 << shift) - 1;
uint32_t mask =
base::SubWithWraparound(base::ShlWithWraparound(1, shift), 1);
Node* node = graph()->NewNode(
machine()->Word32Shr(),
graph()->NewNode(machine()->Word32And(), p0, Int32Constant(mask)),
......@@ -1067,7 +1069,9 @@ TEST_F(MachineOperatorReducerTest, Int32SubWithConstant) {
if (k == 0) {
EXPECT_EQ(p0, r.replacement());
} else {
EXPECT_THAT(r.replacement(), IsInt32Add(p0, IsInt32Constant(-k)));
EXPECT_THAT(
r.replacement(),
IsInt32Add(p0, IsInt32Constant(base::NegateWithWraparound(k))));
}
}
}
......
......@@ -83,19 +83,19 @@ TEST(PersistentMap, Zip) {
// Provoke hash collisions to stress the iterator.
struct bad_hash {
size_t operator()(int key) {
size_t operator()(uint32_t key) {
return base::hash_value(static_cast<size_t>(key) % 1000);
}
};
PersistentMap<int, int, bad_hash> a(&zone);
PersistentMap<int, int, bad_hash> b(&zone);
PersistentMap<int, uint32_t, bad_hash> a(&zone);
PersistentMap<int, uint32_t, bad_hash> b(&zone);
int sum_a = 0;
int sum_b = 0;
uint32_t sum_a = 0;
uint32_t sum_b = 0;
for (int i = 0; i < 30000; ++i) {
int key = small_big_distr(&rand);
int value = small_big_distr(&rand);
uint32_t value = small_big_distr(&rand);
if (rand.NextBool()) {
sum_a += value;
a.Set(key, a.Get(key) + value);
......@@ -105,28 +105,28 @@ TEST(PersistentMap, Zip) {
}
}
int sum = sum_a + sum_b;
uint32_t sum = sum_a + sum_b;
for (auto pair : a) {
sum_a -= pair.second;
}
ASSERT_EQ(0, sum_a);
ASSERT_EQ(0u, sum_a);
for (auto pair : b) {
sum_b -= pair.second;
}
ASSERT_EQ(0, sum_b);
ASSERT_EQ(0u, sum_b);
for (auto triple : a.Zip(b)) {
int key = std::get<0>(triple);
int value_a = std::get<1>(triple);
int value_b = std::get<2>(triple);
uint32_t value_a = std::get<1>(triple);
uint32_t value_b = std::get<2>(triple);
ASSERT_EQ(value_a, a.Get(key));
ASSERT_EQ(value_b, b.Get(key));
sum -= value_a;
sum -= value_b;
}
ASSERT_EQ(0, sum);
ASSERT_EQ(0u, sum);
}
} // namespace compiler
......
......@@ -4,6 +4,7 @@
#include <functional>
#include "src/base/overflowing-math.h"
#include "src/codegen.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
......@@ -308,6 +309,7 @@ int32_t shift_right(int32_t x, int32_t y) { return x >> (y & 0x1F); }
int32_t bit_or(int32_t x, int32_t y) { return x | y; }
int32_t bit_and(int32_t x, int32_t y) { return x & y; }
int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }
double divide_double_double(double x, double y) { return base::Divide(x, y); }
double modulo_double_double(double x, double y) { return Modulo(x, y); }
} // namespace
......@@ -332,7 +334,7 @@ TEST_F(TyperTest, TypeJSMultiply) {
}
TEST_F(TyperTest, TypeJSDivide) {
TestBinaryArithOp(javascript_.Divide(), std::divides<double>());
TestBinaryArithOp(javascript_.Divide(), divide_double_double);
}
TEST_F(TyperTest, TypeJSModulus) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment