Commit 5736eb0c authored by chunyang.dai's avatar chunyang.dai Committed by Commit bot

X87: Change the test case for X87 float operations

       The CL https://codereview.chromium.org/1409013004 added / changed some test cases.
       Some new test cases use CheckFloatEq(...) and CheckDoubleEq(...) function for result
       check. When GCC compiles the CheckFloatEq() and CheckDoubleEq() functions, those inlined
       functions have different behavior when comparing the GCC ia32 build and the x87 build. The major
       difference is that the SSE float register still has single-precision rounding semantics, while the X87
       register has no such rounding precision semantics when the register value is used directly. The V8
       turbofan JITTed code has exactly the same result in both the X87 and IA32 ports.

       So we add the following statement to perform a type cast in order to keep the same precision.
          float expect = *i * *j; // *i + *j, etc.

       For test case "RunFloat64MulAndFloat64Add1 / RunFloat64MulAndFloat64Add2 / RunFloat64MulAndFloat64Sub1
       / RunFloat64MulAndFloat64Sub2", the expected result calculated by GCC has a different precision
       when compared with the V8 turbofan result on the X87 platform. (The Turbofan X87 result is the same as
       IA32 GCC and IA32 Turbofan). So we have to disable those four cases for X87 port.

BUG=

Review URL: https://codereview.chromium.org/1430943002

Cr-Commit-Position: refs/heads/master@{#31808}
parent 0bc51ef6
...@@ -322,6 +322,10 @@ ...@@ -322,6 +322,10 @@
'test-run-native-calls/MixedParams_1': [SKIP], 'test-run-native-calls/MixedParams_1': [SKIP],
'test-run-native-calls/MixedParams_2': [SKIP], 'test-run-native-calls/MixedParams_2': [SKIP],
'test-run-native-calls/MixedParams_3': [SKIP], 'test-run-native-calls/MixedParams_3': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Add1': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Add2': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Sub1': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Sub2': [SKIP],
}], # 'arch == x87' }], # 'arch == x87'
############################################################################## ##############################################################################
......
...@@ -3242,7 +3242,10 @@ TEST(RunFloat32Add) { ...@@ -3242,7 +3242,10 @@ TEST(RunFloat32Add) {
m.Return(m.Float32Add(m.Parameter(0), m.Parameter(1))); m.Return(m.Float32Add(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) { FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i + *j, m.Call(*i, *j)); } FOR_FLOAT32_INPUTS(j) {
volatile float expected = *i + *j;
CheckFloatEq(expected, m.Call(*i, *j));
}
} }
} }
...@@ -3252,7 +3255,10 @@ TEST(RunFloat32Sub) { ...@@ -3252,7 +3255,10 @@ TEST(RunFloat32Sub) {
m.Return(m.Float32Sub(m.Parameter(0), m.Parameter(1))); m.Return(m.Float32Sub(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) { FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i - *j, m.Call(*i, *j)); } FOR_FLOAT32_INPUTS(j) {
volatile float expected = *i - *j;
CheckFloatEq(expected, m.Call(*i, *j));
}
} }
} }
...@@ -3262,7 +3268,10 @@ TEST(RunFloat32Mul) { ...@@ -3262,7 +3268,10 @@ TEST(RunFloat32Mul) {
m.Return(m.Float32Mul(m.Parameter(0), m.Parameter(1))); m.Return(m.Float32Mul(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) { FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i * *j, m.Call(*i, *j)); } FOR_FLOAT32_INPUTS(j) {
volatile float expected = *i * *j;
CheckFloatEq(expected, m.Call(*i, *j));
}
} }
} }
...@@ -3272,7 +3281,10 @@ TEST(RunFloat32Div) { ...@@ -3272,7 +3281,10 @@ TEST(RunFloat32Div) {
m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1))); m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) { FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i / *j, m.Call(*i, *j)); } FOR_FLOAT32_INPUTS(j) {
volatile float expected = *i / *j;
CheckFloatEq(expected, m.Call(*i, *j));
}
} }
} }
...@@ -3302,7 +3314,10 @@ TEST(RunFloat64Mul) { ...@@ -3302,7 +3314,10 @@ TEST(RunFloat64Mul) {
m.Return(m.Float64Mul(m.Parameter(0), m.Parameter(1))); m.Return(m.Float64Mul(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) { FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i * *j, m.Call(*i, *j)); } FOR_FLOAT64_INPUTS(j) {
volatile double expected = *i * *j;
CheckDoubleEq(expected, m.Call(*i, *j));
}
} }
} }
...@@ -3312,7 +3327,10 @@ TEST(RunFloat64Div) { ...@@ -3312,7 +3327,10 @@ TEST(RunFloat64Div) {
m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1))); m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) { FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i / *j, m.Call(*i, *j)); } FOR_FLOAT64_INPUTS(j) {
volatile double expected = *i / *j;
CheckDoubleEq(expected, m.Call(*i, *j));
}
} }
} }
...@@ -3475,7 +3493,10 @@ TEST(RunFloat32SubImm1) { ...@@ -3475,7 +3493,10 @@ TEST(RunFloat32SubImm1) {
BufferedRawMachineAssemblerTester<float> m(kMachFloat32); BufferedRawMachineAssemblerTester<float> m(kMachFloat32);
m.Return(m.Float32Sub(m.Float32Constant(*i), m.Parameter(0))); m.Return(m.Float32Sub(m.Float32Constant(*i), m.Parameter(0)));
FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i - *j, m.Call(*j)); } FOR_FLOAT32_INPUTS(j) {
volatile float expected = *i - *j;
CheckFloatEq(expected, m.Call(*j));
}
} }
} }
...@@ -3485,7 +3506,10 @@ TEST(RunFloat32SubImm2) { ...@@ -3485,7 +3506,10 @@ TEST(RunFloat32SubImm2) {
BufferedRawMachineAssemblerTester<float> m(kMachFloat32); BufferedRawMachineAssemblerTester<float> m(kMachFloat32);
m.Return(m.Float32Sub(m.Parameter(0), m.Float32Constant(*i))); m.Return(m.Float32Sub(m.Parameter(0), m.Float32Constant(*i)));
FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*j - *i, m.Call(*j)); } FOR_FLOAT32_INPUTS(j) {
volatile float expected = *j - *i;
CheckFloatEq(expected, m.Call(*j));
}
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment