Commit f5bec4bc authored by binji, committed by Commit bot

[Atomics] Remove support for atomic accesses on floating-point values.

R=jarin@chromium.org

Review URL: https://codereview.chromium.org/1318713007

Cr-Commit-Position: refs/heads/master@{#30755}
parent f44efd6b
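
In effect, the patch narrows the JS-visible contract: every Atomics operation now demands a shared *integer* typed array, and shared Float32Array/Float64Array views are rejected up front. A minimal sketch of the post-change behavior (illustrative, not part of the patch; assumes a build with SharedArrayBuffer support enabled):

var sab = new SharedArrayBuffer(16);
var i32 = new Int32Array(sab);    // shared integer view: still supported
var f64 = new Float64Array(sab);  // shared float view: now rejected

Atomics.store(i32, 0, 42);
console.log(Atomics.load(i32, 0));  // 42

try {
  Atomics.load(f64, 0);  // previously returned a double; now throws
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
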
@@ -24,12 +24,6 @@ utils.Import(function(from) {
// -------------------------------------------------------------------
function CheckSharedTypedArray(sta) {
if (!%IsSharedTypedArray(sta)) {
throw MakeTypeError(kNotSharedTypedArray, sta);
}
}
function CheckSharedIntegerTypedArray(ia) {
if (!%IsSharedIntegerTypedArray(ia)) {
throw MakeTypeError(kNotIntegerSharedTypedArray, ia);
@@ -46,7 +40,7 @@ function CheckSharedInteger32TypedArray(ia) {
//-------------------------------------------------------------------
function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
CheckSharedTypedArray(sta);
CheckSharedIntegerTypedArray(sta);
index = $toInteger(index);
if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
@@ -57,7 +51,7 @@ function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
}
function AtomicsLoadJS(sta, index) {
CheckSharedTypedArray(sta);
CheckSharedIntegerTypedArray(sta);
index = $toInteger(index);
if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
@@ -66,7 +60,7 @@ function AtomicsLoadJS(sta, index) {
}
function AtomicsStoreJS(sta, index, value) {
CheckSharedTypedArray(sta);
CheckSharedIntegerTypedArray(sta);
index = $toInteger(index);
if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
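
A side note on the wrappers above, unchanged by this patch: an out-of-range index makes each operation return undefined rather than throw. That was draft-spec behavior; the standardized Atomics later switched to throwing a RangeError here. A sketch (illustrative only):

var sta = new Int32Array(new SharedArrayBuffer(16));  // 4 elements
Atomics.store(sta, 1, 7);
console.log(Atomics.load(sta, 1));  // 7
// Under the draft-era wrapper above, an out-of-range index returned
// undefined; the standardized Atomics instead throws a RangeError:
try {
  console.log(Atomics.load(sta, 99));  // undefined then, RangeError now
} catch (e) {
  console.log(e instanceof RangeError);  // true in modern engines
}
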
......
@@ -12,33 +12,17 @@
// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here
// https://docs.google.com/document/d/1NDGA_gZJ7M7w1Bh8S0AoDyEqwDdRh4uSoTPSNn77PFk
// https://github.com/lars-t-hansen/ecmascript_sharedmem
namespace v8 {
namespace internal {
namespace {
// Assume that 32-bit architectures don't have 64-bit atomic ops.
// TODO(binji): can we do better here?
#if V8_TARGET_ARCH_64_BIT && V8_HOST_ARCH_64_BIT
#define ATOMICS_REQUIRE_LOCK_64_BIT 0
inline bool AtomicIsLockFree(uint32_t size) {
return size == 1 || size == 2 || size == 4 || size == 8;
}
#else
#define ATOMICS_REQUIRE_LOCK_64_BIT 1
inline bool AtomicIsLockFree(uint32_t size) {
return size == 1 || size == 2 || size == 4;
}
#endif
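
The surviving AtomicIsLockFree above is what backs the JS-visible Atomics.isLockFree. With the lock-based 64-bit fallback gone, size 8 no longer has an atomics-capable element type behind it. A sketch of the draft-era results (modern engines differ, since BigInt64Array later reintroduced 8-byte atomics):

Atomics.isLockFree(4);  // true: 4-byte atomic ops are lock-free here
Atomics.isLockFree(3);  // false: 3 is not any element size
Atomics.isLockFree(8);  // false after this patch: float64 was the only
                        // 8-byte element type, and it is no longer atomic
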
#if V8_CC_GNU
template <typename T>
@@ -90,37 +74,6 @@ inline T ExchangeSeqCst(T* p, T value) {
return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}
#if ATOMICS_REQUIRE_LOCK_64_BIT
// We only need to implement the following functions, because the rest of the
// atomic operations only work on integer types, and the only 64-bit type is
// float64. Similarly, because the values are being bit_cast from double ->
// uint64_t, we don't need to implement these functions for int64_t either.
static base::LazyMutex atomic_mutex = LAZY_MUTEX_INITIALIZER;
inline uint64_t CompareExchangeSeqCst(uint64_t* p, uint64_t oldval,
uint64_t newval) {
base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
uint64_t result = *p;
if (result == oldval) *p = newval;
return result;
}
inline uint64_t LoadSeqCst(uint64_t* p) {
base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
return *p;
}
inline void StoreSeqCst(uint64_t* p, uint64_t value) {
base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
*p = value;
}
#endif // ATOMICS_REQUIRE_LOCK_64_BIT
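
The removed comment explains the design: float64 was the only 64-bit element type, and its values were handled as raw bit patterns (double bit_cast to uint64_t), so only CompareExchange/Load/Store needed the mutex fallback. The same reinterpretation can be sketched from JS with a DataView (illustrative only; the BigInt accessors postdate this commit):

var view = new DataView(new ArrayBuffer(8));
view.setFloat64(0, 1.5);
console.log(view.getBigUint64(0).toString(16));  // "3ff8000000000000"
// The lock-guarded CompareExchangeSeqCst above compared these 64-bit
// patterns, not the doubles they encode.
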
#elif V8_CC_MSVC
#define InterlockedCompareExchange32 _InterlockedCompareExchange
@@ -133,33 +86,32 @@ inline void StoreSeqCst(uint64_t* p, uint64_t value) {
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
#define ATOMIC_OPS_INTEGER(type, suffix, vctype) \
inline type AddSeqCst(type* p, type value) { \
return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
} \
inline type SubSeqCst(type* p, type value) { \
return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
-bit_cast<vctype>(value)); \
} \
inline type AndSeqCst(type* p, type value) { \
return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
} \
inline type OrSeqCst(type* p, type value) { \
return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
} \
inline type XorSeqCst(type* p, type value) { \
return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
} \
inline type ExchangeSeqCst(type* p, type value) { \
return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
}
#define ATOMIC_OPS_FLOAT(type, suffix, vctype) \
#define ATOMIC_OPS(type, suffix, vctype) \
inline type AddSeqCst(type* p, type value) { \
return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
} \
inline type SubSeqCst(type* p, type value) { \
return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
-bit_cast<vctype>(value)); \
} \
inline type AndSeqCst(type* p, type value) { \
return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
} \
inline type OrSeqCst(type* p, type value) { \
return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
} \
inline type XorSeqCst(type* p, type value) { \
return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
} \
inline type ExchangeSeqCst(type* p, type value) { \
return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
} \
\
inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \
return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(newval), \
@@ -171,20 +123,14 @@ inline void StoreSeqCst(uint64_t* p, uint64_t value) {
bit_cast<vctype>(value)); \
}
#define ATOMIC_OPS(type, suffix, vctype) \
ATOMIC_OPS_INTEGER(type, suffix, vctype) \
ATOMIC_OPS_FLOAT(type, suffix, vctype)
ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
ATOMIC_OPS_FLOAT(uint64_t, 64, LONGLONG)
#undef ATOMIC_OPS_INTEGER
#undef ATOMIC_OPS_FLOAT
#undef ATOMIC_OPS
#undef InterlockedCompareExchange32
@@ -216,46 +162,16 @@ inline int32_t FromObject<int32_t>(Handle<Object> number) {
return NumberToInt32(*number);
}
template <>
inline float FromObject<float>(Handle<Object> number) {
return static_cast<float>(number->Number());
}
template <>
inline double FromObject<double>(Handle<Object> number) {
return number->Number();
}
template <typename T, typename F>
inline T ToAtomic(F from) {
return static_cast<T>(from);
}
template <>
inline uint32_t ToAtomic<uint32_t, float>(float from) {
return bit_cast<uint32_t, float>(from);
}
template <>
inline uint64_t ToAtomic<uint64_t, double>(double from) {
return bit_cast<uint64_t, double>(from);
}
template <typename T, typename F>
inline T FromAtomic(F from) {
return static_cast<T>(from);
}
template <>
inline float FromAtomic<float, uint32_t>(uint32_t from) {
return bit_cast<float, uint32_t>(from);
}
template <>
inline double FromAtomic<double, uint64_t>(uint64_t from) {
return bit_cast<double, uint64_t>(from);
}
template <typename T>
inline Object* ToObject(Isolate* isolate, T t);
@@ -289,16 +205,6 @@ inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) {
return *isolate->factory()->NewNumber(t);
}
template <>
inline Object* ToObject<float>(Isolate* isolate, float t) {
return *isolate->factory()->NewNumber(t);
}
template <>
inline Object* ToObject<double>(Isolate* isolate, double t) {
return *isolate->factory()->NewNumber(t);
}
template <typename T>
struct FromObjectTraits {};
@@ -338,18 +244,6 @@ struct FromObjectTraits<uint32_t> {
typedef uint32_t atomic_type;
};
template <>
struct FromObjectTraits<float> {
typedef float convert_type;
typedef uint32_t atomic_type;
};
template <>
struct FromObjectTraits<double> {
typedef double convert_type;
typedef uint64_t atomic_type;
};
template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
@@ -560,12 +454,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalFloat32Array:
return DoCompareExchange<float>(isolate, buffer, index, oldobj, newobj);
case kExternalFloat64Array:
return DoCompareExchange<double>(isolate, buffer, index, oldobj, newobj);
case kExternalUint8ClampedArray:
return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj,
newobj);
@@ -594,9 +482,12 @@ RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
case kExternal##Type##Array: \
return DoLoad<ctype>(isolate, buffer, index);
TYPED_ARRAYS(TYPED_ARRAY_CASE)
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
return DoLoad<uint8_t>(isolate, buffer, index);
default:
break;
}
@@ -625,12 +516,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsStore) {
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalFloat32Array:
return DoStore<float>(isolate, buffer, index, value);
case kExternalFloat64Array:
return DoStore<double>(isolate, buffer, index, value);
case kExternalUint8ClampedArray:
return DoStoreUint8Clamped(isolate, buffer, index, value);
@@ -665,8 +550,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
case kExternalUint8ClampedArray:
return DoAddUint8Clamped(isolate, buffer, index, value);
case kExternalFloat32Array:
case kExternalFloat64Array:
default:
break;
}
@@ -698,8 +581,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsSub) {
case kExternalUint8ClampedArray:
return DoSubUint8Clamped(isolate, buffer, index, value);
case kExternalFloat32Array:
case kExternalFloat64Array:
default:
break;
}
@@ -731,8 +612,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
case kExternalUint8ClampedArray:
return DoAndUint8Clamped(isolate, buffer, index, value);
case kExternalFloat32Array:
case kExternalFloat64Array:
default:
break;
}
@@ -764,8 +643,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsOr) {
case kExternalUint8ClampedArray:
return DoOrUint8Clamped(isolate, buffer, index, value);
case kExternalFloat32Array:
case kExternalFloat64Array:
default:
break;
}
@@ -797,8 +674,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
case kExternalUint8ClampedArray:
return DoXorUint8Clamped(isolate, buffer, index, value);
case kExternalFloat32Array:
case kExternalFloat64Array:
default:
break;
}
@@ -830,8 +705,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
case kExternalUint8ClampedArray:
return DoExchangeUint8Clamped(isolate, buffer, index, value);
case kExternalFloat32Array:
case kExternalFloat64Array:
default:
break;
}
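
One asymmetry in the runtime switches above: the float cases are deleted, but Uint8ClampedArray keeps its dedicated Do*Uint8Clamped paths, so clamped arrays remained atomics-capable at this point (the final standard later excluded them as well). A sketch of the draft-era behavior, assuming DoStoreUint8Clamped clamps like an ordinary clamped write:

var u8c = new Uint8ClampedArray(new SharedArrayBuffer(4));
// Draft-era runtime: routed through DoStoreUint8Clamped, storing 255.
// Modern engines instead reject Uint8ClampedArray with a TypeError.
Atomics.store(u8c, 0, 300);
console.log(Atomics.load(u8c, 0));  // 255 under this draft-era runtime
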
......
@@ -12,8 +12,6 @@ function Module(stdlib, foreign, heap) {
var MEMU8 = new stdlib.Uint8Array(heap);
var MEMU16 = new stdlib.Uint16Array(heap);
var MEMU32 = new stdlib.Uint32Array(heap);
var MEMF32 = new stdlib.Float32Array(heap);
var MEMF64 = new stdlib.Float64Array(heap);
var compareExchange = stdlib.Atomics.compareExchange;
var fround = stdlib.Math.fround;
@@ -59,20 +57,6 @@ function Module(stdlib, foreign, heap) {
return compareExchange(MEMU32, i, o, n)>>>0;
}
function compareExchangef32(i, o, n) {
i = i | 0;
o = fround(o);
n = fround(n);
return fround(compareExchange(MEMF32, i, o, n));
}
function compareExchangef64(i, o, n) {
i = i | 0;
o = +o;
n = +n;
return +compareExchange(MEMF64, i, o, n);
}
return {
compareExchangei8: compareExchangei8,
compareExchangei16: compareExchangei16,
@@ -80,8 +64,6 @@ function Module(stdlib, foreign, heap) {
compareExchangeu8: compareExchangeu8,
compareExchangeu16: compareExchangeu16,
compareExchangeu32: compareExchangeu32,
compareExchangef32: compareExchangef32,
compareExchangef64: compareExchangef64
};
}
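
For context, the harness around this module (outside the visible hunks) instantiates it in the usual asm.js way. Roughly, and assuming a shared heap buffer named heap:

var heap = new SharedArrayBuffer(0x10000);  // asm.js heap, power-of-two size
var m = Module(this, {}, heap);             // (stdlib, foreign, heap)
var i32 = new Int32Array(heap);
i32[0] = 7;
console.log(m.compareExchangei32(0, 7, 42));  // 7: the old value
console.log(i32[0]);                          // 42: the exchange succeeded
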
@@ -117,5 +99,3 @@ testElementType(Int32Array, m.compareExchangei32, 0);
testElementType(Uint8Array, m.compareExchangeu8, 0);
testElementType(Uint16Array, m.compareExchangeu16, 0);
testElementType(Uint32Array, m.compareExchangeu32, 0);
testElementType(Float32Array, m.compareExchangef32, NaN);
testElementType(Float64Array, m.compareExchangef64, NaN);
@@ -12,8 +12,6 @@ function Module(stdlib, foreign, heap) {
var MEMU8 = new stdlib.Uint8Array(heap);
var MEMU16 = new stdlib.Uint16Array(heap);
var MEMU32 = new stdlib.Uint32Array(heap);
var MEMF32 = new stdlib.Float32Array(heap);
var MEMF64 = new stdlib.Float64Array(heap);
var load = stdlib.Atomics.load;
var fround = stdlib.Math.fround;
@@ -47,16 +45,6 @@ function Module(stdlib, foreign, heap) {
return load(MEMU32, i)>>>0;
}
function loadf32(i) {
i = i | 0;
return fround(load(MEMF32, i));
}
function loadf64(i) {
i = i | 0;
return +load(MEMF64, i);
}
return {
loadi8: loadi8,
loadi16: loadi16,
@@ -64,8 +52,6 @@ function Module(stdlib, foreign, heap) {
loadu8: loadu8,
loadu16: loadu16,
loadu32: loadu32,
loadf32: loadf32,
loadf64: loadf64
};
}
@@ -98,5 +84,3 @@ testElementType(Int32Array, m.loadi32, 0);
testElementType(Uint8Array, m.loadu8, 0);
testElementType(Uint16Array, m.loadu16, 0);
testElementType(Uint32Array, m.loadu32, 0);
testElementType(Float32Array, m.loadf32, NaN);
testElementType(Float64Array, m.loadf64, NaN);
@@ -12,8 +12,6 @@ function Module(stdlib, foreign, heap) {
var MEMU8 = new stdlib.Uint8Array(heap);
var MEMU16 = new stdlib.Uint16Array(heap);
var MEMU32 = new stdlib.Uint32Array(heap);
var MEMF32 = new stdlib.Float32Array(heap);
var MEMF64 = new stdlib.Float64Array(heap);
var store = stdlib.Atomics.store;
var fround = stdlib.Math.fround;
@@ -53,18 +51,6 @@ function Module(stdlib, foreign, heap) {
return store(MEMU32, i, x)>>>0;
}
function storef32(i, x) {
i = i | 0;
x = fround(x);
return fround(store(MEMF32, i, x));
}
function storef64(i, x) {
i = i | 0;
x = +x;
return +store(MEMF64, i, x);
}
return {
storei8: storei8,
storei16: storei16,
@@ -72,8 +58,6 @@ function Module(stdlib, foreign, heap) {
storeu8: storeu8,
storeu16: storeu16,
storeu32: storeu32,
storef32: storef32,
storef64: storef64
};
}
@@ -105,5 +89,3 @@ testElementType(Int32Array, m.storei32, 0);
testElementType(Uint8Array, m.storeu8, 0);
testElementType(Uint16Array, m.storeu16, 0);
testElementType(Uint32Array, m.storeu32, 0);
testElementType(Float32Array, m.storef32, NaN);
testElementType(Float64Array, m.storef64, NaN);
@@ -38,11 +38,6 @@ var IntegerTypedArrayConstructors = [
makeConstructorObject(Uint32Array, 0, 0xffffffff, toRangeWrapped),
];
var TypedArrayConstructors = IntegerTypedArrayConstructors.concat([
{constr: Float32Array},
{constr: Float64Array},
]);
(function TestBadArray() {
var ab = new ArrayBuffer(16);
var u32a = new Uint32Array(16);
@@ -50,8 +45,8 @@ var TypedArrayConstructors = IntegerTypedArrayConstructors.concat([
var sf32a = new Float32Array(sab);
var sf64a = new Float64Array(sab);
// Atomic ops require shared typed arrays
[undefined, 1, 'hi', 3.4, ab, u32a, sab].forEach(function(o) {
// Atomic ops require integer shared typed arrays
[undefined, 1, 'hi', 3.4, ab, u32a, sab, sf32a, sf64a].forEach(function(o) {
assertThrows(function() { Atomics.compareExchange(o, 0, 0, 0); },
TypeError);
assertThrows(function() { Atomics.load(o, 0); }, TypeError);
@@ -63,16 +58,6 @@ var TypedArrayConstructors = IntegerTypedArrayConstructors.concat([
assertThrows(function() { Atomics.xor(o, 0, 0); }, TypeError);
assertThrows(function() { Atomics.exchange(o, 0, 0); }, TypeError);
});
// Arithmetic atomic ops require integer shared arrays
[sab, sf32a, sf64a].forEach(function(o) {
assertThrows(function() { Atomics.add(o, 0, 0); }, TypeError);
assertThrows(function() { Atomics.sub(o, 0, 0); }, TypeError);
assertThrows(function() { Atomics.and(o, 0, 0); }, TypeError);
assertThrows(function() { Atomics.or(o, 0, 0); }, TypeError);
assertThrows(function() { Atomics.xor(o, 0, 0); }, TypeError);
assertThrows(function() { Atomics.exchange(o, 0, 0); }, TypeError);
});
})();
function testAtomicOp(op, ia, index, expectedIndex, name) {
@@ -163,7 +148,7 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
})();
(function TestCompareExchange() {
TypedArrayConstructors.forEach(function(t) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
var name = Object.prototype.toString.call(sta);
@@ -177,32 +162,10 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
assertEquals(50, sta[i], name);
}
});
// * Exact float values should be OK
// * Infinity and -Infinity should be OK (each has an exact representation)
// * NaN is not OK: it has many representations, so a successful CAS cannot
// be ensured, because the comparison is bitwise
[1.5, 4.25, -1e8, -Infinity, Infinity].forEach(function(v) {
var sab = new SharedArrayBuffer(10 * Float32Array.BYTES_PER_ELEMENT);
var sf32a = new Float32Array(sab);
sf32a[0] = 0;
assertEquals(0, Atomics.compareExchange(sf32a, 0, 0, v));
assertEquals(v, sf32a[0]);
assertEquals(v, Atomics.compareExchange(sf32a, 0, v, 0));
assertEquals(0, sf32a[0]);
var sab2 = new SharedArrayBuffer(10 * Float64Array.BYTES_PER_ELEMENT);
var sf64a = new Float64Array(sab2);
sf64a[0] = 0;
assertEquals(0, Atomics.compareExchange(sf64a, 0, 0, v));
assertEquals(v, sf64a[0]);
assertEquals(v, Atomics.compareExchange(sf64a, 0, v, 0));
assertEquals(0, sf64a[0]);
});
})();
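
The deleted comment above pins down why floating-point compareExchange was dropped: the CAS compares bit patterns, and NaN has many. A quick illustration of two distinct NaN encodings (illustrative only; the BigInt accessors postdate this commit):

var view = new DataView(new ArrayBuffer(8));
view.setBigUint64(0, 0x7ff8000000000000n);  // canonical quiet NaN
var nan1 = view.getFloat64(0);
view.setBigUint64(0, 0x7ff8000000000001n);  // NaN with a different payload
var nan2 = view.getFloat64(0);
console.log(Number.isNaN(nan1), Number.isNaN(nan2));  // true true
// Both read back as NaN, yet their 64-bit patterns differ, so a bitwise
// CAS expecting one pattern can spuriously fail against the other.
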
(function TestLoad() {
TypedArrayConstructors.forEach(function(t) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
var name = Object.prototype.toString.call(sta);
@@ -216,7 +179,7 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
})();
(function TestStore() {
TypedArrayConstructors.forEach(function(t) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
var name = Object.prototype.toString.call(sta);
@@ -228,20 +191,6 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
assertEquals(100, sta[i], name);
}
});
[1.5, 4.25, -1e8, -Infinity, Infinity, NaN].forEach(function(v) {
var sab = new SharedArrayBuffer(10 * Float32Array.BYTES_PER_ELEMENT);
var sf32a = new Float32Array(sab);
sf32a[0] = 0;
assertEquals(v, Atomics.store(sf32a, 0, v));
assertEquals(v, sf32a[0]);
var sab2 = new SharedArrayBuffer(10 * Float64Array.BYTES_PER_ELEMENT);
var sf64a = new Float64Array(sab2);
sf64a[0] = 0;
assertEquals(v, Atomics.store(sf64a, 0, v));
assertEquals(v, sf64a[0]);
});
})();
(function TestAdd() {
@@ -348,7 +297,7 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
// Sizes that aren't equal to a typedarray BYTES_PER_ELEMENT always return
// false.
var validSizes = {};
TypedArrayConstructors.forEach(function(t) {
IntegerTypedArrayConstructors.forEach(function(t) {
validSizes[t.constr.BYTES_PER_ELEMENT] = true;
});
......