Commit 84e078c6, authored by Maya Lekova, committed by V8 LUCI CQ

[fastcall] Support EnforceRange annotation

This CL implements checks in case EnforceRange is requested for a
given parameter by using TryTruncate* operators. It implements 2 such
truncations on x64 and arm64 - TryTruncateFloat64ToInt32 and
TryTruncateFloat64ToUint32.

Bug: chromium:1052746
Change-Id: I32f34d9dc1265af568cc576663620a8f7f8245f6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3721618
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81512}
parent 93f5f1a7
......@@ -802,6 +802,16 @@ class CFunctionBuilderWithFunction {
std::make_index_sequence<sizeof...(ArgBuilders)>());
}
// Provided for testing purposes.
// Replaces the stored C function pointer with |patching_func| while keeping
// the builder's declared signature. Only the argument *count* is verified at
// compile time; argument and return types are not checked (note the
// reinterpret_cast below), so the caller must keep them ABI-compatible.
template <typename Ret, typename... Args>
auto Patch(Ret (*patching_func)(Args...)) {
  static_assert(
      sizeof...(Args) == sizeof...(ArgBuilders),
      "The patching function must have the same number of arguments.");
  fn_ = reinterpret_cast<void*>(patching_func);
  return *this;
}
auto Build() {
static CFunctionInfoImpl<RetBuilder, ArgBuilders...> instance;
return CFunction(fn_, &instance);
......
......@@ -101,7 +101,7 @@ void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand,
}
}
void MacroAssembler::Ccmn(const Register& rn, const Operand& operand,
void TurboAssembler::Ccmn(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
......@@ -531,6 +531,15 @@ void TurboAssembler::Fccmp(const VRegister& fn, const VRegister& fm,
fccmp(fn, fm, nzcv, cond);
}
// Fccmp against a double immediate: materializes |value| into a scratch FP
// register of the same size as |fn| and delegates to the register variant.
// The scratch register is only live within this function's scope.
void TurboAssembler::Fccmp(const VRegister& fn, const double value,
                           StatusFlags nzcv, Condition cond) {
  DCHECK(allow_macro_instructions());
  UseScratchRegisterScope temps(this);
  VRegister tmp = temps.AcquireSameSizeAs(fn);
  Fmov(tmp, value);
  Fccmp(fn, tmp, nzcv, cond);
}
void TurboAssembler::Fcmp(const VRegister& fn, const VRegister& fm) {
DCHECK(allow_macro_instructions());
fcmp(fn, fm);
......
......@@ -1096,6 +1096,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Condition cond);
inline void CcmpTagged(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond);
inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
inline void Clz(const Register& rd, const Register& rn);
......@@ -1148,6 +1150,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void Csetm(const Register& rd, Condition cond);
inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
Condition cond);
inline void Fccmp(const VRegister& fn, const double value, StatusFlags nzcv,
Condition cond);
inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
......@@ -1541,9 +1545,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
inline void Ngc(const Register& rd, const Operand& operand);
inline void Ngcs(const Register& rd, const Operand& operand);
inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
#define DECLARE_FUNCTION(FN, OP) \
inline void FN(const Register& rs, const Register& rt, const Register& rn);
STLX_MACRO_LIST(DECLARE_FUNCTION)
......
......@@ -1163,7 +1163,7 @@ void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
tasm->Cvttss2siq(dst, kScratchDoubleReg);
}
tasm->testq(dst, dst);
// The only possible negative value here is 0x80000000000000000, which is
// The only possible negative value here is 0x8000000000000000, which is
// used on x64 to indicate an integer overflow.
tasm->j(negative, fail ? fail : &success);
// The input value is within uint64 range and the second conversion worked
......@@ -1173,6 +1173,44 @@ void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
tasm->orq(dst, kScratchRegister);
tasm->bind(&success);
}
// Emits code that truncates the float (is_double=false) or double
// (is_double=true) in |src| to a uint32 in |dst|.
// On out-of-range or NaN input, jumps to |fail| when one is provided;
// when |fail| is nullptr the code instead falls through to |success| with
// the sentinel 0x80000000 left in |dst| (mirroring ConvertFloatToUint64).
// Clobbers kScratchDoubleReg and kScratchRegister.
template <typename OperandOrXMMRegister, bool is_double>
void ConvertFloatToUint32(TurboAssembler* tasm, Register dst,
                          OperandOrXMMRegister src, Label* fail) {
  Label success;
  // There does not exist a native float-to-uint instruction, so we have to use
  // a float-to-int, and postprocess the result.
  if (is_double) {
    tasm->Cvttsd2si(dst, src);
  } else {
    tasm->Cvttss2si(dst, src);
  }
  // If the result of the conversion is positive, we are already done:
  // the input was in [0, 2^31).
  tasm->testl(dst, dst);
  tasm->j(positive, &success);
  // The result of the first conversion was negative, which means that the
  // input value was not within the positive int32 range. We subtract 2^31
  // and convert it again to see if it is within the uint32 range.
  if (is_double) {
    tasm->Move(kScratchDoubleReg, -2147483648.0);
    tasm->Addsd(kScratchDoubleReg, src);
    tasm->Cvttsd2si(dst, kScratchDoubleReg);
  } else {
    tasm->Move(kScratchDoubleReg, -2147483648.0f);
    tasm->Addss(kScratchDoubleReg, src);
    tasm->Cvttss2si(dst, kScratchDoubleReg);
  }
  tasm->testl(dst, dst);
  // The only possible negative value here is 0x80000000, which is
  // used on x64 to indicate an integer overflow.
  tasm->j(negative, fail ? fail : &success);
  // The input value is within uint32 range and the second conversion worked
  // successfully, but we still have to undo the subtraction we did
  // earlier: set bit 31 back, turning [0, 2^31) into [2^31, 2^32).
  tasm->Move(kScratchRegister, 0x80000000);
  tasm->orl(dst, kScratchRegister);
  tasm->bind(&success);
}
} // namespace
void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) {
......@@ -1183,6 +1221,14 @@ void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint64<XMMRegister, true>(this, dst, src, fail);
}
// Truncates the double in memory operand |src| to uint32 in |dst|; jumps to
// |fail| (if non-null) on out-of-range or NaN input.
void TurboAssembler::Cvttsd2ui(Register dst, Operand src, Label* fail) {
  ConvertFloatToUint32<Operand, true>(this, dst, src, fail);
}
// Same as above, with the input in an XMM register.
void TurboAssembler::Cvttsd2ui(Register dst, XMMRegister src, Label* fail) {
  ConvertFloatToUint32<XMMRegister, true>(this, dst, src, fail);
}
void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) {
ConvertFloatToUint64<Operand, false>(this, dst, src, fail);
}
......@@ -1191,6 +1237,14 @@ void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
}
// Truncates the float32 in memory operand |src| to uint32 in |dst|; jumps to
// |fail| (if non-null) on out-of-range or NaN input.
void TurboAssembler::Cvttss2ui(Register dst, Operand src, Label* fail) {
  ConvertFloatToUint32<Operand, false>(this, dst, src, fail);
}
// Same as above, with the input in an XMM register.
void TurboAssembler::Cvttss2ui(Register dst, XMMRegister src, Label* fail) {
  ConvertFloatToUint32<XMMRegister, false>(this, dst, src, fail);
}
void TurboAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
......
......@@ -152,8 +152,12 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Cvtqui2sd(XMMRegister dst, Operand src);
void Cvttsd2uiq(Register dst, Operand src, Label* fail = nullptr);
void Cvttsd2uiq(Register dst, XMMRegister src, Label* fail = nullptr);
void Cvttsd2ui(Register dst, Operand src, Label* fail = nullptr);
void Cvttsd2ui(Register dst, XMMRegister src, Label* fail = nullptr);
void Cvttss2uiq(Register dst, Operand src, Label* fail = nullptr);
void Cvttss2uiq(Register dst, XMMRegister src, Label* fail = nullptr);
void Cvttss2ui(Register dst, Operand src, Label* fail = nullptr);
void Cvttss2ui(Register dst, XMMRegister src, Label* fail = nullptr);
// cvtsi2sd and cvtsi2ss instructions only write to the low 64/32-bit of dst
// register, which hinders register renaming and makes dependence chains
......
......@@ -1726,6 +1726,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArm64Float64ToInt32:
__ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
if (i.OutputCount() > 1) {
// Check for inputs below INT32_MIN and NaN.
__ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT32_MIN));
__ Cset(i.OutputRegister(1).W(), ge);
__ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT32_MAX) + 1);
__ CmovX(i.OutputRegister(1), xzr, ge);
}
break;
case kArm64Float32ToUint32: {
__ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0));
......@@ -1740,6 +1747,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArm64Float64ToUint32:
__ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
if (i.OutputCount() > 1) {
__ Fcmp(i.InputDoubleRegister(0), -1.0);
__ Cset(i.OutputRegister(1).W(), gt);
__ Fcmp(i.InputDoubleRegister(0), static_cast<double>(UINT32_MAX) + 1);
__ CmovX(i.OutputRegister(1), xzr, ge);
}
break;
case kArm64Float32ToInt64:
__ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
......
......@@ -1934,6 +1934,36 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
}
// Selects kArm64Float64ToInt32 with an optional second output: when the
// node's Projection(1) is used, it receives the "value was in range" bit.
void InstructionSelector::VisitTryTruncateFloat64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  InstructionOperand in_ops[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand out_ops[2];
  size_t num_outputs = 1;
  out_ops[0] = g.DefineAsRegister(node);
  if (Node* success = NodeProperties::FindProjection(node, 1)) {
    out_ops[num_outputs++] = g.DefineAsRegister(success);
  }
  Emit(kArm64Float64ToInt32, num_outputs, out_ops, 1, in_ops);
}
// Unsigned counterpart of VisitTryTruncateFloat64ToInt32: selects
// kArm64Float64ToUint32, optionally defining the success projection.
void InstructionSelector::VisitTryTruncateFloat64ToUint32(Node* node) {
  Arm64OperandGenerator g(this);
  InstructionOperand in_ops[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand out_ops[2];
  size_t num_outputs = 1;
  out_ops[0] = g.DefineAsRegister(node);
  if (Node* success = NodeProperties::FindProjection(node, 1)) {
    out_ops[num_outputs++] = g.DefineAsRegister(success);
  }
  Emit(kArm64Float64ToUint32, num_outputs, out_ops, 1, in_ops);
}
void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
DCHECK(SmiValuesAre31Bits());
DCHECK(COMPRESS_POINTERS_BOOL);
......
......@@ -1656,6 +1656,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
case IrOpcode::kTryTruncateFloat64ToUint64:
return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
case IrOpcode::kTryTruncateFloat64ToInt32:
return MarkAsWord32(node), VisitTryTruncateFloat64ToInt32(node);
case IrOpcode::kTryTruncateFloat64ToUint32:
return MarkAsWord32(node), VisitTryTruncateFloat64ToUint32(node);
case IrOpcode::kBitcastWord32ToWord64:
return MarkAsWord64(node), VisitBitcastWord32ToWord64(node);
case IrOpcode::kChangeInt32ToInt64:
......@@ -2628,6 +2632,14 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
UNIMPLEMENTED();
}
// Fallback stubs: checked float64 -> (u)int32 truncation is not implemented
// for this backend (only the x64/arm64 selectors provide it).
void InstructionSelector::VisitTryTruncateFloat64ToInt32(Node* node) {
  UNIMPLEMENTED();
}
// Unsigned variant of the stub above.
void InstructionSelector::VisitTryTruncateFloat64ToUint32(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
......@@ -2911,6 +2923,8 @@ void InstructionSelector::VisitProjection(Node* node) {
case IrOpcode::kTryTruncateFloat64ToInt64:
case IrOpcode::kTryTruncateFloat32ToUint64:
case IrOpcode::kTryTruncateFloat64ToUint64:
case IrOpcode::kTryTruncateFloat64ToInt32:
case IrOpcode::kTryTruncateFloat64ToUint32:
case IrOpcode::kInt32PairAdd:
case IrOpcode::kInt32PairSub:
case IrOpcode::kInt32PairMul:
......
......@@ -2010,22 +2010,80 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat64ToFloat32:
ASSEMBLE_SSE_UNOP(Cvtsd2ss);
break;
case kSSEFloat64ToInt32:
if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
case kSSEFloat64ToInt32: {
Register output_reg = i.OutputRegister(0);
if (instr->OutputCount() == 1) {
if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
}
break;
}
DCHECK_EQ(2, instr->OutputCount());
Register success_reg = i.OutputRegister(1);
if (CpuFeatures::IsSupported(SSE4_1) || CpuFeatures::IsSupported(AVX)) {
DoubleRegister rounded = kScratchDoubleReg;
if (instr->InputAt(0)->IsFPRegister()) {
__ Roundsd(rounded, i.InputDoubleRegister(0), kRoundToZero);
__ Cvttsd2si(output_reg, i.InputDoubleRegister(0));
} else {
__ Roundsd(rounded, i.InputOperand(0), kRoundToZero);
// Convert {rounded} instead of the input operand, to avoid another
// load.
__ Cvttsd2si(output_reg, rounded);
}
DoubleRegister converted_back = i.TempSimd128Register(0);
__ Cvtlsi2sd(converted_back, output_reg);
// Compare the converted back value to the rounded value, set
// success_reg to 0 if they differ, or 1 on success.
__ Cmpeqsd(converted_back, rounded);
__ Movq(success_reg, converted_back);
__ And(success_reg, Immediate(1));
} else {
__ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
// Less efficient code for non-AVX and non-SSE4_1 CPUs.
if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2si(i.OutputRegister(0), i.InputDoubleRegister(0));
} else {
__ Cvttsd2si(i.OutputRegister(0), i.InputOperand(0));
}
__ Move(success_reg, 1);
Label done;
Label fail;
__ Move(kScratchDoubleReg, double{INT32_MIN});
if (instr->InputAt(0)->IsFPRegister()) {
__ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
__ Ucomisd(kScratchDoubleReg, i.InputOperand(0));
}
// If the input is NaN, then the conversion fails.
__ j(parity_even, &fail, Label::kNear);
// If the input is INT32_MIN, then the conversion succeeds.
__ j(equal, &done, Label::kNear);
__ cmpq(output_reg, Immediate(1));
// If the conversion results in INT32_MIN, but the input was not
// INT32_MIN, then the conversion fails.
__ j(no_overflow, &done, Label::kNear);
__ bind(&fail);
__ Move(success_reg, 0);
__ bind(&done);
}
break;
}
case kSSEFloat64ToUint32: {
Label fail;
// Set Projection(1) to 0, denoting value out of range.
if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 0);
if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
__ Cvttsd2ui(i.OutputRegister(), i.InputDoubleRegister(0), &fail);
} else {
__ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
}
if (MiscField::decode(instr->opcode())) {
__ AssertZeroExtended(i.OutputRegister());
__ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0), &fail);
}
// Set Projection(1) to 1, denoting value in range (otherwise the
// conversion above would have jumped to `fail`), which is the success
// case.
if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 1);
__ bind(&fail);
break;
}
case kSSEFloat32ToInt64: {
......@@ -2149,6 +2207,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEFloat32ToUint64: {
// See kSSEFloat64ToUint32 for explanation.
Label fail;
if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 0);
if (instr->InputAt(0)->IsFPRegister()) {
......@@ -2161,6 +2220,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEFloat64ToUint64: {
// See kSSEFloat64ToUint32 for explanation.
Label fail;
if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 0);
if (instr->InputAt(0)->IsFPRegister()) {
......
......@@ -1456,6 +1456,17 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
VisitMulHigh(this, node, kX64UmulHigh32);
}
// TryTruncateFloat32ToInt64 and TryTruncateFloat64ToInt64 operations attempt
// truncation from 32|64-bit float to 64-bit integer by performing roughly the
// following steps:
// 1. Round the original FP value to zero, store in `rounded`;
// 2. Convert the original FP value to integer;
// 3. Convert the integer value back to floating point, store in
// `converted_back`;
// 4. If `rounded` == `converted_back`:
// Set Projection(1) := 1; -- the value was in range
// Else:
// Set Projection(1) := 0; -- the value was out of range
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
X64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
......@@ -1474,22 +1485,27 @@ void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs, temp_count, temps);
}
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
// TryTruncateFloatNNToUintDD operations attempt truncation from NN-bit
// float to DD-bit integer by using ConvertFloatToUintDD macro instructions.
// It performs a float-to-int instruction, rounding to zero and tests whether
// the result is positive integer (the default, fast case), which means the
// value is in range. Then, we set Projection(1) := 1. Else, we perform
// additional subtraction, conversion and (in case the value was originally
// negative, but still within range) we restore it and set Projection(1) := 1.
// In all other cases we set Projection(1) := 0, denoting value out of range.
// TryTruncateFloatNNToUintDD operations attempt truncation from NN-bit
// float to DD-bit integer by using ConvertFloatToUintDD macro instructions.
// It performs a float-to-int instruction, rounding to zero and tests whether
// the result is positive integer (the default, fast case), which means the
// value is in range. Then, we set Projection(1) := 1. Else, we perform
// additional subtraction, conversion and (in case the value was originally
// negative, but still within range) we restore it and set Projection(1) := 1.
// In all other cases we set Projection(1) := 0, denoting value out of range.
//
// Unlike the signed TryTruncate visitors, this lowering is implemented with
// macro instructions in the code generator and needs no SIMD temp register,
// so none is requested here. (The previous version populated a temps[] array
// that was never passed to Emit — dead code that has been removed.)
void InstructionSelector::VisitTryTruncateFloat64ToUint32(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  // Projection(1), when used, receives the "value was in range" bit.
  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }
  Emit(kSSEFloat64ToUint32, output_count, outputs, 1, inputs);
}
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
......@@ -1522,6 +1538,42 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
}
// Selects kSSEFloat64ToInt64. When the success projection is used, a SIMD
// temp register is additionally requested for the round-trip comparison
// performed by the code generator.
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand in_ops[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand out_ops[2];
  InstructionOperand temp_ops[1];
  size_t num_outputs = 1;
  size_t num_temps = 0;
  out_ops[0] = g.DefineAsRegister(node);
  if (Node* success = NodeProperties::FindProjection(node, 1)) {
    out_ops[num_outputs++] = g.DefineAsRegister(success);
    temp_ops[num_temps++] = g.TempSimd128Register();
  }
  Emit(kSSEFloat64ToInt64, num_outputs, out_ops, 1, in_ops, num_temps,
       temp_ops);
}
// Selects kSSEFloat64ToInt32. Mirrors VisitTryTruncateFloat64ToInt64: the
// SIMD temp is only needed when the success projection is consumed.
void InstructionSelector::VisitTryTruncateFloat64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand in_ops[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand out_ops[2];
  InstructionOperand temp_ops[1];
  size_t num_outputs = 1;
  size_t num_temps = 0;
  out_ops[0] = g.DefineAsRegister(node);
  if (Node* success = NodeProperties::FindProjection(node, 1)) {
    out_ops[num_outputs++] = g.DefineAsRegister(success);
    temp_ops[num_temps++] = g.TempSimd128Register();
  }
  Emit(kSSEFloat64ToInt32, num_outputs, out_ops, 1, in_ops, num_temps,
       temp_ops);
}
void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
DCHECK(SmiValuesAre31Bits());
DCHECK(COMPRESS_POINTERS_BOOL);
......
......@@ -4954,20 +4954,46 @@ Node* EffectControlLinearizer::AdaptFastCallArgument(
int kSize = sizeof(uintptr_t);
switch (arg_type.GetSequenceType()) {
case CTypeInfo::SequenceType::kScalar: {
switch (arg_type.GetType()) {
case CTypeInfo::Type::kV8Value: {
Node* stack_slot = __ StackSlot(kSize, kAlign);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
stack_slot, 0, node);
return stack_slot;
}
case CTypeInfo::Type::kFloat32: {
return __ TruncateFloat64ToFloat32(node);
if (uint8_t(arg_type.GetFlags()) &
uint8_t(CTypeInfo::Flags::kEnforceRangeBit)) {
Node* truncation;
switch (arg_type.GetType()) {
case CTypeInfo::Type::kInt32:
truncation = __ TryTruncateFloat64ToInt32(node);
__ GotoIfNot(__ Projection(1, truncation), if_error);
return __ Projection(0, truncation);
case CTypeInfo::Type::kUint32:
truncation = __ TryTruncateFloat64ToUint32(node);
__ GotoIfNot(__ Projection(1, truncation), if_error);
return __ Projection(0, truncation);
case CTypeInfo::Type::kInt64:
truncation = __ TryTruncateFloat64ToInt64(node);
__ GotoIfNot(__ Projection(1, truncation), if_error);
return __ Projection(0, truncation);
case CTypeInfo::Type::kUint64:
truncation = __ TryTruncateFloat64ToUint64(node);
__ GotoIfNot(__ Projection(1, truncation), if_error);
return __ Projection(0, truncation);
default: {
return node;
}
}
default: {
return node;
} else {
switch (arg_type.GetType()) {
case CTypeInfo::Type::kV8Value: {
Node* stack_slot = __ StackSlot(kSize, kAlign);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
stack_slot, 0, node);
return stack_slot;
}
case CTypeInfo::Type::kFloat32: {
return __ TruncateFloat64ToFloat32(node);
}
default: {
return node;
}
}
}
}
......
......@@ -51,6 +51,10 @@ class Reducer;
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToWord32) \
V(TruncateInt64ToInt32) \
V(TryTruncateFloat64ToInt64) \
V(TryTruncateFloat64ToUint64) \
V(TryTruncateFloat64ToInt32) \
V(TryTruncateFloat64ToUint32) \
V(Word32ReverseBytes) \
V(Word64ReverseBytes)
......
......@@ -54,6 +54,11 @@ class MachineRepresentationInferrer {
CHECK_LE(index, static_cast<size_t>(1));
return index == 0 ? MachineRepresentation::kWord64
: MachineRepresentation::kBit;
case IrOpcode::kTryTruncateFloat64ToInt32:
case IrOpcode::kTryTruncateFloat64ToUint32:
CHECK_LE(index, static_cast<size_t>(1));
return index == 0 ? MachineRepresentation::kWord32
: MachineRepresentation::kBit;
case IrOpcode::kTryTruncateFloat32ToInt64:
case IrOpcode::kTryTruncateFloat64ToInt64:
case IrOpcode::kTryTruncateFloat32ToUint64:
......@@ -413,6 +418,8 @@ class MachineRepresentationChecker {
case IrOpcode::kFloat64ExtractHighWord32:
case IrOpcode::kBitcastFloat64ToInt64:
case IrOpcode::kTryTruncateFloat64ToInt64:
case IrOpcode::kTryTruncateFloat64ToInt32:
case IrOpcode::kTryTruncateFloat64ToUint32:
CheckValueInputForFloat64Op(node, 0);
break;
case IrOpcode::kWord64Equal:
......
......@@ -350,6 +350,8 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat64ToInt32, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 2) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1) \
......
......@@ -588,6 +588,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* TryTruncateFloat64ToInt64();
const Operator* TryTruncateFloat32ToUint64();
const Operator* TryTruncateFloat64ToUint64();
const Operator* TryTruncateFloat64ToInt32();
const Operator* TryTruncateFloat64ToUint32();
const Operator* ChangeInt32ToFloat64();
const Operator* BitcastWord32ToWord64();
const Operator* ChangeInt32ToInt64();
......
......@@ -731,6 +731,8 @@
V(TryTruncateFloat64ToInt64) \
V(TryTruncateFloat32ToUint64) \
V(TryTruncateFloat64ToUint64) \
V(TryTruncateFloat64ToInt32) \
V(TryTruncateFloat64ToUint32) \
V(ChangeInt32ToFloat64) \
V(BitcastWord32ToWord64) \
V(ChangeInt32ToInt64) \
......
......@@ -804,6 +804,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* TryTruncateFloat64ToUint64(Node* a) {
return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
}
// Adds a checked float64 -> int32 truncation node. The operator has two
// value outputs: Projection(0) is the truncated value, Projection(1) the
// in-range/success bit.
Node* TryTruncateFloat64ToInt32(Node* a) {
  return AddNode(machine()->TryTruncateFloat64ToInt32(), a);
}
// Unsigned variant: checked float64 -> uint32 truncation.
Node* TryTruncateFloat64ToUint32(Node* a) {
  return AddNode(machine()->TryTruncateFloat64ToUint32(), a);
}
Node* ChangeInt32ToInt64(Node* a) {
return AddNode(machine()->ChangeInt32ToInt64(), a);
}
......
......@@ -517,6 +517,8 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
CASE(TryTruncateFloat64ToInt64)
CASE(TryTruncateFloat32ToUint64)
CASE(TryTruncateFloat64ToUint64)
CASE(TryTruncateFloat64ToInt32)
CASE(TryTruncateFloat64ToUint32)
CASE(ChangeInt32ToFloat64)
CASE(BitcastWord32ToWord64)
CASE(ChangeInt64ToFloat64)
......
......@@ -1893,6 +1893,11 @@ class RepresentationSelector {
FeedbackSource const& feedback) {
switch (type.GetSequenceType()) {
case CTypeInfo::SequenceType::kScalar: {
// TODO(mslekova): Add clamp.
if (uint8_t(type.GetFlags()) &
uint8_t(CTypeInfo::Flags::kEnforceRangeBit)) {
return UseInfo::CheckedNumberAsFloat64(kIdentifyZeros, feedback);
}
switch (type.GetType()) {
case CTypeInfo::Type::kVoid:
UNREACHABLE();
......
......@@ -1831,6 +1831,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kTryTruncateFloat64ToInt64:
case IrOpcode::kTryTruncateFloat32ToUint64:
case IrOpcode::kTryTruncateFloat64ToUint64:
case IrOpcode::kTryTruncateFloat64ToInt32:
case IrOpcode::kTryTruncateFloat64ToUint32:
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
case IrOpcode::kFloat64InsertLowWord32:
......
This diff is collapsed.
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// These tests exercise WebIDL annotations support in the fast API.
// Flags: --turbo-fast-api-calls --expose-fast-api --allow-natives-syntax --turbofan
// TODO(mslekova): Implement support for TryTruncateFloat64ToInt32.
// Flags: --no-turboshaft
// --always-turbofan is disabled because we rely on particular feedback for
// optimizing to the fastest path.
// Flags: --no-always-turbofan
// The test relies on optimizing/deoptimizing at predictable moments, so
// it's not suitable for deoptimization fuzzing.
// Flags: --deopt-every-n-times=0
// Test object exposing the fast C API functions provided by d8.
const fast_c_api = new d8.test.FastCAPI();
// ----------- add_all_annotate_enforce_range -----------
// `add_all_annotate_enforce_range` has the following signature:
//   double add_all_annotate_enforce_range(bool /*should_fallback*/,
//                                         int32_t, uint32_t,
//                                         int64_t, uint64_t)
// Extreme-but-valid values for each EnforceRange-annotated parameter type;
// all must be accepted without throwing.
const limits_params = [
  -(2 ** 31),              // i32 minimum
  2 ** 32 - 1,             // u32 maximum
  Number.MIN_SAFE_INTEGER, // i64 (smallest exactly-representable integer)
  Number.MAX_SAFE_INTEGER  // u64 (largest exactly-representable integer)
];
const limits_result = limits_params[0] + limits_params[1] + limits_params[2] + limits_params[3];
function add_all_annotate_enforce_range(params, should_fallback = false) {
  return fast_c_api.add_all_annotate_enforce_range(should_fallback,
    params[0], params[1], params[2], params[3]);
}
// The result must be identical on the unoptimized and the fast (optimized)
// call path.
%PrepareFunctionForOptimization(add_all_annotate_enforce_range);
assertEquals(limits_result, add_all_annotate_enforce_range(limits_params));
%OptimizeFunctionOnNextCall(add_all_annotate_enforce_range);
assertEquals(limits_result, add_all_annotate_enforce_range(limits_params));
// ----------- enforce_range_compare -----------
// `enforce_range_compare` has the following signature:
//   double enforce_range_compare(bool /*should_fallback*/,
//                                double, int64_t)
// NOTE(review): each compare_* helper appears to return whether the fast
// path was taken — false before optimization, true after. The in-range
// truncation semantics below (e.g. -0.5 -> 0) presumably follow the WebIDL
// EnforceRange rounding rules; confirm against the FastCAPI implementation.
// ----------- i32 -----------
function compare_i32(in_range, arg) {
  return fast_c_api.enforce_range_compare_i32(in_range, arg, arg);
}
%PrepareFunctionForOptimization(compare_i32);
assertFalse(compare_i32(true, 123));
%OptimizeFunctionOnNextCall(compare_i32);
// Values within (or truncating into) int32 range succeed on the fast path.
assertTrue(compare_i32(true, 123));
assertTrue(compare_i32(true, -0.5));
assertTrue(compare_i32(true, 0.5));
assertTrue(compare_i32(true, 1.5));
assertTrue(compare_i32(true, -(2 ** 31)));
assertTrue(compare_i32(true, 2 ** 31 - 1));
// Values outside int32 range must throw (EnforceRange behavior).
assertThrows(() => compare_i32(false, -(2 ** 32)));
assertThrows(() => compare_i32(false, -(2 ** 32 + 1)));
assertThrows(() => compare_i32(false, 2 ** 32));
assertThrows(() => compare_i32(false, 2 ** 32 + 3.15));
assertThrows(() => compare_i32(false, Number.MIN_SAFE_INTEGER));
assertThrows(() => compare_i32(false, Number.MAX_SAFE_INTEGER));
// ----------- u32 -----------
function compare_u32(in_range, arg) {
  return fast_c_api.enforce_range_compare_u32(in_range, arg, arg);
}
%PrepareFunctionForOptimization(compare_u32);
assertFalse(compare_u32(true, 123));
%OptimizeFunctionOnNextCall(compare_u32);
// In-range values, including -0.5/0.5 which truncate to 0.
assertTrue(compare_u32(true, 123));
assertTrue(compare_u32(true, 0));
assertTrue(compare_u32(true, -0.5));
assertTrue(compare_u32(true, 0.5));
assertTrue(compare_u32(true, 2 ** 32 - 1));
// Negative integers and values >= 2^32 are out of uint32 range.
assertThrows(() => compare_u32(false, -(2 ** 31)));
assertThrows(() => compare_u32(false, 2 ** 32));
assertThrows(() => compare_u32(false, -1));
assertThrows(() => compare_u32(false, -1.5));
assertThrows(() => compare_u32(false, Number.MIN_SAFE_INTEGER));
assertThrows(() => compare_u32(false, Number.MAX_SAFE_INTEGER));
// ----------- i64 -----------
function compare_i64(in_range, arg) {
  return fast_c_api.enforce_range_compare_i64(in_range, arg, arg);
}
%PrepareFunctionForOptimization(compare_i64);
assertFalse(compare_i64(true, 123));
%OptimizeFunctionOnNextCall(compare_i64);
// The full safe-integer range fits into int64.
assertTrue(compare_i64(true, 123));
assertTrue(compare_i64(true, -0.5));
assertTrue(compare_i64(true, 0.5));
assertTrue(compare_i64(true, 1.5));
assertTrue(compare_i64(true, -(2 ** 63)));
assertTrue(compare_i64(true, Number.MIN_SAFE_INTEGER));
assertTrue(compare_i64(true, Number.MAX_SAFE_INTEGER));
// Magnitudes of 2^64 and beyond are out of int64 range.
assertThrows(() => compare_i64(false, -(2 ** 64)));
assertThrows(() => compare_i64(false, -(2 ** 64 + 1)));
assertThrows(() => compare_i64(false, 2 ** 64));
assertThrows(() => compare_i64(false, 2 ** 64 + 3.15));
// ----------- u64 -----------
function compare_u64(in_range, arg) {
  return fast_c_api.enforce_range_compare_u64(in_range, arg, arg);
}
%PrepareFunctionForOptimization(compare_u64);
assertFalse(compare_u64(true, 123));
%OptimizeFunctionOnNextCall(compare_u64);
assertTrue(compare_u64(true, 123));
assertTrue(compare_u64(true, 0));
assertTrue(compare_u64(true, -0.5));
assertTrue(compare_u64(true, 0.5));
assertTrue(compare_u64(true, 2 ** 32 - 1));
assertTrue(compare_u64(true, Number.MAX_SAFE_INTEGER));
// Negative values and 2^64+ are out of uint64 range.
assertThrows(() => compare_u64(false, 2 ** 64));
assertThrows(() => compare_u64(false, -1));
assertThrows(() => compare_u64(false, -1.5));
assertThrows(() => compare_u64(false, Number.MIN_SAFE_INTEGER));
assertThrows(() => compare_u64(false, 2 ** 64 + 3.15));
......@@ -1703,12 +1703,18 @@
##############################################################################
['arch != x64', {
# Tests that include types only supported on x64.
'compiler/fast-api-sequences-x64': [SKIP],
# Stack switching is only supported on x64.
'wasm/stack-switching': [SKIP],
}], # arch != x64
##############################################################################
['arch != x64 and arch != arm64', {
# Tests that include types only supported on x64/arm64.
'compiler/fast-api-sequences-x64': [SKIP],
'compiler/fast-api-annotations': [SKIP],
}], # arch != x64 and arch != arm64
##############################################################################
# Skip failing tests in google3
['variant == google3_noicu or variant == google3_icu', {
......
Markdown is supported.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.