Commit 2f21f07e authored by Daniel Clifford, committed by Commit Bot

[torque] Detect overflow in indexed field allocation

Also clean up variable class size calculation for 'new'
statements, as well as spread-based indexed field
initialization, by implementing them in Torque rather than in
hand-written ImplementationVisitor code. This is done with new
%-intrinsics. With this change, %-intrinsics can also have
bodies, in which case they are treated as macros.

Bug: v8:7793
Change-Id: I012d57166761688747eb683cb618263e8d0953db
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1554695
Commit-Queue: Daniel Clifford <danno@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61585}
parent 59cbcb7f
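To make the overflow concrete: the size of an object with an indexed field is computed as base size plus element count times element size, and both the multiply and the add can wrap. Below is a minimal standalone C++ sketch of the checked computation that the new %AddIndexedFieldSizeToObjectSize intrinsic performs, assuming a GCC/Clang toolchain for the overflow builtins; the function name mirrors the intrinsic, but everything else here is illustrative, not V8 code.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Returns false on overflow where the Torque intrinsic would branch to a
// label; __builtin_mul_overflow/__builtin_add_overflow play the role of
// the CodeStubAssembler's Int32MulWithOverflow/IntPtrAddWithOverflow.
bool AddIndexedFieldSizeToObjectSize(intptr_t base_size, int32_t index,
                                     int32_t field_size, intptr_t* result) {
  int32_t variable_size;
  if (__builtin_mul_overflow(index, field_size, &variable_size)) return false;
  return !__builtin_add_overflow(base_size,
                                 static_cast<intptr_t>(variable_size), result);
}

int main() {
  intptr_t size;
  // Normal case: 16-byte header plus 4 fields of 8 bytes is 48 bytes.
  assert(AddIndexedFieldSizeToObjectSize(16, 4, 8, &size) && size == 48);
  // Overflow case: index * field_size does not fit in int32.
  assert(!AddIndexedFieldSizeToObjectSize(16, INT32_MAX, 8, &size));
  printf("ok\n");
}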
@@ -211,7 +211,36 @@ extern class DescriptorArray extends HeapObject {
// than building the definition from C++.
intrinsic %GetAllocationBaseSize<Class: type>(map: Map): intptr;
intrinsic %Allocate<Class: type>(size: intptr): Class;
intrinsic %AllocateInternalClass<Class: type>(slotCount: constexpr intptr):
    Class;
intrinsic %AddIndexedFieldSizeToObjectSize<T: type>(
    baseSize: intptr, indexSize: T, fieldSize: int32): intptr {
  const convertedIndexSize = Convert<int32>(indexSize);
  const variableSize: int32 =
      TryInt32Mul(convertedIndexSize, fieldSize) otherwise unreachable;
  const convertedVariableSize = Convert<intptr>(variableSize);
  return TryIntPtrAdd(baseSize, convertedVariableSize) otherwise unreachable;
}
intrinsic
%InitializeFieldsFromIterator<Container: type, Index: type, Iterator: type>(
    c: Container, length: Index, i: Iterator) {
  try {
    let mutableIterator = i;
    let current: Index = 0;
    while (current < length) {
      // TODO(danno): The indexed accessor on the container requires that the
      // '[]=' operator be defined explicitly for the Container
      // (e.g. FixedArray). We should change this to use slice references
      // once they are implemented.
      c[current++] = mutableIterator.Next() otherwise NoMore;
    }
  }
  label NoMore deferred {
    unreachable;
  }
}
@abstract
@noVerifier
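The %InitializeFieldsFromIterator loop above drains a Next()-style iterator until length elements have been written; the iterator running out early is an internal invariant violation, hence the deferred unreachable label. A rough standalone C++ analogue of that control flow, with all types being illustrative stand-ins rather than V8 classes:

#include <cassert>
#include <cstdlib>
#include <optional>
#include <vector>

// Stand-in for a Torque iterator whose Next() can run out of values.
struct CountingIterator {
  int next;
  int limit;
  std::optional<int> Next() {
    if (next >= limit) return std::nullopt;  // "otherwise NoMore"
    return next++;
  }
};

// Analogue of %InitializeFieldsFromIterator: fill `length` slots of the
// container; exhausting the iterator early is a fatal internal error.
void InitializeFieldsFromIterator(std::vector<int>& c, int length,
                                  CountingIterator it) {
  for (int current = 0; current < length; ++current) {
    std::optional<int> value = it.Next();
    if (!value) std::abort();  // "label NoMore deferred { unreachable; }"
    c[current] = *value;
  }
}

int main() {
  std::vector<int> fields(3);
  InitializeFieldsFromIterator(fields, 3, CountingIterator{0, 3});
  assert(fields[0] == 0 && fields[2] == 2);
}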
@@ -1540,6 +1569,10 @@ macro Max(x: Number, y: Number): Number {
  return NumberMax(x, y);
}
extern macro TryIntPtrAdd(intptr, intptr): intptr labels Overflow;
extern macro TryIntPtrSub(intptr, intptr): intptr labels Overflow;
extern macro TryInt32Mul(int32, int32): int32 labels Overflow;
extern operator '<<' macro ConstexprUintPtrShl(
    constexpr uintptr, constexpr int31): constexpr uintptr;
extern operator '>>>' macro ConstexprUintPtrShr(
@@ -1919,11 +1952,6 @@ extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map;
extern macro LoadJSArrayElementsMap(ElementsKind, Context): Map;
extern macro ChangeNonnegativeNumberToUintPtr(Number): uintptr;
extern macro TryNumberToUintPtr(Number): uintptr labels IfNegative;
macro TryUintPtrToPositiveSmi(ui: uintptr): PositiveSmi labels IfOverflow {
  if (ui > kSmiMaxValue) goto IfOverflow;
  return %RawDownCast<PositiveSmi>(SmiTag(Signed(ui)));
}
extern macro NumberConstant(constexpr float64): Number;
extern macro NumberConstant(constexpr int32): Number;
extern macro NumberConstant(constexpr uint32): Number;
@@ -2041,6 +2069,10 @@ macro Convert<To: type, From: type>(i: From): To {
  return i;
}
macro Convert<To: type, From: type>(i: From): To labels Overflow {
  return i;
}
extern macro ConvertElementsKindToInt(ElementsKind): int32;
Convert<int32, ElementsKind>(elementsKind: ElementsKind): int32 {
  return ConvertElementsKindToInt(elementsKind);
@@ -2096,6 +2128,20 @@ Convert<PositiveSmi, intptr>(i: intptr): PositiveSmi {
  assert(IsValidPositiveSmi(i));
  return %RawDownCast<PositiveSmi>(SmiTag(i));
}
Convert<PositiveSmi, uintptr>(ui: uintptr): PositiveSmi labels IfOverflow {
  if (ui > kSmiMaxValue) deferred {
      goto IfOverflow;
    }
  return %RawDownCast<PositiveSmi>(SmiTag(Signed(ui)));
}
Convert<PositiveSmi, intptr>(i: intptr): PositiveSmi labels IfOverflow {
  if (IsValidPositiveSmi(i)) {
    return %RawDownCast<PositiveSmi>(SmiTag(i));
  } else
    deferred {
      goto IfOverflow;
    }
}
Convert<int32, Smi>(s: Smi): int32 {
  return SmiToInt32(s);
}
@@ -2374,7 +2420,7 @@ macro GetReflectApply(implicit context: Context)(): Callable {
}
macro GetRegExpLastMatchInfo(implicit context: Context)(): RegExpMatchInfo {
  return %RawDownCast<RegExpMatchInfo>(
      LoadNativeContext(context)[REGEXP_LAST_MATCH_INFO_INDEX]);
}
extern transitioning macro Call(Context, Callable, Object): Object;
@@ -2681,8 +2727,6 @@ extern macro AllocateSeqOneByteString(implicit context: Context)(uint32):
    String;
extern macro AllocateSeqTwoByteString(implicit context: Context)(uint32):
    String;
extern macro TryIntPtrAdd(intptr, intptr): intptr
    labels IfOverflow;
extern macro ConvertToRelativeIndex(implicit context: Context)(
    Object, intptr): intptr;
@@ -19,7 +19,7 @@ namespace typed_array {
  // Calculates the maximum number of elements supported by a specified number
  // of bytes.
  CalculateLength(byteLength: uintptr): PositiveSmi labels IfInvalid {
    return TryUintPtrToPositiveSmi(byteLength >>> this.sizeLog2)
    return Convert<PositiveSmi>(byteLength >>> this.sizeLog2)
        otherwise IfInvalid;
  }
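As a worked example of what CalculateLength computes: with sizeLog2 == 3 (8-byte elements), a byteLength of 64 yields 8 elements, and the PositiveSmi conversion fails to IfInvalid once the element count exceeds the Smi range. A standalone C++ sketch of the same check, assuming 64-bit Smis where the maximum value is 2^31 - 1 (the constant below is an illustrative stand-in for V8's kSmiMaxValue):

#include <cassert>
#include <cstdint>

// Illustrative stand-in for the Smi range limit on 64-bit targets.
constexpr uintptr_t kSmiMaxValue = 0x7FFFFFFF;

// Shift the byte length down by sizeLog2 and range-check the element
// count; returns false where the Torque code branches to IfInvalid.
bool CalculateLength(uintptr_t byte_length, int size_log2, uintptr_t* length) {
  uintptr_t count = byte_length >> size_log2;  // Torque's '>>>'
  if (count > kSmiMaxValue) return false;      // "otherwise IfInvalid"
  *length = count;
  return true;
}

int main() {
  uintptr_t len;
  assert(CalculateLength(64, 3, &len) && len == 8);  // 64 bytes / 8 = 8
  assert(!CalculateLength(UINTPTR_MAX, 0, &len));    // exceeds Smi range
}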
@@ -640,6 +640,23 @@ TNode<IntPtrT> CodeStubAssembler::TryIntPtrAdd(TNode<IntPtrT> a,
  return Projection<0>(pair);
}
TNode<IntPtrT> CodeStubAssembler::TryIntPtrSub(TNode<IntPtrT> a,
                                               TNode<IntPtrT> b,
                                               Label* if_overflow) {
  TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(a, b);
  TNode<BoolT> overflow = Projection<1>(pair);
  GotoIf(overflow, if_overflow);
  return Projection<0>(pair);
}
TNode<Int32T> CodeStubAssembler::TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b,
                                             Label* if_overflow) {
  TNode<PairT<Int32T, BoolT>> pair = Int32MulWithOverflow(a, b);
  TNode<BoolT> overflow = Projection<1>(pair);
  GotoIf(overflow, if_overflow);
  return Projection<0>(pair);
}
TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
                                        Label* if_overflow) {
  if (SmiValuesAre32Bits()) {
@@ -515,6 +515,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  TNode<IntPtrT> TryIntPtrAdd(TNode<IntPtrT> a, TNode<IntPtrT> b,
                              Label* if_overflow);
  TNode<IntPtrT> TryIntPtrSub(TNode<IntPtrT> a, TNode<IntPtrT> b,
                              Label* if_overflow);
  TNode<Int32T> TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b,
                            Label* if_overflow);
  TNode<Smi> TrySmiAdd(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
  TNode<Smi> TrySmiSub(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
@@ -311,6 +311,9 @@ class Macro : public Callable {
        if (type->IsStructType()) return true;
      }
    }
    // Intrinsics that are used internally in Torque and implemented as Torque
    // code should be inlined and not generate C++ definitions.
    if (ReadableName()[0] == '%') return true;
    return Callable::ShouldBeInlined();
  }
@@ -1340,65 +1340,17 @@ size_t ImplementationVisitor::InitializeAggregateHelper(
void ImplementationVisitor::InitializeFieldFromSpread(
    VisitResult object, const Field& field,
    const InitializerResults& initializer_results) {
  StackScope stack_scope(this);
  VisitResult zero(TypeOracle::GetConstInt31Type(), "0");
  const Type* index_type = (*field.index)->name_and_type.type;
  VisitResult index = GenerateImplicitConvert(index_type, zero);
  Block* post_exit_block = assembler().NewBlock(assembler().CurrentStack());
  Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
  Block* body_block = assembler().NewBlock(assembler().CurrentStack());
  Block* fail_block = assembler().NewBlock(assembler().CurrentStack(), true);
  Block* header_block = assembler().NewBlock(assembler().CurrentStack());
  assembler().Goto(header_block);
  assembler().Bind(header_block);
  Arguments compare_arguments;
  compare_arguments.parameters.push_back(index);
  compare_arguments.parameters.push_back(initializer_results.field_value_map.at(
      (*field.index)->name_and_type.name));
  GenerateExpressionBranch(
      [&]() { return GenerateCall("<", compare_arguments); }, body_block,
      exit_block);
  assembler().Bind(body_block);
  {
    VisitResult spreadee =
        initializer_results.field_value_map.at(field.name_and_type.name);
    LocationReference target = LocationReference::VariableAccess(spreadee);
    Binding<LocalLabel> no_more{&LabelBindingsManager::Get(), "_Done",
                                LocalLabel{fail_block}};
    // Call the Next() method of the iterator
    Arguments next_arguments;
    next_arguments.labels.push_back(&no_more);
    Callable* callable = LookupMethod("Next", target, next_arguments, {});
    VisitResult next_result =
        GenerateCall(callable, target, next_arguments, {}, false);
    Arguments assign_arguments;
    assign_arguments.parameters.push_back(object);
    assign_arguments.parameters.push_back(index);
    assign_arguments.parameters.push_back(next_result);
    GenerateCall("[]=", assign_arguments);
    // Increment the indexed field index.
    LocationReference index_ref = LocationReference::VariableAccess(index);
    Arguments increment_arguments;
    VisitResult one = {TypeOracle::GetConstInt31Type(), "1"};
    increment_arguments.parameters = {index, one};
    VisitResult assignment_value = GenerateCall("+", increment_arguments);
    GenerateAssignToLocation(index_ref, assignment_value);
  }
  assembler().Goto(header_block);
  assembler().Bind(fail_block);
  assembler().Emit(AbortInstruction(AbortInstruction::Kind::kUnreachable));
  assembler().Bind(exit_block);
  assembler().Goto(post_exit_block);
  NameAndType index = (*field.index)->name_and_type;
  VisitResult iterator =
      initializer_results.field_value_map.at(field.name_and_type.name);
  VisitResult length = initializer_results.field_value_map.at(index.name);
  assembler().Bind(post_exit_block);
  Arguments assign_arguments;
  assign_arguments.parameters.push_back(object);
  assign_arguments.parameters.push_back(length);
  assign_arguments.parameters.push_back(iterator);
  GenerateCall("%InitializeFieldsFromIterator", assign_arguments,
               {field.aggregate, index.type, iterator.type()});
}
void ImplementationVisitor::InitializeAggregate(
@@ -1429,13 +1381,13 @@ VisitResult ImplementationVisitor::AddVariableObjectSize(
        VisitResult(TypeOracle::GetConstInt31Type(), "kTaggedSize");
    VisitResult initializer_value = initializer_results.field_value_map.at(
        (*current_field->index)->name_and_type.name);
    VisitResult index_intptr_size =
        GenerateCall("Convert", {{initializer_value}, {}},
                     {TypeOracle::GetIntPtrType()}, false);
    VisitResult variable_size = GenerateCall(
        "*", {{index_intptr_size, index_field_size}, {}}, {}, false);
    Arguments args;
    args.parameters.push_back(object_size);
    args.parameters.push_back(initializer_value);
    args.parameters.push_back(index_field_size);
    object_size =
        GenerateCall("+", {{object_size, variable_size}, {}}, {}, false);
        GenerateCall("%AddIndexedFieldSizeToObjectSize", args,
                     {(*current_field->index)->name_and_type.type}, false);
  }
  ++current_field;
}
@@ -476,13 +476,20 @@ base::Optional<ParseResult> MakeIntrinsicDeclaration(
  auto args = child_results->NextAs<ParameterList>();
  auto return_type = child_results->NextAs<TypeExpression*>();
  IntrinsicDeclaration* macro =
      MakeNode<IntrinsicDeclaration>(name, args, return_type);
  auto body = child_results->NextAs<base::Optional<Statement*>>();
  LabelAndTypesVector labels;
  CallableNode* callable = nullptr;
  if (body) {
    callable = MakeNode<TorqueMacroDeclaration>(
        false, name, base::Optional<std::string>{}, args, return_type, labels);
  } else {
    callable = MakeNode<IntrinsicDeclaration>(name, args, return_type);
  }
  Declaration* result;
  if (generic_parameters.empty()) {
    result = MakeNode<StandardDeclaration>(macro, base::nullopt);
    result = MakeNode<StandardDeclaration>(callable, body);
  } else {
    result = MakeNode<GenericDeclaration>(macro, generic_parameters);
    result = MakeNode<GenericDeclaration>(callable, generic_parameters, body);
  }
  return ParseResult{result};
}
@@ -1823,7 +1830,7 @@ struct TorqueGrammar : Grammar {
           AsSingletonVector<Declaration*, MakeTypeAliasDeclaration>()),
      Rule({Token("intrinsic"), &intrinsicName,
            TryOrDefault<GenericParameters>(&genericParameters),
            &parameterListNoVararg, &optionalReturnType, Token(";")},
            &parameterListNoVararg, &optionalReturnType, &optionalBody},
           AsSingletonVector<Declaration*, MakeIntrinsicDeclaration>()),
      Rule({Token("extern"), CheckIf(Token("transitioning")),
            Optional<std::string>(
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -14,6 +15,8 @@ import sys
import re
from subprocess import Popen, PIPE
kPercentEscape = r'α'; # Unicode alpha
def preprocess(input):
  input = re.sub(r'(if\s+)constexpr(\s*\()', r'\1/*COxp*/\2', input)
  input = re.sub(r'(\s+)operator\s*(\'[^\']+\')', r'\1/*_OPE \2*/', input)
@@ -46,14 +49,15 @@ def preprocess(input):
                 r'\n otherwise', input)
  input = re.sub(r'(\n\s*\S[^\n]*\s)otherwise',
                 r'\1_OtheSaLi', input)
  # Special handling of '%' for intrinsics: turn the percent
  # into a unicode character so that it gets treated as part of the
  # intrinsic's name if it's already adjacent to it.
  input = re.sub(r'%([A-Za-z])', kPercentEscape + r'\1', input)
  return input
def postprocess(output):
  output = re.sub(r'%\s*RawDownCast', r'%RawDownCast', output)
  output = re.sub(r'%\s*RawConstexprCast', r'%RawConstexprCast', output)
  output = re.sub(r'%\s*FromConstexpr', r'%FromConstexpr', output)
  output = re.sub(r'%\s*Allocate', r'%Allocate', output)
  output = re.sub(r'%\s*GetAllocationBaseSize', r'%GetAllocationBaseSize', output)
  output = re.sub(r'\/\*COxp\*\/', r'constexpr', output)
  output = re.sub(r'(\S+)\s*: type([,>])', r'\1: type\2', output)
  output = re.sub(r'(\n\s*)labels( [A-Z])', r'\1 labels\2', output)
@@ -87,6 +91,8 @@ def postprocess(output):
    if old == output:
      break;
  output = re.sub(kPercentEscape, r'%', output)
  return output
def process(filename, lint, should_format):
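The escape/unescape trick in the formatter hunks above can be shown in isolation: '%' is swapped for a placeholder character before formatting so that a name like %Foo is treated as a single identifier, then swapped back afterwards. A minimal C++ sketch of the round trip, with std::regex standing in for Python's re module and the formatter step elided:

#include <cassert>
#include <regex>
#include <string>

int main() {
  const std::string kPercentEscape = "\xCE\xB1";  // UTF-8 bytes for 'α'
  const std::string input = "return %RawDownCast<A>(x);";
  // preprocess: '%' directly before a letter becomes the escape character.
  std::string escaped = std::regex_replace(
      input, std::regex("%([A-Za-z])"), kPercentEscape + "$1");
  assert(escaped.find('%') == std::string::npos);
  // ... a formatter would run on `escaped` here ...
  // postprocess: restore the percent sign.
  std::string restored =
      std::regex_replace(escaped, std::regex(kPercentEscape), "%");
  assert(restored == input);
}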