Commit 0a47bc63 authored by Georg Neis, committed by Commit Bot

[turbofan] Eliminate more non-const reference arguments

... mostly by turning them into pointer arguments.

After this CL, all remaining non-const reference arguments in
the compiler directory are in the backend.

Bug: v8:9429
Change-Id: I6a546da0fe93179e1a0b12296632591cbf209808
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1719185
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62930}
parent 0cedd864
......@@ -245,7 +245,7 @@ bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
int32_t constant_test;
Smi smi_test;
if (mode == INTPTR_PARAMETERS) {
if (ToInt32Constant(test, constant_test) && constant_test == 0) {
if (ToInt32Constant(test, &constant_test) && constant_test == 0) {
return true;
}
} else {
......@@ -262,7 +262,7 @@ bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
ParameterMode mode) {
int32_t int32_constant;
if (mode == INTPTR_PARAMETERS) {
if (ToInt32Constant(maybe_constant, int32_constant)) {
if (ToInt32Constant(maybe_constant, &int32_constant)) {
*value = int32_constant;
return true;
}
......@@ -542,7 +542,7 @@ TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) {
intptr_t constant_value;
if (ToIntPtrConstant(value, constant_value)) {
if (ToIntPtrConstant(value, &constant_value)) {
return (static_cast<uintptr_t>(constant_value) <=
static_cast<uintptr_t>(Smi::kMaxValue))
? Int32TrueConstant()
......@@ -554,7 +554,7 @@ TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) {
TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
int32_t constant_value;
if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
if (ToInt32Constant(value, &constant_value) && Smi::IsValid(constant_value)) {
return SmiConstant(constant_value);
}
TNode<Smi> smi =
......@@ -564,7 +564,7 @@ TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
intptr_t constant_value;
if (ToIntPtrConstant(value, constant_value)) {
if (ToIntPtrConstant(value, &constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
}
return Signed(
......@@ -1065,7 +1065,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
intptr_t size_in_bytes_constant;
bool size_in_bytes_is_constant = false;
if (ToIntPtrConstant(size_in_bytes, size_in_bytes_constant)) {
if (ToIntPtrConstant(size_in_bytes, &size_in_bytes_constant)) {
size_in_bytes_is_constant = true;
CHECK(Internals::IsValidSmi(size_in_bytes_constant));
CHECK_GT(size_in_bytes_constant, 0);
......@@ -1224,7 +1224,7 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
!new_space || !allow_large_objects || FLAG_young_generation_large_objects;
if (!allow_large_objects) {
intptr_t size_constant;
if (ToIntPtrConstant(size_in_bytes, size_constant)) {
if (ToIntPtrConstant(size_in_bytes, &size_constant)) {
CHECK_LE(size_constant, kMaxRegularHeapObjectSize);
} else {
CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
......@@ -2717,7 +2717,7 @@ void CodeStubAssembler::StoreObjectField(Node* object, int offset,
void CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
Node* value) {
int const_offset;
if (ToInt32Constant(offset, const_offset)) {
if (ToInt32Constant(offset, &const_offset)) {
StoreObjectField(object, const_offset, value);
} else {
Store(object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
......@@ -2745,7 +2745,7 @@ void CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, SloppyTNode<IntPtrT> offset, Node* value,
MachineRepresentation rep) {
int const_offset;
if (ToInt32Constant(offset, const_offset)) {
if (ToInt32Constant(offset, &const_offset)) {
return StoreObjectFieldNoWriteBarrier(object, const_offset, value, rep);
}
StoreNoWriteBarrier(rep, object,
......@@ -5237,8 +5237,8 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
int to_index_constant = 0, from_index_constant = 0;
bool index_same = (from_encoding == to_encoding) &&
(from_index == to_index ||
(ToInt32Constant(from_index, from_index_constant) &&
ToInt32Constant(to_index, to_index_constant) &&
(ToInt32Constant(from_index, &from_index_constant) &&
ToInt32Constant(to_index, &to_index_constant) &&
from_index_constant == to_index_constant));
BuildFastLoop(
vars, from_offset, limit_offset,
......@@ -8464,8 +8464,8 @@ TNode<IntPtrT> CodeStubAssembler::IntPtrMax(SloppyTNode<IntPtrT> left,
SloppyTNode<IntPtrT> right) {
intptr_t left_constant;
intptr_t right_constant;
if (ToIntPtrConstant(left, left_constant) &&
ToIntPtrConstant(right, right_constant)) {
if (ToIntPtrConstant(left, &left_constant) &&
ToIntPtrConstant(right, &right_constant)) {
return IntPtrConstant(std::max(left_constant, right_constant));
}
return SelectConstant<IntPtrT>(IntPtrGreaterThanOrEqual(left, right), left,
......@@ -8476,8 +8476,8 @@ TNode<IntPtrT> CodeStubAssembler::IntPtrMin(SloppyTNode<IntPtrT> left,
SloppyTNode<IntPtrT> right) {
intptr_t left_constant;
intptr_t right_constant;
if (ToIntPtrConstant(left, left_constant) &&
ToIntPtrConstant(right, right_constant)) {
if (ToIntPtrConstant(left, &left_constant) &&
ToIntPtrConstant(right, &right_constant)) {
return IntPtrConstant(std::min(left_constant, right_constant));
}
return SelectConstant<IntPtrT>(IntPtrLessThanOrEqual(left, right), left,
......@@ -10080,7 +10080,7 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
index_node = BitcastTaggedSignedToWord(index_node);
} else {
DCHECK(mode == INTPTR_PARAMETERS);
constant_index = ToIntPtrConstant(index_node, index);
constant_index = ToIntPtrConstant(index_node, &index);
}
if (constant_index) {
return IntPtrConstant(base_size + element_size * index);
......@@ -11102,7 +11102,7 @@ Node* CodeStubAssembler::BuildFastLoop(
// loop actually iterates.
Node* first_check = WordEqual(var.value(), end_index);
int32_t first_check_val;
if (ToInt32Constant(first_check, first_check_val)) {
if (ToInt32Constant(first_check, &first_check_val)) {
if (first_check_val) return var.value();
Goto(&loop);
} else {
......@@ -11135,9 +11135,9 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
CSA_SLOW_ASSERT(this, Word32Or(IsFixedArrayWithKind(fixed_array, kind),
IsPropertyArray(fixed_array)));
int32_t first_val;
bool constant_first = ToInt32Constant(first_element_inclusive, first_val);
bool constant_first = ToInt32Constant(first_element_inclusive, &first_val);
int32_t last_val;
bool constent_last = ToInt32Constant(last_element_exclusive, last_val);
bool constent_last = ToInt32Constant(last_element_exclusive, &last_val);
if (constant_first && constent_last) {
int delta = last_val - first_val;
DCHECK_GE(delta, 0);
......
......@@ -268,7 +268,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
} else {
DCHECK_EQ(mode, ParameterMode::INTPTR_PARAMETERS);
intptr_t constant;
if (ToIntPtrConstant(node, constant)) {
if (ToIntPtrConstant(node, &constant)) {
*out = constant;
return true;
}
......
This diff is collapsed.
This diff is collapsed.
......@@ -853,15 +853,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return value ? Int32TrueConstant() : Int32FalseConstant();
}
// TODO(jkummerow): The style guide wants pointers for output parameters.
// https://google.github.io/styleguide/cppguide.html#Output_Parameters
bool ToInt32Constant(Node* node,
int32_t& out_value); // NOLINT(runtime/references)
bool ToInt64Constant(Node* node,
int64_t& out_value); // NOLINT(runtime/references)
bool ToInt32Constant(Node* node, int32_t* out_value);
bool ToInt64Constant(Node* node, int64_t* out_value);
bool ToIntPtrConstant(Node* node, intptr_t* out_value);
bool ToSmiConstant(Node* node, Smi* out_value);
bool ToIntPtrConstant(Node* node,
intptr_t& out_value); // NOLINT(runtime/references)
bool IsUndefinedConstant(TNode<Object> node);
bool IsNullConstant(TNode<Object> node);
......
......@@ -1133,10 +1133,10 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
return true;
}
bool CanInlineArrayResizingBuiltin(
JSHeapBroker* broker, MapHandles const& receiver_maps,
std::vector<ElementsKind>& kinds, // NOLINT(runtime/references)
bool builtin_is_push = false) {
bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
MapHandles const& receiver_maps,
std::vector<ElementsKind>* kinds,
bool builtin_is_push = false) {
DCHECK_NE(0, receiver_maps.size());
for (auto receiver_map : receiver_maps) {
MapRef map(broker, receiver_map);
......@@ -1147,14 +1147,14 @@ bool CanInlineArrayResizingBuiltin(
return false;
}
ElementsKind current_kind = map.elements_kind();
auto kind_ptr = kinds.data();
auto kind_ptr = kinds->data();
size_t i;
for (i = 0; i < kinds.size(); i++, kind_ptr++) {
for (i = 0; i < kinds->size(); i++, kind_ptr++) {
if (UnionElementsKindUptoPackedness(kind_ptr, current_kind)) {
break;
}
}
if (i == kinds.size()) kinds.push_back(current_kind);
if (i == kinds->size()) kinds->push_back(current_kind);
}
return true;
}
......@@ -4497,7 +4497,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
MapHandles const& receiver_maps = inference.GetMaps();
std::vector<ElementsKind> kinds;
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds, true)) {
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds, true)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
......@@ -4632,7 +4632,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
MapHandles const& receiver_maps = inference.GetMaps();
std::vector<ElementsKind> kinds;
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) {
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
......@@ -4770,7 +4770,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
MapHandles const& receiver_maps = inference.GetMaps();
std::vector<ElementsKind> kinds;
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) {
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
......
......@@ -332,7 +332,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
// - context : The context (as SSA value) bound by the call target.
// - feedback_vector : The target is guaranteed to use this feedback vector.
FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
Node*& context_out) {
Node** context_out) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
HeapObjectMatcher match(node->InputAt(0));
......@@ -342,7 +342,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
CHECK(function.has_feedback_vector());
// The inlinee specializes to the context from the JSFunction object.
context_out = jsgraph()->Constant(function.context());
*context_out = jsgraph()->Constant(function.context());
return function.feedback_vector();
}
......@@ -354,7 +354,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
FeedbackCellRef cell(FeedbackCellRef(broker(), p.feedback_cell()));
// The inlinee uses the locally provided context at instantiation.
context_out = NodeProperties::GetContextInput(match.node());
*context_out = NodeProperties::GetContextInput(match.node());
return cell.value().AsFeedbackVector();
}
......@@ -425,7 +425,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
: ""));
// Determine the targets feedback vector and its context.
Node* context;
FeedbackVectorRef feedback_vector = DetermineCallContext(node, context);
FeedbackVectorRef feedback_vector = DetermineCallContext(node, &context);
if (FLAG_concurrent_inlining &&
!shared_info.value().IsSerializedForCompilation(feedback_vector)) {
......
......@@ -59,8 +59,7 @@ class JSInliner final : public AdvancedReducer {
SourcePositionTable* const source_positions_;
base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
FeedbackVectorRef DetermineCallContext(
Node* node, Node*& context_out); // NOLINT(runtime/references)
FeedbackVectorRef DetermineCallContext(Node* node, Node** context_out);
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count, BailoutId bailout_id,
......
......@@ -549,8 +549,8 @@ JSTypeHintLowering::ReduceStoreKeyedOperation(const Operator* op, Node* obj,
return LoweringResult::NoChange();
}
Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect,
Node* control,
Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackNexus const& nexus,
Node* effect, Node* control,
DeoptimizeReason reason) const {
if ((flags() & kBailoutOnUninitialized) && nexus.IsUninitialized()) {
Node* deoptimize = jsgraph()->graph()->NewNode(
......
......@@ -153,9 +153,8 @@ class JSTypeHintLowering {
private:
friend class JSSpeculativeBinopBuilder;
Node* TryBuildSoftDeopt(FeedbackNexus& nexus, // NOLINT(runtime/references)
Node* effect, Node* control,
DeoptimizeReason reson) const;
Node* TryBuildSoftDeopt(FeedbackNexus const& nexus, Node* effect,
Node* control, DeoptimizeReason reson) const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
......
......@@ -820,7 +820,7 @@ class SpecialRPONumberer : public ZoneObject {
if (num_loops > static_cast<int>(loops_.size())) {
// Otherwise, compute the loop information from the backedges in order
// to perform a traversal that groups loop bodies together.
ComputeLoopInfo(stack_, num_loops, &backedges_);
ComputeLoopInfo(&stack_, num_loops, &backedges_);
// Initialize the "loop stack". Note the entry could be a loop header.
LoopInfo* loop =
......@@ -962,9 +962,8 @@ class SpecialRPONumberer : public ZoneObject {
}
// Computes loop membership from the backedges of the control flow graph.
void ComputeLoopInfo(
ZoneVector<SpecialRPOStackFrame>& queue, // NOLINT(runtime/references)
size_t num_loops, ZoneVector<Backedge>* backedges) {
void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>* queue,
size_t num_loops, ZoneVector<Backedge>* backedges) {
// Extend existing loop membership vectors.
for (LoopInfo& loop : loops_) {
loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
......@@ -993,19 +992,19 @@ class SpecialRPONumberer : public ZoneObject {
if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
loops_[loop_num].members->Add(member->id().ToInt());
}
queue[queue_length++].block = member;
(*queue)[queue_length++].block = member;
}
// Propagate loop membership backwards. All predecessors of M up to the
// loop header H are members of the loop too. O(|blocks between M and H|).
while (queue_length > 0) {
BasicBlock* block = queue[--queue_length].block;
BasicBlock* block = (*queue)[--queue_length].block;
for (size_t i = 0; i < block->PredecessorCount(); i++) {
BasicBlock* pred = block->PredecessorAt(i);
if (pred != header) {
if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
loops_[loop_num].members->Add(pred->id().ToInt());
queue[queue_length++].block = pred;
(*queue)[queue_length++].block = pred;
}
}
}
......
......@@ -357,9 +357,8 @@ class SerializerForBackgroundCompilation {
bool with_spread = false);
void ProcessApiCall(Handle<SharedFunctionInfo> target,
const HintsVector& arguments);
void ProcessReceiverMapForApiCall(
FunctionTemplateInfoRef& target, // NOLINT(runtime/references)
Handle<Map> receiver);
void ProcessReceiverMapForApiCall(FunctionTemplateInfoRef target,
Handle<Map> receiver);
void ProcessBuiltinCall(Handle<SharedFunctionInfo> target,
const HintsVector& arguments,
SpeculationMode speculation_mode);
......@@ -399,8 +398,8 @@ class SerializerForBackgroundCompilation {
void ProcessContextAccess(const Hints& context_hints, int slot, int depth,
ContextProcessingMode mode);
void ProcessImmutableLoad(ContextRef& context, // NOLINT(runtime/references)
int slot, ContextProcessingMode mode);
void ProcessImmutableLoad(ContextRef const& context, int slot,
ContextProcessingMode mode);
void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator);
void ProcessLdaLookupContextSlot(
interpreter::BytecodeArrayIterator* iterator);
......@@ -631,7 +630,7 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
// Appends the hints for the given register range to {dst} (in order).
void ExportRegisterHints(interpreter::Register first, size_t count,
HintsVector& dst); // NOLINT(runtime/references)
HintsVector* dst);
private:
friend std::ostream& operator<<(std::ostream& out, const Environment& env);
......@@ -1066,7 +1065,7 @@ void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
size_t reg_count = iterator->GetRegisterCountOperand(2);
CHECK_EQ(reg_count, 3);
HintsVector arguments(zone());
environment()->ExportRegisterHints(first_reg, reg_count, arguments);
environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
Hints const& resolution_hints = arguments[1]; // The resolution object.
ProcessHintsForPromiseResolve(resolution_hints);
environment()->accumulator_hints().Clear();
......@@ -1107,7 +1106,7 @@ void SerializerForBackgroundCompilation::VisitPopContext(
}
void SerializerForBackgroundCompilation::ProcessImmutableLoad(
ContextRef& context_ref, int slot, ContextProcessingMode mode) {
ContextRef const& context_ref, int slot, ContextProcessingMode mode) {
DCHECK(mode == kSerializeSlot || mode == kSerializeSlotAndAddToAccumulator);
base::Optional<ObjectRef> slot_value = context_ref.get(slot, true);
......@@ -1555,7 +1554,7 @@ void SerializerForBackgroundCompilation::ProcessCallVarArgs(
receiver.AddConstant(broker()->isolate()->factory()->undefined_value());
arguments.push_back(receiver);
}
environment()->ExportRegisterHints(first_reg, reg_count, arguments);
environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
ProcessCallOrConstruct(callee, base::nullopt, arguments, slot);
}
......@@ -1601,7 +1600,7 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
}
void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall(
FunctionTemplateInfoRef& target, Handle<Map> receiver) {
FunctionTemplateInfoRef target, Handle<Map> receiver) {
if (receiver->is_access_check_needed()) {
return;
}
......@@ -1871,10 +1870,10 @@ void SerializerForBackgroundCompilation::VisitSwitchOnGeneratorState(
}
void SerializerForBackgroundCompilation::Environment::ExportRegisterHints(
interpreter::Register first, size_t count, HintsVector& dst) {
interpreter::Register first, size_t count, HintsVector* dst) {
const int reg_base = first.index();
for (int i = 0; i < static_cast<int>(count); ++i) {
dst.push_back(register_hints(interpreter::Register(reg_base + i)));
dst->push_back(register_hints(interpreter::Register(reg_base + i)));
}
}
......@@ -1888,7 +1887,7 @@ void SerializerForBackgroundCompilation::VisitConstruct(
const Hints& new_target = environment()->accumulator_hints();
HintsVector arguments(zone());
environment()->ExportRegisterHints(first_reg, reg_count, arguments);
environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
ProcessCallOrConstruct(callee, new_target, arguments, slot);
}
......@@ -1903,7 +1902,7 @@ void SerializerForBackgroundCompilation::VisitConstructWithSpread(
const Hints& new_target = environment()->accumulator_hints();
HintsVector arguments(zone());
environment()->ExportRegisterHints(first_reg, reg_count, arguments);
environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
ProcessCallOrConstruct(callee, new_target, arguments, slot, true);
}
......
......@@ -24,7 +24,7 @@ using Variable = CodeAssemblerVariable;
Node* SmiTag(CodeAssembler& m, // NOLINT(runtime/references)
Node* value) {
int32_t constant_value;
if (m.ToInt32Constant(value, constant_value) &&
if (m.ToInt32Constant(value, &constant_value) &&
Smi::IsValid(constant_value)) {
return m.SmiConstant(Smi::FromInt(constant_value));
}
......@@ -374,24 +374,24 @@ TEST(TestToConstant) {
int32_t value32;
int64_t value64;
Node* a = m.Int32Constant(5);
CHECK(m.ToInt32Constant(a, value32));
CHECK(m.ToInt64Constant(a, value64));
CHECK(m.ToInt32Constant(a, &value32));
CHECK(m.ToInt64Constant(a, &value64));
a = m.Int64Constant(static_cast<int64_t>(1) << 32);
CHECK(!m.ToInt32Constant(a, value32));
CHECK(m.ToInt64Constant(a, value64));
CHECK(!m.ToInt32Constant(a, &value32));
CHECK(m.ToInt64Constant(a, &value64));
a = m.Int64Constant(13);
CHECK(m.ToInt32Constant(a, value32));
CHECK(m.ToInt64Constant(a, value64));
CHECK(m.ToInt32Constant(a, &value32));
CHECK(m.ToInt64Constant(a, &value64));
a = UndefinedConstant(m);
CHECK(!m.ToInt32Constant(a, value32));
CHECK(!m.ToInt64Constant(a, value64));
CHECK(!m.ToInt32Constant(a, &value32));
CHECK(!m.ToInt64Constant(a, &value64));
a = UndefinedConstant(m);
CHECK(!m.ToInt32Constant(a, value32));
CHECK(!m.ToInt64Constant(a, value64));
CHECK(!m.ToInt32Constant(a, &value32));
CHECK(!m.ToInt64Constant(a, &value64));
}
TEST(DeferredCodePhiHints) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment