Commit 9e640b74 authored by lpy, committed by Commit bot

[Tracing] Remove unnecessary memory allocation in runtime call stats.

Previously we did not implement TRACE_STR_COPY when writing trace events to a
file, which forced us to keep a separate, growing memory chunk alive just for
the dumped runtime call stats table. Now that TRACE_STR_COPY is fully
functional, that allocation is no longer needed, so this patch removes it (a
short sketch of the idea follows the commit metadata below).

BUG=v8:5089

Committed: https://crrev.com/e1997bb7d780d12e3a89078e8dd652dcf1d90039
Review-Url: https://codereview.chromium.org/2342643004
Cr-Original-Commit-Position: refs/heads/master@{#39462}
Cr-Commit-Position: refs/heads/master@{#39510}
parent a1784e87
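
For illustration only, here is a minimal, self-contained sketch of why a
"copy this string now" marker like TRACE_STR_COPY lets the producer hand over
a temporary string instead of maintaining its own persistent buffer. The
names CopiedString, TraceEvent and SetArg below are hypothetical stand-ins,
not the V8 tracing API.

// Minimal sketch (assumed names, not the V8 tracing implementation).
#include <cstddef>
#include <cstring>
#include <iostream>
#include <memory>
#include <string>

// Hypothetical stand-in for TRACE_STR_COPY: signals that the pointed-to
// bytes must be copied immediately, because the pointer may dangle once
// the call returns.
struct CopiedString {
  explicit CopiedString(const char* s) : str(s) {}
  const char* str;
};

// Hypothetical trace sink.
struct TraceEvent {
  std::unique_ptr<char[]> owned;
  const char* value = nullptr;

  // A plain const char* is assumed to be a literal that outlives the event.
  void SetArg(const char* literal) { value = literal; }

  // A CopiedString is duplicated into storage owned by the event, so the
  // producer does not need to keep a long-lived buffer of its own.
  void SetArg(CopiedString copy) {
    std::size_t length = std::strlen(copy.str);
    owned.reset(new char[length + 1]);
    std::memcpy(owned.get(), copy.str, length + 1);
    value = owned.get();
  }
};

// Stand-in for a Dump() that returns std::string by value.
std::string Dump() { return "{\"END\":[]}"; }

int main() {
  TraceEvent event;
  // The temporary std::string dies at the end of this full expression,
  // which is fine: the event has already copied the bytes it needs.
  event.SetArg(CopiedString(Dump().c_str()));
  std::cout << event.value << "\n";
  return 0;
}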
@@ -364,7 +364,7 @@ void RuntimeCallStats::Reset() {
   in_use_ = true;
 }
 
-const char* RuntimeCallStats::Dump() {
+std::string RuntimeCallStats::Dump() {
   buffer_.str(std::string());
   buffer_.clear();
   buffer_ << "{";
@@ -393,15 +393,8 @@ const char* RuntimeCallStats::Dump() {
   FOR_EACH_HANDLER_COUNTER(DUMP_COUNTER)
 #undef DUMP_COUNTER
   buffer_ << "\"END\":[]}";
-  const std::string& buffer_str = buffer_.str();
-  size_t length = buffer_str.size();
-  if (length > len_) {
-    buffer_c_str_.reset(new char[length + 1]);
-    len_ = length;
-  }
-  strncpy(buffer_c_str_.get(), buffer_str.c_str(), length + 1);
   in_use_ = false;
-  return buffer_c_str_.get();
+  return buffer_.str();
 }
 
 }  // namespace internal
...
@@ -791,7 +791,7 @@ class RuntimeCallStats {
   void Reset();
   V8_NOINLINE void Print(std::ostream& os);
-  V8_NOINLINE const char* Dump();
+  V8_NOINLINE std::string Dump();
 
   RuntimeCallStats() {
     Reset();
@@ -803,8 +803,6 @@ class RuntimeCallStats {
 
  private:
   std::stringstream buffer_;
-  std::unique_ptr<char[]> buffer_c_str_;
-  size_t len_ = 0;
   // Counter to track recursive time events.
   RuntimeCallTimer* current_timer_ = NULL;
   // Used to track nested tracing scopes.
...
@@ -28,9 +28,10 @@ void CallStatsScopedTracer::AddEndTraceEvent() {
         TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
         v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,
         v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE,
-        "runtime-call-stats",
-        TRACE_STR_COPY(
-            p_data_->isolate->counters()->runtime_call_stats()->Dump()));
+        "runtime-call-stats", TRACE_STR_COPY(p_data_->isolate->counters()
+                                                 ->runtime_call_stats()
+                                                 ->Dump()
+                                                 .c_str()));
   } else {
     v8::internal::tracing::AddTraceEvent(
         TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
...