Commit 33ffced5 authored by machenbach, committed by Commit bot

[coverage] Enable sanitizer coverage.

This adds sanitizer-coverage compilation, test-runner
features and post-processing.

Sanitizer coverage is expected to be used together with
asan.

During test runner execution, the produced sancov files
are disambiguated and match the pattern:
<executable name>.test.<test id>.sancov.

Two additional scripts are added for merging raw sancov
files and for generating json data containing all
instrumented lines + all covered lines from merged sancov
files. Both scripts use multiprocessing for speed.

The json data will later be uploaded to google storage
for further use, e.g. to show coverage data in rietveld.

Sancov documentation:
http://clang.llvm.org/docs/SanitizerCoverage.html

BUG=chromium:568949
LOG=n
NOTRY=true
TEST=python -m unittest sancov_formatter_test
TEST=python -m unittest sancov_merger_test

Review URL: https://codereview.chromium.org/1737263003

Cr-Commit-Position: refs/heads/master@{#34578}
parent cabe6844
@@ -11,18 +11,26 @@ import subprocess
 import sys
 
 exclusions = [
+  'buildtools',
   'src/third_party',
   'third_party',
   'test',
   'testing',
 ]
 
+def remove_if_exists(string_list, item):
+  if item in string_list:
+    string_list.remove(item)
+
 args = sys.argv[1:]
 text = ' '.join(sys.argv[2:])
 for exclusion in exclusions:
   if re.search(r'\-o obj/%s[^ ]*\.o' % exclusion, text):
-    args.remove('-fprofile-arcs')
-    args.remove('-ftest-coverage')
+    remove_if_exists(args, '-fprofile-arcs')
+    remove_if_exists(args, '-ftest-coverage')
+    remove_if_exists(args, '-fsanitize-coverage=func')
+    remove_if_exists(args, '-fsanitize-coverage=bb')
+    remove_if_exists(args, '-fsanitize-coverage=edge')
     break
 
 sys.exit(subprocess.check_call(args))
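
For illustration, here is a self-contained sketch replaying the wrapper's filtering on a hypothetical compile command (the file name and flag set are made up; Python 2, matching the patch):

import re

exclusions = ['buildtools', 'src/third_party', 'third_party', 'test', 'testing']

def remove_if_exists(string_list, item):
  if item in string_list:
    string_list.remove(item)

# Hypothetical compiler invocation for a file under test/.
args = ['clang++', '-fsanitize-coverage=edge', '-c', 'test/foo.cc',
        '-o', 'obj/test/foo.o']
text = ' '.join(args[1:])
for exclusion in exclusions:
  if re.search(r'\-o obj/%s[^ ]*\.o' % exclusion, text):
    remove_if_exists(args, '-fsanitize-coverage=edge')
    break

print args  # ['clang++', '-c', 'test/foo.cc', '-o', 'obj/test/foo.o']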
@@ -68,7 +68,9 @@
     'target_arch%': '<(host_arch)',
     'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
 
-    # Instrument for code coverage with gcov.
+    # Instrument for code coverage and use coverage wrapper to exclude some
+    # files. Uses gcov if clang=0 is set explicitly. Otherwise,
+    # sanitizer_coverage must be set too.
     'coverage%': 0,
   },
   'base_dir%': '<(base_dir)',
@@ -122,8 +124,7 @@
       }, {
         'gomadir': '<!(/bin/echo -n ${HOME}/goma)',
       }],
-      ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le" and host_arch!="s390" and host_arch!="s390x" and \
-        coverage==0', {
+      ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le" and host_arch!="s390" and host_arch!="s390x"', {
         'host_clang%': 1,
       }, {
         'host_clang%': 0,
@@ -228,7 +229,7 @@
       'v8_enable_gdbjit%': 0,
     }],
     ['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
-      (v8_target_arch!="x87" and v8_target_arch!="x32") and coverage==0', {
+      (v8_target_arch!="x87" and v8_target_arch!="x32")', {
       'clang%': 1,
     }, {
       'clang%': 0,
@@ -727,7 +728,7 @@
     [ 'component=="shared_library"', {
       'cflags': [ '-fPIC', ],
     }],
-    [ 'coverage==1', {
+    [ 'clang==0 and coverage==1', {
      'cflags': [ '-fprofile-arcs', '-ftest-coverage'],
      'ldflags': [ '-fprofile-arcs'],
    }],
@@ -387,7 +387,8 @@ def Execute(arch, mode, args, options, suites, workspace):
       0,      # No use of a rerun-failing-tests maximum.
       False,  # No predictable mode.
       False,  # No no_harness mode.
-      False)  # Don't use perf data.
+      False,  # Don't use perf data.
+      False)  # Coverage not supported.
 
   # Find available test suites and read test cases from them.
   variables = {
@@ -208,6 +208,8 @@ def BuildOptions():
   result.add_option("--asan",
                     help="Regard test expectations for ASAN",
                     default=False, action="store_true")
+  result.add_option("--sancov-dir",
+                    help="Directory where to collect coverage data")
   result.add_option("--cfi-vptr",
                     help="Run tests with UBSAN cfi_vptr option.",
                     default=False, action="store_true")
@@ -385,6 +387,14 @@ def SetupEnvironment(options):
   if options.asan:
     os.environ['ASAN_OPTIONS'] = symbolizer
 
+  if options.sancov_dir:
+    assert os.path.exists(options.sancov_dir)
+    os.environ['ASAN_OPTIONS'] = ":".join([
+      'coverage=1',
+      'coverage_dir=%s' % options.sancov_dir,
+      symbolizer,
+    ])
+
   if options.cfi_vptr:
     os.environ['UBSAN_OPTIONS'] = ":".join([
       'print_stacktrace=1',
@@ -688,7 +698,8 @@ def Execute(arch, mode, args, options, suites):
       options.rerun_failures_max,
       options.predictable,
       options.no_harness,
-      use_perf_data=not options.swarming)
+      use_perf_data=not options.swarming,
+      sancov_dir=options.sancov_dir)
 
   # TODO(all): Combine "simulator" and "simulator_run".
   simulator_run = not options.dont_skip_simulator_slow_tests and \
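
The SetupEnvironment hunk above composes ASAN_OPTIONS from colon-joined flags. A sketch with made-up paths (the symbolizer entry is assumed to be an external_symbolizer_path flag built earlier in SetupEnvironment):

# Hypothetical values for illustration only.
symbolizer = 'external_symbolizer_path=/path/to/llvm-symbolizer'
sancov_dir = '/tmp/sancov'
print ":".join(['coverage=1', 'coverage_dir=%s' % sancov_dir, symbolizer])
# -> coverage=1:coverage_dir=/tmp/sancov:external_symbolizer_path=/path/to/llvm-symbolizer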
This diff is collapsed (the new sancov_formatter.py, exercised by the tests below).
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Requires python-coverage. Native python coverage version >= 3.7.1 should
# be installed to get the best speed.
import copy
import coverage
import logging
import os
import sys
import unittest
# Directory of this file.
LOCATION = os.path.dirname(os.path.abspath(__file__))
# V8 checkout directory.
BASE_DIR = os.path.dirname(os.path.dirname(LOCATION))
# Executable location.
BUILD_DIR = os.path.join(BASE_DIR, 'out', 'Release')
def abs_line(line):
"""Absolute paths as output by the llvm symbolizer."""
return '%s/%s' % (BUILD_DIR, line)
#------------------------------------------------------------------------------
# Data for test_process_symbolizer_output. This simulates output from the
# llvm symbolizer. The paths are not normalized.
SYMBOLIZER_OUTPUT = (
abs_line('../../src/foo.cc:87:7\n') +
abs_line('../../src/foo.cc:92:0\n') + # Test sorting.
abs_line('../../src/baz/bar.h:1234567:0\n') + # Test large line numbers.
abs_line('../../src/foo.cc:92:0\n') + # Test duplicates.
abs_line('../../src/baz/bar.h:0:0\n') + # Test subdirs.
'/usr/include/cool_stuff.h:14:2\n' + # Test dropping absolute paths.
abs_line('../../src/foo.cc:87:10\n') + # Test dropping character indexes.
abs_line('../../third_party/icu.cc:0:0\n') + # Test dropping excluded dirs.
abs_line('../../src/baz/bar.h:11:0\n')
)
# The expected post-processed output maps relative file names to line numbers.
# The numbers are sorted and unique.
EXPECTED_PROCESSED_OUTPUT = {
'src/baz/bar.h': [0, 11, 1234567],
'src/foo.cc': [87, 92],
}
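
The formatter's own diff is collapsed above. Purely as an illustration of the contract this test data implies, here is a minimal sketch of a process_symbolizer_output-like function; the exclusion list, the build_dir parameter, and the path normalization are assumptions, not the committed code:

import os
import re

# Assumed exclusion list; the committed one lives in the collapsed
# sancov_formatter.py.
EXCLUSIONS = ['third_party']

def process_symbolizer_output_sketch(output, build_dir):
  """Map relative file names to sorted, unique instrumented line numbers."""
  result = {}
  for line in output.splitlines():
    # Keep file name and line number, drop the trailing character index.
    match = re.match(r'^(.*):(\d+):\d+$', line.strip())
    if not match:
      continue
    file_name, number = match.group(1), int(match.group(2))
    # Normalize '<build_dir>/../../src/foo.cc' to 'src/foo.cc' relative to
    # the checkout root; paths outside the checkout start with '..'.
    checkout = os.path.abspath(os.path.join(build_dir, '..', '..'))
    file_name = os.path.relpath(os.path.abspath(file_name), checkout)
    if file_name.startswith('..'):
      continue  # drop absolute paths like /usr/include/...
    if any(file_name.startswith(e) for e in EXCLUSIONS):
      continue  # drop excluded dirs like third_party
    result.setdefault(file_name, set()).add(number)
  return dict((k, sorted(v)) for k, v in result.iteritems())

Called as process_symbolizer_output_sketch(SYMBOLIZER_OUTPUT, BUILD_DIR), this reproduces EXPECTED_PROCESSED_OUTPUT.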
#------------------------------------------------------------------------------
# Data for test_merge_instrumented_line_results. A list of absolute paths to
# all executables.
EXE_LIST = [
'/path/to/d8',
'/path/to/cctest',
'/path/to/unittests',
]
# Post-processed llvm symbolizer output as returned by
# process_symbolizer_output. These are lists of this output for merging.
INSTRUMENTED_LINE_RESULTS = [
{
'src/baz/bar.h': [0, 3, 7],
'src/foo.cc': [11],
},
{
'src/baz/bar.h': [3, 7, 8],
'src/baz.cc': [2],
'src/foo.cc': [1, 92],
},
{
'src/baz.cc': [1],
'src/foo.cc': [92, 93],
},
]
# This shows initial instrumentation. No lines are covered, hence,
# the coverage mask is 0 for all lines. The line tuples remain sorted by
# line number and contain no duplicates.
EXPECTED_INSTRUMENTED_LINES_DATA = {
'version': 1,
'tests': ['cctest', 'd8', 'unittests'],
'files': {
'src/baz/bar.h': [[0, 0], [3, 0], [7, 0], [8, 0]],
'src/baz.cc': [[1, 0], [2, 0]],
'src/foo.cc': [[1, 0], [11, 0], [92, 0], [93, 0]],
},
}
#------------------------------------------------------------------------------
# Data for test_merge_covered_line_results. List of post-processed
# llvm-symbolizer output as a tuple including the executable name of each data
# set.
COVERED_LINE_RESULTS = [
({
'src/baz/bar.h': [3, 7],
'src/foo.cc': [11],
}, 'd8'),
({
'src/baz/bar.h': [3, 7],
'src/baz.cc': [2],
'src/foo.cc': [1],
}, 'cctest'),
({
'src/foo.cc': [92],
'src/baz.cc': [2],
}, 'unittests'),
]
# This shows initial instrumentation + coverage. The mask bits are:
# cctest: 1, d8: 2, unittests:4. So a line covered by cctest and unittests
# has a coverage mask of 0b101, e.g. line 2 in src/baz.cc.
EXPECTED_COVERED_LINES_DATA = {
'version': 1,
'tests': ['cctest', 'd8', 'unittests'],
'files': {
'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
'src/baz.cc': [[1, 0b0], [2, 0b101]],
'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
},
}
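
The expectation above pins down the mask arithmetic: each executable gets one bit by its index in the sorted test list, and a covered line ORs in that bit. The committed implementation is in the collapsed formatter; a minimal sketch that reproduces these masks under the same data layout:

def merge_covered_line_results_sketch(data, covered_line_results):
  # One bit per executable: cctest -> 0b1, d8 -> 0b10, unittests -> 0b100.
  exe_bits = dict((exe, 1 << i) for i, exe in enumerate(data['tests']))
  for covered, exe in covered_line_results:
    bit = exe_bits[exe]
    for file_name, lines in covered.iteritems():
      covered_set = set(lines)
      for pair in data['files'][file_name]:
        if pair[0] in covered_set:
          pair[1] |= bit  # mark line pair[0] as covered by this executable

Applied to a deep copy of EXPECTED_INSTRUMENTED_LINES_DATA with COVERED_LINE_RESULTS, this yields EXPECTED_COVERED_LINES_DATA.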
class FormatterTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
sys.path.append(LOCATION)
cls._cov = coverage.coverage(
include=([os.path.join(LOCATION, 'sancov_formatter.py')]))
cls._cov.start()
import sancov_formatter
global sancov_formatter
@classmethod
def tearDownClass(cls):
cls._cov.stop()
cls._cov.report()
def test_process_symbolizer_output(self):
result = sancov_formatter.process_symbolizer_output(SYMBOLIZER_OUTPUT)
self.assertEquals(EXPECTED_PROCESSED_OUTPUT, result)
def test_merge_instrumented_line_results(self):
result = sancov_formatter.merge_instrumented_line_results(
EXE_LIST, INSTRUMENTED_LINE_RESULTS)
self.assertEquals(EXPECTED_INSTRUMENTED_LINES_DATA, result)
def test_merge_covered_line_results(self):
data = copy.deepcopy(EXPECTED_INSTRUMENTED_LINES_DATA)
sancov_formatter.merge_covered_line_results(
data, COVERED_LINE_RESULTS)
self.assertEquals(EXPECTED_COVERED_LINES_DATA, data)
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script for merging sancov files in parallel.
The sancov files are expected
to be located in one directory with the file-name pattern:
<executable name>.test.<id>.sancov
For each executable, this script writes a new file:
<executable name>.result.sancov
The sancov tool is expected to be in the llvm compiler-rt third-party
directory. It's not checked out by default and must be added as a custom_deps entry:
'v8/third_party/llvm/projects/compiler-rt':
'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
"""
import argparse
import logging
import math
import os
import re
import subprocess
import sys
from multiprocessing import Pool, cpu_count
logging.basicConfig(level=logging.INFO)
# V8 checkout directory.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
# The sancov tool location.
SANCOV_TOOL = os.path.join(
BASE_DIR, 'third_party', 'llvm', 'projects', 'compiler-rt',
'lib', 'sanitizer_common', 'scripts', 'sancov.py')
# Number of cpus.
CPUS = cpu_count()
# Regexp to find sancov file as output by the v8 test runner. Also grabs the
# executable name in group 1.
SANCOV_FILE_RE = re.compile(r'^(.*)\.test\.\d+\.sancov$')
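
A quick check of what the pattern accepts and captures:

m = SANCOV_FILE_RE.match('d8.test.42.sancov')
assert m and m.group(1) == 'd8'
# Merged results ('d8.result.sancov') don't match, so a rerun of this
# script won't pick up its own output.
assert not SANCOV_FILE_RE.match('d8.result.sancov')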
def merge(args):
"""Merge several sancov files into one.
Called through the multiprocessing pool. The args are expected to unpack to:
keep: Option whether source and intermediate sancov files should be kept.
coverage_dir: Folder where to find the sancov files.
executable: Name of the executable whose sancov files should be merged.
index: A number to be put into the intermediate result file name.
If None, this is a final result.
bucket: The list of sancov files to be merged.
Returns: A tuple with the executable name and the result file name.
"""
keep, coverage_dir, executable, index, bucket = args
process = subprocess.Popen(
[SANCOV_TOOL, 'merge'] + bucket,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=coverage_dir,
)
output, _ = process.communicate()
assert process.returncode == 0
if index is not None:
# This is an intermediate result, add the bucket index to the file name.
result_file_name = '%s.result.%d.sancov' % (executable, index)
else:
# This is the final result without bucket index.
result_file_name = '%s.result.sancov' % executable
with open(os.path.join(coverage_dir, result_file_name), "wb") as f:
f.write(output)
if not keep:
for f in bucket:
os.remove(os.path.join(coverage_dir, f))
return executable, result_file_name
def generate_inputs(keep, coverage_dir, file_map, cpus):
"""Generate inputs for multiprocessed merging.
Splits the sancov files into several buckets, so that each bucket can be
merged in a separate process. We have only a few executables in total, each
typically with many associated files. In the general case, with many
executables, we might need to avoid splitting the buckets of executables
that have only a few files.
Returns: List of args as expected by merge above.
"""
inputs = []
for executable, files in file_map.iteritems():
# What's the bucket size for distributing files for merging? E.g. with
# 2 cpus and 9 files we want bucket size 5.
n = max(2, int(math.ceil(len(files) / float(cpus))))
# Chop files into buckets.
buckets = [files[i:i+n] for i in xrange(0, len(files), n)]
# Inputs for multiprocessing. List of tuples containing:
# Keep-files option, base path, executable name, index of bucket,
# list of files.
inputs.extend([(keep, coverage_dir, executable, i, b)
for i, b in enumerate(buckets)])
return inputs
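
To make the bucket arithmetic concrete, the snippet below just evaluates the formula above against the sizes exercised by the merger tests further down:

import math

def bucket_size(num_files, cpus):
  return max(2, int(math.ceil(num_files / float(cpus))))

assert bucket_size(9, 2) == 5  # the example from the comment above
assert bucket_size(7, 2) == 4  # d8 with 2 cpus: buckets of 4 and 3 files
assert bucket_size(7, 4) == 2  # d8 with 4 cpus: three pairs and a singleton
assert bucket_size(4, 4) == 2  # cctest: never fewer than 2 files per bucket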
def merge_parallel(inputs):
"""Process several merge jobs in parallel."""
pool = Pool(CPUS)
try:
return pool.map(merge, inputs)
finally:
pool.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--coverage-dir', required=True,
help='Path to the sancov output files.')
parser.add_argument('--keep', default=False, action='store_true',
help='Keep sancov output files after merging.')
options = parser.parse_args()
# Check if folder with coverage output exists.
assert (os.path.exists(options.coverage_dir) and
os.path.isdir(options.coverage_dir))
# Map executable names to their respective sancov files.
file_map = {}
for f in os.listdir(options.coverage_dir):
match = SANCOV_FILE_RE.match(f)
if match:
file_map.setdefault(match.group(1), []).append(f)
inputs = generate_inputs(
options.keep, options.coverage_dir, file_map, CPUS)
logging.info('Executing %d merge jobs in parallel for %d executables.' %
(len(inputs), len(file_map)))
results = merge_parallel(inputs)
# Map executable names to intermediate bucket result files.
file_map = {}
for executable, f in results:
file_map.setdefault(executable, []).append(f)
# Merge the bucket results for each executable.
# The final result has index None, so no index will appear in the
# file name.
inputs = [(options.keep, options.coverage_dir, executable, None, files)
for executable, files in file_map.iteritems()]
logging.info('Merging %d intermediate results.' % len(inputs))
merge_parallel(inputs)
return 0
if __name__ == '__main__':
sys.exit(main())
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import sancov_merger
# Files on disk after test runner completes. The files are mapped by
# executable name -> file list.
FILE_MAP = {
'd8': [
'd8.test.1.sancov',
'd8.test.2.sancov',
'd8.test.3.sancov',
'd8.test.4.sancov',
'd8.test.5.sancov',
'd8.test.6.sancov',
'd8.test.7.sancov',
],
'cctest': [
'cctest.test.1.sancov',
'cctest.test.2.sancov',
'cctest.test.3.sancov',
'cctest.test.4.sancov',
],
}
# Inputs for merge process with 2 cpus. The tuples contain:
# (flag, path, executable name, intermediate result index, file list).
EXPECTED_INPUTS_2 = [
(False, '/some/path', 'cctest', 0, [
'cctest.test.1.sancov',
'cctest.test.2.sancov']),
(False, '/some/path', 'cctest', 1, [
'cctest.test.3.sancov',
'cctest.test.4.sancov']),
(False, '/some/path', 'd8', 0, [
'd8.test.1.sancov',
'd8.test.2.sancov',
'd8.test.3.sancov',
'd8.test.4.sancov']),
(False, '/some/path', 'd8', 1, [
'd8.test.5.sancov',
'd8.test.6.sancov',
'd8.test.7.sancov']),
]
# The same for 4 cpus.
EXPECTED_INPUTS_4 = [
(True, '/some/path', 'cctest', 0, [
'cctest.test.1.sancov',
'cctest.test.2.sancov']),
(True, '/some/path', 'cctest', 1, [
'cctest.test.3.sancov',
'cctest.test.4.sancov']),
(True, '/some/path', 'd8', 0, [
'd8.test.1.sancov',
'd8.test.2.sancov']),
(True, '/some/path', 'd8', 1, [
'd8.test.3.sancov',
'd8.test.4.sancov']),
(True, '/some/path', 'd8', 2, [
'd8.test.5.sancov',
'd8.test.6.sancov']),
(True, '/some/path', 'd8', 3, [
'd8.test.7.sancov'])]
class MergerTests(unittest.TestCase):
def test_generate_inputs_2_cpu(self):
inputs = sancov_merger.generate_inputs(
False, '/some/path', FILE_MAP, 2)
self.assertEquals(EXPECTED_INPUTS_2, inputs)
def test_generate_inputs_4_cpu(self):
inputs = sancov_merger.generate_inputs(
True, '/some/path', FILE_MAP, 4)
self.assertEquals(EXPECTED_INPUTS_4, inputs)
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Corrects objdump output. The logic is from sancov.py, see comments there."""
import sys
for line in sys.stdin:
print '0x%x' % (int(line.strip(), 16) + 4)
@@ -107,14 +107,16 @@ def RunProcess(verbose, timeout, args, **rest):
   timer.start()
   stdout, stderr = process.communicate()
   timer.cancel()
-  return process.returncode, timeout_result[0], stdout, stderr
+  return output.Output(
+      process.returncode,
+      timeout_result[0],
+      stdout,
+      stderr,
+      process.pid,
+  )
 
 
 def Execute(args, verbose=False, timeout=None):
   args = [ c for c in args if c != "" ]
-  exit_code, timed_out, stdout, stderr = RunProcess(
-      verbose,
-      timeout,
-      args=args,
-  )
-  return output.Output(exit_code, timed_out, stdout, stderr)
+  return RunProcess(verbose, timeout, args=args)
@@ -144,6 +144,26 @@ class TestJob(Job):
   def __init__(self, test):
     self.test = test
 
+  def _rename_coverage_data(self, output, context):
+    """Rename coverage data.
+
+    Rename files with PIDs to files with unique test IDs, because the number
+    of tests might be higher than pid_max. E.g.:
+    d8.1234.sancov -> d8.test.1.sancov, where 1234 was the process' PID
+    and 1 is the test ID.
+    """
+    if context.sancov_dir:
+      sancov_file = os.path.join(
+          context.sancov_dir, "%s.%d.sancov" % (self.test.shell(), output.pid))
+
+      # Some tests are expected to fail and don't produce coverage data.
+      if os.path.exists(sancov_file):
+        parts = sancov_file.split(".")
+        new_sancov_file = ".".join(
+            parts[:-2] + ["test", str(self.test.id)] + parts[-1:])
+        assert not os.path.exists(new_sancov_file)
+        os.rename(sancov_file, new_sancov_file)
+
   def Run(self, process_context):
     try:
       # Retrieve a new suite object on the worker-process side. The original
@@ -155,6 +175,7 @@ class TestJob(Job):
       start_time = time.time()
       output = commands.Execute(instr.command, instr.verbose, instr.timeout)
+      self._rename_coverage_data(output, process_context.context)
       return (instr.id, output, time.time() - start_time)
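
The rename in _rename_coverage_data composes the new file name by splitting on dots, swapping the PID segment for a test ID. With a hypothetical path and test ID 1:

sancov_file = '/tmp/sancov/d8.1234.sancov'  # 1234 is the child process' PID
parts = sancov_file.split(".")
print ".".join(parts[:-2] + ["test", "1"] + parts[-1:])
# -> /tmp/sancov/d8.test.1.sancov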
@@ -30,7 +30,7 @@ class Context():
   def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
                isolates, command_prefix, extra_flags, noi18n, random_seed,
                no_sorting, rerun_failures_count, rerun_failures_max,
-               predictable, no_harness, use_perf_data):
+               predictable, no_harness, use_perf_data, sancov_dir):
     self.arch = arch
     self.mode = mode
     self.shell_dir = shell_dir
@@ -48,13 +48,14 @@
     self.predictable = predictable
     self.no_harness = no_harness
     self.use_perf_data = use_perf_data
+    self.sancov_dir = sancov_dir
 
   def Pack(self):
     return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
             self.command_prefix, self.extra_flags, self.noi18n,
             self.random_seed, self.no_sorting, self.rerun_failures_count,
             self.rerun_failures_max, self.predictable, self.no_harness,
-            self.use_perf_data]
+            self.use_perf_data, self.sancov_dir]
 
   @staticmethod
   def Unpack(packed):
@@ -62,4 +63,4 @@
     return Context(packed[0], packed[1], None, packed[2], False,
                    packed[3], packed[4], packed[5], packed[6], packed[7],
                    packed[8], packed[9], packed[10], packed[11], packed[12],
-                   packed[13], packed[14])
+                   packed[13], packed[14], packed[15])
@@ -32,11 +32,12 @@ from ..local import utils
 
 class Output(object):
 
-  def __init__(self, exit_code, timed_out, stdout, stderr):
+  def __init__(self, exit_code, timed_out, stdout, stderr, pid):
     self.exit_code = exit_code
     self.timed_out = timed_out
     self.stdout = stdout
     self.stderr = stderr
+    self.pid = pid
 
   def HasCrashed(self):
     if utils.IsWindows():
@@ -52,9 +53,9 @@ class Output(object):
     return self.timed_out
 
   def Pack(self):
-    return [self.exit_code, self.timed_out, self.stdout, self.stderr]
+    return [self.exit_code, self.timed_out, self.stdout, self.stderr, self.pid]
 
   @staticmethod
   def Unpack(packed):
     # For the order of the fields, refer to Pack() above.
-    return Output(packed[0], packed[1], packed[2], packed[3])
+    return Output(packed[0], packed[1], packed[2], packed[3], packed[4])
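
As a sanity check that the new field survives worker serialization, a round trip with a minimal stand-in for the patched class (values are made up):

class Output(object):  # stand-in mirroring the patched class above
  def __init__(self, exit_code, timed_out, stdout, stderr, pid):
    self.exit_code = exit_code
    self.timed_out = timed_out
    self.stdout = stdout
    self.stderr = stderr
    self.pid = pid

  def Pack(self):
    return [self.exit_code, self.timed_out, self.stdout, self.stderr, self.pid]

  @staticmethod
  def Unpack(packed):
    return Output(packed[0], packed[1], packed[2], packed[3], packed[4])

out = Output(0, False, 'some stdout', '', 1234)
assert Output.Unpack(out.Pack()).pid == 1234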