Commit e42fda5e authored by machenbach, committed by Commit bot

Add test flags feature to perf runner.

TBR=svenpanne@chromium.org
NOTRY=true

Review URL: https://codereview.chromium.org/722023006

Cr-Commit-Position: refs/heads/master@{#25559}
parent db818dbd
@@ -15,6 +15,7 @@ The suite json format is expected to be:
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
  "test_flags": [<flag to the test file>, ...],
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
  "resources": [<js file to be loaded before main>, ...]
@@ -54,6 +55,7 @@ Full example (suite with one runner):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "test_flags": ["5"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
@@ -89,6 +91,8 @@ Full example (suite with several runners):
}

Path pieces are concatenated. D8 is always run with the suite's path as cwd.

The test flags are passed to the js test file after '--'.
"""

from collections import OrderedDict
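For readers skimming the docstring, here is a small, self-contained sketch (not part of this patch) of how the documented suite fields are expected to compose into a d8 command line, with test_flags appended after the '--' separator. The build_command helper and the hard-coded shell_dir are illustrative assumptions, not names from run_perf.py.

import os


def build_command(shell_dir, suite):
  # Illustrative only: binary, d8 flags and resources come first, then the
  # main file; test flags, if any, follow a "--" separator so d8 forwards
  # them to the test file instead of consuming them itself.
  suffix = ["--"] + suite.get("test_flags", []) if suite.get("test_flags") else []
  return (
      [os.path.join(shell_dir, suite.get("binary", "d8"))] +
      suite.get("flags", []) +
      suite.get("resources", []) +
      [suite["main"]] +
      suffix
  )


if __name__ == "__main__":
  suite = {
      "flags": ["--expose-gc"],
      "test_flags": ["5"],
      "resources": [],
      "main": "run.js",
  }
  # On POSIX this prints:
  #   ['out/x64.release/d8', '--expose-gc', 'run.js', '--', '5']
  print(build_command(os.path.join("out", "x64.release"), suite))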
@@ -171,6 +175,7 @@ class DefaultSentinel(Node):
    self.path = []
    self.graphs = []
    self.flags = []
    self.test_flags = []
    self.resources = []
    self.results_regexp = None
    self.stddev_regexp = None
@@ -190,12 +195,14 @@ class Graph(Node):
    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])
    self.resources = parent.resources[:] + suite.get("resources", [])

    # Discrete values (with parent defaults).
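For context, test_flags follow the same accumulation pattern as path, flags and resources: a child graph copies its parent's list and appends its own entries. A hedged, standalone illustration of that pattern (the variable names are made up for this sketch):

# Illustrative only: the accumulation pattern shared by flags, resources and,
# with this change, test_flags. Copying with [:] keeps the parent list intact.
parent_test_flags = ["5"]
child_suite = {"test_flags": ["child_only"]}

child_test_flags = parent_test_flags[:] + child_suite.get("test_flags", [])

assert child_test_flags == ["5", "child_only"]
assert parent_test_flags == ["5"]  # parent list is not mutated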
@@ -282,11 +289,13 @@ class Runnable(Graph):
  def GetCommand(self, shell_dir):
    # TODO(machenbach): This requires +.exe if run on windows.
    suffix = ["--"] + self.test_flags if self.test_flags else []
    return (
        [os.path.join(shell_dir, self.binary)] +
        self.flags +
        self.resources +
        [self.main] +
        suffix
    )

  def Run(self, runner):
...
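One detail worth noting in GetCommand above: when a suite defines no test flags, no '--' separator is appended at all, so command lines of existing suites stay unchanged. A minimal standalone sketch of that guard (make_suffix is an illustrative name, not a function in run_perf.py):

def make_suffix(test_flags):
  # Mirrors the guard in GetCommand: only emit "--" when there is
  # something to forward to the test file.
  return ["--"] + test_flags if test_flags else []

assert make_suffix([]) == []                              # no dangling "--"
assert make_suffix(["5"]) == ["--", "5"]
assert make_suffix(["2", "test_name"]) == ["--", "2", "test_name"]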
@@ -179,6 +179,20 @@ class PerfTest(unittest.TestCase):
    self._VerifyErrors([])
    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")

  def testOneRunWithTestFlags(self):
    test_input = dict(V8_JSON)
    test_input["test_flags"] = ["2", "test_name"]
    self._WriteTestInput(test_input)
    self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567"])
    self.assertEquals(0, self._CallMain())
    self._VerifyResults("test", "score", [
      {"name": "Richards", "results": ["1.234"], "stddev": ""},
      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
    ])
    self._VerifyErrors([])
    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js",
                     "--", "2", "test_name")

  def testTwoRuns_Units_SuiteName(self):
    test_input = dict(V8_JSON)
    test_input["run_count"] = 2
...