vendor/v8/tools/test.py in mustang-0.0.1 vs vendor/v8/tools/test.py in mustang-0.1.0
- old
+ new
@@ -338,10 +338,13 @@
self.mode = mode
def IsNegative(self):
return False
+ def TestsIsolates(self):
+ return False
+
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
@@ -369,12 +372,16 @@
pass
def AfterRun(self, result):
pass
+ def GetCustomFlags(self, mode):
+ return None
+
def Run(self):
self.BeforeRun()
+ result = "exception"
try:
result = self.RunCommand(self.GetCommand())
finally:
self.AfterRun(result)
return result
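The new result = "exception" line matters because RunCommand can raise inside the try: without a prior binding, the AfterRun(result) call in the finally clause would itself fail with a NameError and mask the original error. A minimal sketch of the pattern, with RunStep and Cleanup as hypothetical stand-ins for RunCommand and AfterRun:

    def RunStep():
      raise IOError("simulated failure")

    def Cleanup(result):
      print "cleaning up, result was: %r" % (result,)

    def Run():
      result = "exception"   # sentinel: keeps Cleanup's argument bound
      try:
        result = RunStep()   # may raise before assigning to result
      finally:
        Cleanup(result)      # runs whether RunStep succeeded or raised
      return result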
@@ -497,16 +504,24 @@
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
- try:
- os.unlink(name)
- except OSError, e:
- PrintError("os.unlink() " + str(e))
+ # On Windows, when run with -jN in parallel processes,
+ # the OS often fails to unlink the temp file. Not sure why.
+ # Need to retry.
+ # Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
+ retry_count = 0
+ while retry_count < 30:
+ try:
+ os.unlink(name)
+ return
+ except OSError, e:
+ retry_count += 1
+ time.sleep(retry_count * 0.1)
+ PrintError("os.unlink() " + str(e))
-
def Execute(args, context, timeout=None):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
(process, exit_code, timed_out) = RunProcess(
context,
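The loop above retries the unlink for up to 30 attempts, sleeping a little longer after each failure (0.1s, then 0.2s, and so on), and reports the error only once every attempt has failed. A standalone sketch of the same linear-backoff retry, using only the standard library:

    import os
    import tempfile
    import time

    def RetryingUnlink(name):
      retry_count = 0
      while retry_count < 30:
        try:
          os.unlink(name)
          return
        except OSError, e:
          retry_count += 1
          time.sleep(retry_count * 0.1)   # back off a bit more each time
      print "os.unlink() " + str(e)       # all 30 attempts failed

    (fd, name) = tempfile.mkstemp()
    os.close(fd)
    RetryingUnlink(name)                  # succeeds on the first attempt here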
@@ -567,11 +582,13 @@
return self.name
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
-VARIANT_FLAGS = [[], ['--stress-opt', '--always-opt'], ['--nocrankshaft']]
+VARIANT_FLAGS = [[],
+ ['--stress-opt', '--always-opt'],
+ ['--nocrankshaft']]
class TestRepository(TestSuite):
def __init__(self, path):
@@ -598,11 +615,11 @@
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, mode):
for v in VARIANT_FLAGS:
- tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
+ tests = self.GetConfiguration(context).ListTests(current_path, path, mode, v)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
@@ -621,11 +638,11 @@
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
- def ListTests(self, current_path, path, context, mode):
+ def ListTests(self, current_path, path, context, mode, variant_flags):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
@@ -669,11 +686,14 @@
def GetVmCommand(self, testcase, mode):
return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
def GetVmFlags(self, testcase, mode):
- return testcase.variant_flags + FLAGS[mode]
+ flags = testcase.GetCustomFlags(mode)
+ if flags is None:
+ flags = FLAGS[mode]
+ return testcase.variant_flags + flags
def GetTimeout(self, testcase, mode):
result = self.timeout * TIMEOUT_SCALEFACTOR[mode]
if '--stress-opt' in self.GetVmFlags(testcase, mode):
return result * 2
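A test case can now replace the per-mode FLAGS entirely by overriding GetCustomFlags, while its variant flags are always preserved. A sketch under assumed FLAGS values; QuietTestCase is a hypothetical override:

    FLAGS = {'debug': ['--enable-slow-asserts'], 'release': []}   # assumed values

    class TestCase(object):
      variant_flags = []
      def GetCustomFlags(self, mode):
        return None                    # default: fall back to FLAGS[mode]

    class QuietTestCase(TestCase):
      def GetCustomFlags(self, mode):
        return []                      # override: suppress the per-mode flags

    def GetVmFlags(testcase, mode):
      flags = testcase.GetCustomFlags(mode)
      if flags is None:
        flags = FLAGS[mode]
      return testcase.variant_flags + flags

    print GetVmFlags(TestCase(), 'debug')        # ['--enable-slow-asserts']
    print GetVmFlags(QuietTestCase(), 'debug')   # []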
@@ -1005,11 +1025,14 @@
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
+ def TestsIsolates(self):
+ return self.case.TestsIsolates()
+
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
@@ -1164,10 +1187,11 @@
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
+ result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
@@ -1293,11 +1317,11 @@
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
-BUILT_IN_TESTS = ['mjsunit', 'cctest', 'message']
+BUILT_IN_TESTS = ['mjsunit', 'cctest', 'message', 'preparser']
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
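Listing 'preparser' here is enough for the suite to run because GetSuites treats any directory under the test root that contains a testcfg.py as a suite; the function plausibly completes along these lines:

    import os
    from os.path import isdir, exists, join

    def GetSuites(test_root):
      def IsSuite(path):
        return isdir(path) and exists(join(path, 'testcfg.py'))
      return [f for f in os.listdir(test_root) if IsSuite(join(test_root, f))]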
@@ -1386,21 +1410,18 @@
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for mode in options.mode:
- if not exists(context.GetVm(mode)):
- print "Can't find shell executable: '%s'" % context.GetVm(mode)
- continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': options.arch,
'simulator': options.simulator,
'crankshaft': options.crankshaft
}
- test_list = root.ListTests([], path, context, mode)
+ test_list = root.ListTests([], path, context, mode, [])
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
@@ -1430,9 +1451,11 @@
result = None
def DoSkip(case):
return SKIP in case.outcomes or SLOW in case.outcomes
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
+ if not options.isolates:
+ cases_to_run = [c for c in cases_to_run if not c.TestsIsolates()]
if len(cases_to_run) == 0:
print "No tests to run."
return 0
else:
try:
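The filter ties the isolates pieces together: the base test class answers False from TestsIsolates(), the wrapper case delegates to the test underneath, and unless --isolates was passed such cases are dropped before the run starts. A small sketch with hypothetical classes:

    class PlainCase(object):
      def TestsIsolates(self):
        return False              # base-class default from the first hunk

    class IsolatesCase(object):
      def TestsIsolates(self):
        return True               # an isolates test opts in

    class Options(object):
      isolates = False            # default of the --isolates option

    options = Options()
    cases_to_run = [PlainCase(), IsolatesCase(), PlainCase()]
    if not options.isolates:
      cases_to_run = [c for c in cases_to_run if not c.TestsIsolates()]
    print len(cases_to_run)       # 2: the isolates case was filtered out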