diff --git a/.gitignore b/.gitignore
index 74371f8daf1e5a..b48badadf8c4bf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,6 @@
 *~
 *#
 \#*
-_virtualenv
\ No newline at end of file
+_virtualenv
+test/test.cfg
+test/metadata/MANIFEST.json
diff --git a/test/metadata/reftest/reftest_and_fail.html.ini b/test/metadata/reftest/reftest_and_fail.html.ini
new file mode 100644
index 00000000000000..81aef049cd122f
--- /dev/null
+++ b/test/metadata/reftest/reftest_and_fail.html.ini
@@ -0,0 +1,3 @@
+[reftest_and_fail.html]
+  type: reftest
+  expected: FAIL
diff --git a/test/metadata/reftest/reftest_cycle_fail.html.ini b/test/metadata/reftest/reftest_cycle_fail.html.ini
new file mode 100644
index 00000000000000..472b33f7764bde
--- /dev/null
+++ b/test/metadata/reftest/reftest_cycle_fail.html.ini
@@ -0,0 +1,3 @@
+[reftest_cycle_fail.html]
+  type: reftest
+  expected: FAIL
diff --git a/test/metadata/reftest/reftest_match_fail.html.ini b/test/metadata/reftest/reftest_match_fail.html.ini
new file mode 100644
index 00000000000000..f3dc3362fac41b
--- /dev/null
+++ b/test/metadata/reftest/reftest_match_fail.html.ini
@@ -0,0 +1,3 @@
+[reftest_match_fail.html]
+  type: reftest
+  expected: FAIL
diff --git a/test/metadata/reftest/reftest_mismatch_fail.html.ini b/test/metadata/reftest/reftest_mismatch_fail.html.ini
new file mode 100644
index 00000000000000..1055337e2d65cb
--- /dev/null
+++ b/test/metadata/reftest/reftest_mismatch_fail.html.ini
@@ -0,0 +1,3 @@
+[reftest_mismatch_fail.html]
+  type: reftest
+  expected: FAIL
diff --git a/test/metadata/reftest/reftest_ref_timeout.html.ini b/test/metadata/reftest/reftest_ref_timeout.html.ini
new file mode 100644
index 00000000000000..8936241ad29cd1
--- /dev/null
+++ b/test/metadata/reftest/reftest_ref_timeout.html.ini
@@ -0,0 +1,3 @@
+[reftest_ref_timeout.html]
+  type: reftest
+  expected: TIMEOUT
diff --git a/test/metadata/reftest/reftest_timeout.html.ini b/test/metadata/reftest/reftest_timeout.html.ini
new file mode 100644
index 00000000000000..0d1b9bade95d7b
--- /dev/null
+++ b/test/metadata/reftest/reftest_timeout.html.ini
@@ -0,0 +1,3 @@
+[reftest_timeout.html]
+  type: reftest
+  expected: TIMEOUT
diff --git a/test/metadata/testharness/testharness_0.html.ini b/test/metadata/testharness/testharness_0.html.ini
new file mode 100644
index 00000000000000..90b9a6e9f012b5
--- /dev/null
+++ b/test/metadata/testharness/testharness_0.html.ini
@@ -0,0 +1,4 @@
+[testharness_0.html]
+  type: testharness
+  [Test that should fail]
+    expected: FAIL
diff --git a/test/metadata/testharness/testharness_error.html.ini b/test/metadata/testharness/testharness_error.html.ini
new file mode 100644
index 00000000000000..fa53e0733abcb1
--- /dev/null
+++ b/test/metadata/testharness/testharness_error.html.ini
@@ -0,0 +1,3 @@
+[testharness_error.html]
+  type: testharness
+  expected: ERROR
diff --git a/test/metadata/testharness/testharness_timeout.html.ini b/test/metadata/testharness/testharness_timeout.html.ini
new file mode 100644
index 00000000000000..55eca5191ab1fa
--- /dev/null
+++ b/test/metadata/testharness/testharness_timeout.html.ini
@@ -0,0 +1,3 @@
+[testharness_timeout.html]
+  type: testharness
+  expected: TIMEOUT
diff --git a/test/test.cfg.example b/test/test.cfg.example
new file mode 100644
index 00000000000000..6a4057e2200884
--- /dev/null
+++ b/test/test.cfg.example
@@ -0,0 +1,16 @@
+[general]
+tests=/path/to/web-platform-tests/
+metadata=/path/to/web-platform-tests/
+ssl-type=none
+
+# [firefox]
+# binary=/path/to/firefox
+# prefs-root=/path/to/gecko-src/testing/profiles/
+
+# [servo]
+# binary=/path/to/servo-src/components/servo/target/servo
+# exclude=testharness  # Because it needs a special testharness.js
+
+# [chrome]
+# binary=/path/to/chrome
+# webdriver-binary=/path/to/chromedriver
\ No newline at end of file
diff --git a/test/test.py b/test/test.py
new file mode 100644
index 00000000000000..b6412caf5e7e0d
--- /dev/null
+++ b/test/test.py
@@ -0,0 +1,163 @@
+import ConfigParser
+import argparse
+import json
+import os
+import sys
+import tempfile
+import threading
+import time
+from StringIO import StringIO
+
+from mozlog.structured import structuredlog, reader
+from mozlog.structured.handlers import BaseHandler, StreamHandler, StatusHandler
+from mozlog.structured.formatters import MachFormatter
+from wptrunner import wptcommandline, wptrunner
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+def setup_wptrunner_logging(logger):
+    structuredlog.set_default_logger(logger)
+    wptrunner.logger = logger
+    wptrunner.setup_stdlib_logger()
+
+class ResultHandler(BaseHandler):
+    def __init__(self, verbose=False, logger=None):
+        self.inner = StreamHandler(sys.stdout, MachFormatter())
+        BaseHandler.__init__(self, self.inner)
+        self.product = None
+        self.verbose = verbose
+        self.logger = logger
+
+        self.register_message_handlers("wptrunner-test", {"set-product": self.set_product})
+
+    def set_product(self, product):
+        self.product = product
+
+    def __call__(self, data):
+        if self.product is not None and data["action"] in ["suite_start", "suite_end"]:
+            # Hack: mozlog sets some internal state to prevent multiple suite_start or
+            # suite_end messages. We actually want that here (one from the metaharness
+            # and one from the individual test type harness), so override that internal
+            # state (a better solution might be to not share loggers, but this works
+            # well enough)
+            self.logger._state.suite_started = True
+            return
+
+        if (not self.verbose and
+            (data["action"] == "process_output" or
+             data["action"] == "log" and data["level"] not in ["error", "critical"])):
+            return
+
+        if "test" in data:
+            data = data.copy()
+            data["test"] = "%s: %s" % (self.product, data["test"])
+
+        return self.inner(data)
+
+def test_settings():
+    return {
+        "include": "_test",
+        "manifest-update": "",
+        "no-capture-stdio": ""
+    }
+
+def read_config():
+    parser = ConfigParser.ConfigParser()
+    parser.read("test.cfg")
+
+    rv = {"general":{},
+          "products":{}}
+
+    rv["general"].update(dict(parser.items("general")))
+
+    # This only allows one product per whatever for now
+    for product in parser.sections():
+        if product != "general":
+            dest = rv["products"][product] = {}
+            for key, value in parser.items(product):
+                rv["products"][product][key] = value
+
+    return rv
+
+def run_tests(product, kwargs):
+    kwargs["test_paths"]["/_test/"] = {"tests_path": os.path.join(here, "testdata"),
+                                       "metadata_path": os.path.join(here, "metadata")}
+
+    wptrunner.run_tests(**kwargs)
+
+def settings_to_argv(settings):
+    rv = []
+    for name, value in settings.iteritems():
+        key = "--%s" % name
+        if not value:
+            rv.append(key)
+        elif isinstance(value, list):
+            for item in value:
+                rv.extend([key, item])
+        else:
+            rv.extend([key, value])
+    return rv
+
+def set_from_args(settings, args):
+    if args.test:
+        settings["include"] = args.test
+
+def run(config, args):
+    logger = structuredlog.StructuredLogger("web-platform-tests")
+    logger.add_handler(ResultHandler(logger=logger, verbose=args.verbose))
+    setup_wptrunner_logging(logger)
+
+    parser = wptcommandline.create_parser()
+
+    logger.suite_start(tests=[])
+
+    for product, product_settings in config["products"].iteritems():
+        if args.product and product not in args.product:
+            continue
+
+        settings = test_settings()
+        settings.update(config["general"])
+        settings.update(product_settings)
+        settings["product"] = product
+        set_from_args(settings, args)
+
+        kwargs = vars(parser.parse_args(settings_to_argv(settings)))
+        wptcommandline.check_args(kwargs)
+
+        logger.send_message("wptrunner-test", "set-product", product)
+
+        run_tests(product, kwargs)
+
+    logger.send_message("wptrunner-test", "set-product", None)
+    logger.suite_end()
+
+def get_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-v", "--verbose", action="store_true", default=False,
+                        help="verbose log output")
+    parser.add_argument("--product", action="append",
+                        help="Specific product to include in test run")
+    parser.add_argument("--pdb", action="store_true",
+                        help="Invoke pdb on uncaught exception")
+    parser.add_argument("test", nargs="*", type=wptcommandline.slash_prefixed,
+                        help="Specific tests to include in test run")
+    return parser
+
+def main():
+    config = read_config()
+
+    args = get_parser().parse_args()
+
+    try:
+        run(config, args)
+    except Exception:
+        if args.pdb:
+            import pdb, traceback
+            print traceback.format_exc()
+            pdb.post_mortem()
+        else:
+            raise
+
+
+if __name__ == "__main__":
+    main()
diff --git a/test/testdata/reftest/green-ref.html b/test/testdata/reftest/green-ref.html
new file mode 100644
index 00000000000000..0e145d60b55b35
--- /dev/null
+++ b/test/testdata/reftest/green-ref.html
@@ -0,0 +1,4 @@
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/green.html b/test/testdata/reftest/green.html
new file mode 100644
index 00000000000000..38167bb58d571e
--- /dev/null
+++ b/test/testdata/reftest/green.html
@@ -0,0 +1,3 @@
+
\ No newline at end of file
diff --git a/test/testdata/reftest/red.html b/test/testdata/reftest/red.html
new file mode 100644
index 00000000000000..2b677e00634bcd
--- /dev/null
+++ b/test/testdata/reftest/red.html
@@ -0,0 +1,3 @@
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_and_fail.html b/test/testdata/reftest/reftest_and_fail.html
new file mode 100644
index 00000000000000..2960195356238f
--- /dev/null
+++ b/test/testdata/reftest/reftest_and_fail.html
@@ -0,0 +1,5 @@
+Reftest chain that should fail
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_and_fail_0-ref.html b/test/testdata/reftest/reftest_and_fail_0-ref.html
new file mode 100644
index 00000000000000..04fb9aa15176e6
--- /dev/null
+++ b/test/testdata/reftest/reftest_and_fail_0-ref.html
@@ -0,0 +1,5 @@
+Reftest chain that should fail
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_cycle.html b/test/testdata/reftest/reftest_cycle.html
new file mode 100644
index 00000000000000..4a84a3b6741ee4
--- /dev/null
+++ b/test/testdata/reftest/reftest_cycle.html
@@ -0,0 +1,5 @@
+Reftest with cycle, all match
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_cycle_0-ref.html b/test/testdata/reftest/reftest_cycle_0-ref.html
new file mode 100644
index 00000000000000..118bfd88447b48
--- /dev/null
+++ b/test/testdata/reftest/reftest_cycle_0-ref.html
@@ -0,0 +1,5 @@
+OR match that should pass
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_cycle_1-ref.html b/test/testdata/reftest/reftest_cycle_1-ref.html
new file mode 100644
index 00000000000000..59be0b641def6b
--- /dev/null
+++ b/test/testdata/reftest/reftest_cycle_1-ref.html
@@ -0,0 +1,5 @@
+Reftest with cycle, all match
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_cycle_fail.html b/test/testdata/reftest/reftest_cycle_fail.html
new file mode 100644
index 00000000000000..175e76c4cc9bb7
--- /dev/null
+++ b/test/testdata/reftest/reftest_cycle_fail.html
@@ -0,0 +1,5 @@
+Reftest with cycle, fails
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_cycle_fail_0-ref.html b/test/testdata/reftest/reftest_cycle_fail_0-ref.html
new file mode 100644
index 00000000000000..c8e548c4622556
--- /dev/null
+++ b/test/testdata/reftest/reftest_cycle_fail_0-ref.html
@@ -0,0 +1,5 @@
+Reftest with cycle, fails
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_match.html b/test/testdata/reftest/reftest_match.html
new file mode 100644
index 00000000000000..333cc6c1ecdf20
--- /dev/null
+++ b/test/testdata/reftest/reftest_match.html
@@ -0,0 +1,5 @@
+rel=match that should pass
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_match_fail.html b/test/testdata/reftest/reftest_match_fail.html
new file mode 100644
index 00000000000000..a9272ef74da873
--- /dev/null
+++ b/test/testdata/reftest/reftest_match_fail.html
@@ -0,0 +1,5 @@
+rel=match that should fail
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_mismatch.html b/test/testdata/reftest/reftest_mismatch.html
new file mode 100644
index 00000000000000..af5fa0750d83b6
--- /dev/null
+++ b/test/testdata/reftest/reftest_mismatch.html
@@ -0,0 +1,5 @@
+rel=mismatch that should pass
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_mismatch_fail.html b/test/testdata/reftest/reftest_mismatch_fail.html
new file mode 100644
index 00000000000000..8d160c4fc200ca
--- /dev/null
+++ b/test/testdata/reftest/reftest_mismatch_fail.html
@@ -0,0 +1,5 @@
+rel=mismatch that should fail
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_or_0.html b/test/testdata/reftest/reftest_or_0.html
new file mode 100644
index 00000000000000..3a51de21644103
--- /dev/null
+++ b/test/testdata/reftest/reftest_or_0.html
@@ -0,0 +1,6 @@
+OR match that should pass
+
+
+
\ No newline at end of file
diff --git a/test/testdata/reftest/reftest_ref_timeout-ref.html b/test/testdata/reftest/reftest_ref_timeout-ref.html
new file mode 100644
index 00000000000000..04cbb71e0c3c42
--- /dev/null
+++ b/test/testdata/reftest/reftest_ref_timeout-ref.html
@@ -0,0 +1,6 @@
+
+rel=match that should time out in the ref
+
+
diff --git a/test/testdata/reftest/reftest_ref_timeout.html b/test/testdata/reftest/reftest_ref_timeout.html
new file mode 100644
index 00000000000000..aaf68f5cb5fa04
--- /dev/null
+++ b/test/testdata/reftest/reftest_ref_timeout.html
@@ -0,0 +1,6 @@
+
+rel=match that should time out in the ref
+
+
diff --git a/test/testdata/reftest/reftest_timeout.html b/test/testdata/reftest/reftest_timeout.html
new file mode 100644
index 00000000000000..b10e676bf0052e
--- /dev/null
+++ b/test/testdata/reftest/reftest_timeout.html
@@ -0,0 +1,6 @@
+
+rel=match that should timeout
+
+
diff --git a/test/testdata/reftest/reftest_wait_0.html b/test/testdata/reftest/reftest_wait_0.html
new file mode 100644
index 00000000000000..0088c0cab88e87
--- /dev/null
+++ b/test/testdata/reftest/reftest_wait_0.html
@@ -0,0 +1,11 @@
+rel=match that should fail
+
+
+
+
+
\ No newline at end of file
diff --git a/test/testdata/testharness/testharness_error.html b/test/testdata/testharness/testharness_error.html
new file mode 100644
index 00000000000000..0ac5ba46a3305f
--- /dev/null
+++ b/test/testdata/testharness/testharness_error.html
@@ -0,0 +1,7 @@
+
+testharness.js test that should error
+
+
diff --git a/test/testdata/testharness/testharness_long_timeout.html b/test/testdata/testharness/testharness_long_timeout.html
new file mode 100644
index 00000000000000..fc94e055be0c55
--- /dev/null
+++ b/test/testdata/testharness/testharness_long_timeout.html
@@ -0,0 +1,9 @@
+
+testharness.js test with long timeout
+
+
+
\ No newline at end of file
diff --git a/test/testdata/testharness/testharness_timeout.html b/test/testdata/testharness/testharness_timeout.html
new file mode 100644
index 00000000000000..b99915ac745395
--- /dev/null
+++ b/test/testdata/testharness/testharness_timeout.html
@@ -0,0 +1,6 @@
+
+Simple testharness.js usage
+
+
+// This file should time out, obviously
\ No newline at end of file
diff --git a/wptrunner/browsers/chrome.py b/wptrunner/browsers/chrome.py
index a4096be4110b68..5bf199713e5e86 100644
--- a/wptrunner/browsers/chrome.py
+++ b/wptrunner/browsers/chrome.py
@@ -4,13 +4,17 @@
 
 from .base import Browser, ExecutorBrowser, require_arg
 from .webdriver import ChromedriverLocalServer
-from ..executors.executorselenium import SeleniumTestharnessExecutor, required_files
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executorselenium import (SeleniumTestharnessExecutor,
+                                          SeleniumRefTestExecutor,
+                                          required_files)
 
 
 __wptrunner__ = {"product": "chrome",
                  "check_args": "check_args",
                  "browser": "ChromeBrowser",
-                 "executor": {"testharness": "SeleniumTestharnessExecutor"},
+                 "executor": {"testharness": "SeleniumTestharnessExecutor",
+                              "reftest": "SeleniumRefTestExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_options": "env_options"}
@@ -25,19 +29,16 @@ def browser_kwargs(**kwargs):
             "webdriver_binary": kwargs["webdriver_binary"]}
 
 
-def executor_kwargs(http_server_url, **kwargs):
+def executor_kwargs(test_type, http_server_url, **kwargs):
     from selenium.webdriver import DesiredCapabilities
 
-    timeout_multiplier = kwargs["timeout_multiplier"]
-    if timeout_multiplier is None:
-        timeout_multiplier = 1
-
-    binary = kwargs["binary"]
-    capabilities = dict(DesiredCapabilities.CHROME.items() +
-                        {"chromeOptions": {"binary": binary}}.items())
+    executor_kwargs = base_executor_kwargs(test_type, http_server_url, **kwargs)
+    executor_kwargs["close_after_done"] = True
+    executor_kwargs["capabilities"] = dict(DesiredCapabilities.CHROME.items() +
+                                           {"chromeOptions":
+                                            {"binary": kwargs["binary"]}}.items())
 
-    return {"http_server_url": http_server_url,
-            "capabilities": capabilities,
-            "timeout_multiplier": timeout_multiplier}
+    return executor_kwargs
 
 
 def env_options():
diff --git a/wptrunner/browsers/firefox.py b/wptrunner/browsers/firefox.py
index a1c3a620f6ff2f..f0ce10dcdd544e 100644
--- a/wptrunner/browsers/firefox.py
+++ b/wptrunner/browsers/firefox.py
@@ -14,7 +14,7 @@
 
 from .base import get_free_port, Browser, ExecutorBrowser, require_arg, cmd_arg
 from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executormarionette import MarionetteTestharnessExecutor, MarionetteReftestExecutor, required_files
+from ..executors.executormarionette import MarionetteTestharnessExecutor, MarionetteRefTestExecutor, required_files
 
 here = os.path.join(os.path.split(__file__)[0])
 
@@ -22,7 +22,7 @@
                  "check_args": "check_args",
                  "browser": "FirefoxBrowser",
                  "executor": {"testharness": "MarionetteTestharnessExecutor",
-                              "reftest": "MarionetteReftestExecutor"},
+                              "reftest": "MarionetteRefTestExecutor"},
"MarionetteRefTestExecutor"}, "browser_kwargs": "browser_kwargs", "executor_kwargs": "executor_kwargs", "env_options": "env_options"} @@ -45,8 +45,8 @@ def browser_kwargs(**kwargs): "ca_certificate_path": kwargs["ssl_env"].ca_cert_path()} -def executor_kwargs(http_server_url, **kwargs): - executor_kwargs = base_executor_kwargs(http_server_url, **kwargs) +def executor_kwargs(test_type, http_server_url, **kwargs): + executor_kwargs = base_executor_kwargs(test_type, http_server_url, **kwargs) executor_kwargs["close_after_done"] = True return executor_kwargs diff --git a/wptrunner/browsers/servo.py b/wptrunner/browsers/servo.py index a6d75abdbf40af..f27f83c25a3a4c 100644 --- a/wptrunner/browsers/servo.py +++ b/wptrunner/browsers/servo.py @@ -6,7 +6,7 @@ from .base import NullBrowser, ExecutorBrowser, require_arg from ..executors import executor_kwargs -from ..executors.executorservo import ServoTestharnessExecutor, ServoReftestExecutor +from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor here = os.path.join(os.path.split(__file__)[0]) @@ -14,7 +14,7 @@ "check_args": "check_args", "browser": "ServoBrowser", "executor": {"testharness": "ServoTestharnessExecutor", - "reftest": "ServoReftestExecutor"}, + "reftest": "ServoRefTestExecutor"}, "browser_kwargs": "browser_kwargs", "executor_kwargs": "executor_kwargs", "env_options": "env_options"} diff --git a/wptrunner/executors/base.py b/wptrunner/executors/base.py index 41fa4271bf0094..90f70b9ce23120 100644 --- a/wptrunner/executors/base.py +++ b/wptrunner/executors/base.py @@ -2,14 +2,20 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. +import hashlib import json import os +import traceback from abc import ABCMeta, abstractmethod +from multiprocessing import Manager + +from ..testrunner import Stop here = os.path.split(__file__)[0] +cache_manager = Manager() -def executor_kwargs(http_server_url, **kwargs): +def executor_kwargs(test_type, http_server_url, **kwargs): timeout_multiplier = kwargs["timeout_multiplier"] if timeout_multiplier is None: timeout_multiplier = 1 @@ -17,6 +23,10 @@ def executor_kwargs(http_server_url, **kwargs): executor_kwargs = {"http_server_url": http_server_url, "timeout_multiplier": timeout_multiplier, "debug_args": kwargs["debug_args"]} + + if test_type == "reftest": + executor_kwargs["screenshot_cache"] = cache_manager.dict() + return executor_kwargs @@ -42,12 +52,19 @@ def __call__(self, test, result): def reftest_result_converter(self, test, result): - return (test.result_cls(result["status"], result["message"]), []) + return (test.result_cls(result["status"], result["message"], + extra=result.get("extra")), []) + +class ExecutorException(Exception): + def __init__(self, status, message): + self.status = status + self.message = message class TestExecutor(object): __metaclass__ = ABCMeta + test_type = None convert_result = None def __init__(self, browser, http_server_url, timeout_multiplier=1, @@ -68,6 +85,7 @@ def __init__(self, browser, http_server_url, timeout_multiplier=1, self.http_server_url = http_server_url self.timeout_multiplier = timeout_multiplier self.debug_args = debug_args + self.protocol = None # This must be set in subclasses @property def logger(self): @@ -75,21 +93,174 @@ def logger(self): if self.runner is not None: return self.runner.logger - @abstractmethod def setup(self, runner): """Run steps needed before tests can be started e.g. 
         connecting to browser instance
 
         :param runner: TestRunner instance that is going to run the tests"""
-        pass
+        self.runner = runner
+        self.protocol.setup(runner)
 
     def teardown(self):
         """Run cleanup steps after tests have finished"""
-        pass
+        self.protocol.teardown()
 
-    @abstractmethod
     def run_test(self, test):
         """Run a particular test.
 
         :param test: The test to run"""
+        try:
+            result = self.do_test(test)
+        except Exception as e:
+            result = self.result_from_exception(test, e)
+
+        if result is Stop:
+            return result
+
+        if result[0].status == "ERROR":
+            self.logger.debug(result[0].message)
+        self.runner.send_message("test_ended", test, result)
+
+    @abstractmethod
+    def do_test(self, test):
+        """Test-type and protocol specific implementation of running a
+        specific test.
+
+        :param test: The test to run."""
+        pass
+
+    def result_from_exception(self, test, e):
+        if hasattr(e, "status") and e.status in test.result_cls.statuses:
+            status = e.status
+        else:
+            status = "ERROR"
+        message = unicode(getattr(e, "message", ""))
+        if message:
+            message += "\n"
+        message += traceback.format_exc(e)
+        return test.result_cls(status, message), []
+
+
+class TestharnessExecutor(TestExecutor):
+    convert_result = testharness_result_converter
+
+
+class RefTestExecutor(TestExecutor):
+    convert_result = reftest_result_converter
+
+    def __init__(self, browser, http_server_url, timeout_multiplier=1, screenshot_cache=None,
+                 debug_args=None):
+        TestExecutor.__init__(self, browser, http_server_url,
+                              timeout_multiplier=timeout_multiplier,
+                              debug_args=debug_args)
+
+        self.screenshot_cache = screenshot_cache
+
+
+class RefTestImplementation(object):
+    def __init__(self, executor):
+        self.timeout_multiplier = executor.timeout_multiplier
+        self.executor = executor
+        # Cache of url:(screenshot hash, screenshot). Typically the
+        # screenshot is None, but we set this value if a test fails
+        # and the screenshot was taken from the cache so that we may
+        # retrieve the screenshot from the cache directly in the future
+        self.screenshot_cache = self.executor.screenshot_cache
+        self.message = None
+
+    @property
+    def logger(self):
+        return self.executor.logger
+
+    def get_hash(self, url, timeout):
+        timeout = timeout * self.timeout_multiplier
+
+        if url not in self.screenshot_cache:
+            success, data = self.executor.screenshot(url, timeout)
+
+            if not success:
+                return False, data
+
+            screenshot = data
+            hash_value = hashlib.sha1(screenshot).hexdigest()
+
+            self.screenshot_cache[url] = (hash_value, None)
+
+            rv = True, (hash_value, screenshot)
+        else:
+            rv = True, self.screenshot_cache[url]
+
+        self.message.append("%s %s" % (url, rv[1][0]))
+        return rv
+
+    def is_pass(self, lhs_hash, rhs_hash, relation):
+        assert relation in ("==", "!=")
+        self.message.append("Testing %s %s %s" % (lhs_hash, relation, rhs_hash))
+        return ((relation == "==" and lhs_hash == rhs_hash) or
+                (relation == "!=" and lhs_hash != rhs_hash))
+
+    def run_test(self, test):
+        self.message = []
+
+        # Depth-first search of reference tree, with the goal
+        # of reaching a leaf node with only pass results
+
+        stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
+        while stack:
+            hashes = [None, None]
+            screenshots = [None, None]
+
+            nodes, relation = stack.pop()
+
+            for i, node in enumerate(nodes):
+                success, data = self.get_hash(node.url, node.timeout)
+                if success is False:
+                    return {"status": data[0], "message": data[1]}
+
+                hashes[i], screenshots[i] = data
+
+            if self.is_pass(hashes[0], hashes[1], relation):
+                if nodes[1].references:
+                    stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
+                else:
+                    # We passed
+                    return {"status": "PASS", "message": None}
+
+        for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
+            if screenshot is None:
+                success, screenshot = self.retake_screenshot(node)
+                if success:
+                    screenshots[i] = screenshot
+
+        log_data = [{"url": nodes[0].url, "screenshot": screenshots[0]}, relation,
+                    {"url": nodes[1].url, "screenshot": screenshots[1]}]
+
+        return {"status": "FAIL",
+                "message": "\n".join(self.message),
+                "extra": {"reftest_screenshots": log_data}}
+
+    def retake_screenshot(self, node):
+        success, data = self.executor.screenshot(node.url,
+                                                 node.timeout *
+                                                 self.timeout_multiplier)
+        if not success:
+            return False, data
+
+        hash_val, _ = self.screenshot_cache[node.url]
+        self.screenshot_cache[node.url] = hash_val, data
+        return True, data
+
+
+class Protocol(object):
+    def __init__(self, executor, browser, http_server_url):
+        self.executor = executor
+        self.browser = browser
+        self.http_server_url = http_server_url
+
+    @property
+    def logger(self):
+        return self.executor.logger
+
+    def setup(self, runner):
+        pass
+
+    def teardown(self):
         pass
diff --git a/wptrunner/executors/executormarionette.py b/wptrunner/executors/executormarionette.py
index 1d03ef6d1eca70..5d21129c6953ca 100644
--- a/wptrunner/executors/executormarionette.py
+++ b/wptrunner/executors/executormarionette.py
@@ -17,7 +17,14 @@
 
 here = os.path.join(os.path.split(__file__)[0])
 
-from .base import TestExecutor, testharness_result_converter, reftest_result_converter
+from .base import (ExecutorException,
+                   Protocol,
+                   RefTestExecutor,
+                   RefTestImplementation,
+                   TestExecutor,
+                   TestharnessExecutor,
+                   testharness_result_converter,
+                   reftest_result_converter)
 from ..testrunner import Stop
 
 # Extra timeout to use after internal test timeout at which the harness
@@ -38,35 +45,27 @@ def do_delayed_imports():
     from marionette_driver import marionette, errors
 
 
-class MarionetteTestExecutor(TestExecutor):
-    def __init__(self,
-                 browser,
-                 http_server_url,
-                 timeout_multiplier=1,
-                 debug_args=None,
-                 close_after_done=True):
+class MarionetteProtocol(Protocol):
+    def __init__(self, executor, browser, http_server_url):
         do_delayed_imports()
 
-        TestExecutor.__init__(self, browser, http_server_url, timeout_multiplier, debug_args)
-        self.marionette_port = browser.marionette_port
+        Protocol.__init__(self, executor, browser, http_server_url)
         self.marionette = None
-
-        self.timer = None
-        self.window_id = str(uuid.uuid4())
-        self.close_after_done = close_after_done
+        self.marionette_port = browser.marionette_port
 
     def setup(self, runner):
         """Connect to browser via Marionette."""
-        self.runner = runner
+        Protocol.setup(self, runner)
 
         self.logger.debug("Connecting to marionette on port %i" % self.marionette_port)
         self.marionette = marionette.Marionette(host='localhost', port=self.marionette_port)
+
+        # XXX Move this timeout somewhere
         self.logger.debug("Waiting for Marionette connection")
         while True:
             success = self.marionette.wait_for_port(60)
             #When running in a debugger wait indefinitely for firefox to start
-            if success or self.debug_args is None:
+            if success or self.executor.debug_args is None:
                 break
 
         session_started = False
@@ -82,21 +81,21 @@ def setup(self, runner):
 
         if not success or not session_started:
             self.logger.warning("Failed to connect to Marionette")
-            self.runner.send_message("init_failed")
+            self.executor.runner.send_message("init_failed")
         else:
             try:
                 self.after_connect()
             except Exception:
                 self.logger.warning("Post-connection steps failed")
                 self.logger.error(traceback.format_exc())
-                self.runner.send_message("init_failed")
+                self.executor.runner.send_message("init_failed")
             else:
-                self.runner.send_message("init_succeeded")
+                self.executor.runner.send_message("init_succeeded")
 
     def teardown(self):
         try:
             self.marionette.delete_session()
-        except:
+        except Exception:
            # This is typically because the session never started
            pass
         del self.marionette
@@ -106,7 +105,7 @@ def is_alive(self):
         try:
             # Get a simple property over the connection
             self.marionette.current_window_handle
-        except:
+        except Exception:
             return False
         return True
@@ -116,174 +115,189 @@ def after_connect(self):
         self.logger.debug("Loading %s" % url)
         try:
             self.marionette.navigate(url)
-        except:
+        except Exception as e:
             self.logger.critical(
                 "Loading initial page %s failed. Ensure that the "
                 "there are no other programs bound to this port and "
                 "that your firewall rules or network setup does not "
-                "prevent access." % url)
-            raise
+                "prevent access.\n%s" % (url, traceback.format_exc(e)))
         self.marionette.execute_script(
             "document.title = '%s'" % threading.current_thread().name.replace("'", '"'))
 
-    def run_test(self, test):
-        """Run a single test.
-
-        This method is independent of the test type, and calls
-        do_test to implement the type-sepcific testing functionality.
-        """
-        # Lock to prevent races between timeouts and other results
-        # This might not be strictly necessary if we need to deal
-        # with the result changing post-hoc anyway (e.g. due to detecting
-        # a crash after we get the data back from marionette)
-        result = None
-        result_flag = threading.Event()
-        result_lock = threading.Lock()
-
-        timeout = test.timeout * self.timeout_multiplier
-
-        def timeout_func():
-            with result_lock:
-                if not result_flag.is_set():
-                    result_flag.set()
-                    result = (test.result_cls("EXTERNAL-TIMEOUT", None), [])
-                    self.runner.send_message("test_ended", test, result)
-
-        if self.debug_args is None:
-            self.timer = threading.Timer(timeout + 2 * extra_timeout, timeout_func)
-            self.timer.start()
 
+class MarionetteRun(object):
+    def __init__(self, logger, func, marionette, url, timeout):
+        self.logger = logger
+        self.result = None
+        self.marionette = marionette
+        self.func = func
+        self.url = url
+        self.timeout = timeout
+        self.result_flag = threading.Event()
+
+    def run(self):
+        timeout = self.timeout
         try:
-            self.marionette.set_script_timeout((timeout + extra_timeout) * 1000)
-        except IOError, errors.InvalidResponseException:
+            if timeout is not None:
+                self.marionette.set_script_timeout((timeout + extra_timeout) * 1000)
+            else:
+                # We just want it to never time out, really, but marionette doesn't
+                # make that possible. It also seems to time out immediately if the
+                # timeout is set too high. This works at least.
+                self.marionette.set_script_timeout(2**31 - 1)
+        except (IOError, errors.InvalidResponseException):
             self.logger.error("Lost marionette connection before starting test")
             return Stop
 
+        executor = threading.Thread(target = self._run)
+        executor.start()
+
+        if timeout is not None:
+            wait_timeout = timeout + 2 * extra_timeout
+        else:
+            wait_timeout = None
+
+        flag = self.result_flag.wait(wait_timeout)
+        if self.result is None:
+            self.logger.debug("Timed out waiting for a result")
+            assert not flag
+            self.result = False, ("EXTERNAL-TIMEOUT", None)
+
+        return self.result
+
+    def _run(self):
         try:
-            result = self.convert_result(test, self.do_test(test, timeout))
+            self.result = True, self.func(self.marionette, self.url, self.timeout)
         except errors.ScriptTimeoutException:
-            with result_lock:
-                if not result_flag.is_set():
-                    result_flag.set()
-                    result = (test.result_cls("EXTERNAL-TIMEOUT", None), [])
-            # Clean up any unclosed windows
-            # This doesn't account for the possibility the browser window
-            # is totally hung. That seems less likely since we are still
-            # getting data from marionette, but it might be just as well
-            # to do a full restart in this case
-            # XXX - this doesn't work at the moment because window_handles
-            # only returns OS-level windows (see bug 907197)
-            # while True:
-            #     handles = self.marionette.window_handles
-            #     self.marionette.switch_to_window(handles[-1])
-            #     if len(handles) > 1:
-            #         self.marionette.close()
-            #     else:
-            #         break
-            # Now need to check if the browser is still responsive and restart it if not
+            self.logger.debug("Got a marionette timeout")
+            self.result = False, ("EXTERNAL-TIMEOUT", None)
         except (socket.timeout, errors.InvalidResponseException, IOError):
             # This can happen on a crash
             # Also, should check after the test if the firefox process is still running
             # and otherwise ignore any other result and set it to crash
-            with result_lock:
-                if not result_flag.is_set():
-                    result_flag.set()
-                    result = (test.result_cls("CRASH", None), [])
+            self.result = False, ("CRASH", None)
+        except Exception as e:
+            message = getattr(e, "message", "")
+            if message:
+                message += "\n"
+            message += traceback.format_exc(e)
+            self.result = False, ("ERROR", e)
+
         finally:
-            if self.timer is not None:
-                self.timer.cancel()
+            self.result_flag.set()
 
-        with result_lock:
-            if result:
-                self.runner.send_message("test_ended", test, result)
 
-    def do_test(self, test, timeout):
-        """Run the steps specific to a given test type for Marionette-based tests.
+class MarionetteTestharnessExecutor(TestharnessExecutor):
+    def __init__(self, browser, http_server_url, timeout_multiplier=1, close_after_done=True,
+                 debug_args=None):
+        """Marionette-based executor for testharness.js tests"""
+        TestharnessExecutor.__init__(self, browser, http_server_url,
+                                     timeout_multiplier=timeout_multiplier,
+                                     debug_args=debug_args)
 
-        :param test:
-            the Test being run
-        :param timeout:
-            the timeout in seconds to give the test
-        """
-        raise NotImplementedError
+        self.protocol = MarionetteProtocol(self, browser, http_server_url)
+        self.script = open(os.path.join(here, "testharness_marionette.js")).read()
+        self.close_after_done = close_after_done
+        self.window_id = str(uuid.uuid4())
 
-class MarionetteTestharnessExecutor(MarionetteTestExecutor):
-    convert_result = testharness_result_converter
+        if marionette is None:
+            do_delayed_imports()
 
-    def __init__(self, *args, **kwargs):
-        """Marionette-based executor for testharness.js tests"""
-        MarionetteTestExecutor.__init__(self, *args, **kwargs)
-        self.script = open(os.path.join(here, "testharness_marionette.js")).read()
+    def is_alive(self):
+        return self.protocol.is_alive()
+
+    def do_test(self, test):
+        timeout = (test.timeout * self.timeout_multiplier if self.debug_args is None
+                   else None)
+        success, data = MarionetteRun(self.logger,
+                                      self.do_testharness,
+                                      self.protocol.marionette,
+                                      test.url,
+                                      timeout).run()
+        if success:
+            return self.convert_result(test, data)
+
+        return (test.result_cls(*data), [])
 
-    def do_test(self, test, timeout):
+    def do_testharness(self, marionette, url, timeout):
         if self.close_after_done:
-            self.marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")
+            marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")
+
+        if timeout is not None:
+            timeout_ms = str(timeout * 1000)
+        else:
+            timeout_ms = "null"
 
-        return self.marionette.execute_async_script(
-            self.script % {"abs_url": urlparse.urljoin(self.http_server_url, test.url),
-                           "url": test.url,
-                           "window_id": self.window_id,
-                           "timeout_multiplier": self.timeout_multiplier,
-                           "timeout": timeout * 1000,
-                           "explicit_timeout": self.debug_args is not None}, new_sandbox=False)
+        script = self.script % {"abs_url": urlparse.urljoin(self.http_server_url, url),
+                                "url": url,
+                                "window_id": self.window_id,
+                                "timeout_multiplier": self.timeout_multiplier,
+                                "timeout": timeout_ms,
+                                "explicit_timeout": timeout is None}
+        return marionette.execute_async_script(script, new_sandbox=False)
 
-class MarionetteReftestExecutor(MarionetteTestExecutor):
-    convert_result = reftest_result_converter
 
-    def __init__(self, *args, **kwargs):
+class MarionetteRefTestExecutor(RefTestExecutor):
+    def __init__(self, browser, http_server_url, timeout_multiplier=1,
+                 screenshot_cache=None, close_after_done=True, debug_args=None):
         """Marionette-based executor for reftests"""
-        MarionetteTestExecutor.__init__(self, *args, **kwargs)
+        RefTestExecutor.__init__(self,
+                                 browser,
+                                 http_server_url,
+                                 screenshot_cache=screenshot_cache,
+                                 timeout_multiplier=timeout_multiplier,
+                                 debug_args=debug_args)
+        self.protocol = MarionetteProtocol(self, browser, http_server_url)
+        self.implementation = RefTestImplementation(self)
+        self.close_after_done = close_after_done
+        self.has_window = False
+
         with open(os.path.join(here, "reftest.js")) as f:
             self.script = f.read()
         with open(os.path.join(here, "reftest-wait.js")) as f:
             self.wait_script = f.read()
-        self.ref_hashes = {}
-        self.ref_urls_by_hash = defaultdict(set)
-
-    def do_test(self, test, timeout):
-        test_url, ref_type, ref_url = test.url, test.ref_type, test.ref_url
-        hashes = {"test": None,
-                  "ref": self.ref_hashes.get(ref_url)}
-        self.marionette.execute_script(self.script)
-        self.marionette.switch_to_window(self.marionette.window_handles[-1])
-        for url_type, url in [("test", test_url), ("ref", ref_url)]:
-            if hashes[url_type] is None:
-                # Would like to do this in a new tab each time, but that isn't
-                # easy with the current state of marionette
-                full_url = urlparse.urljoin(self.http_server_url, url)
-                try:
-                    self.marionette.navigate(full_url)
-                except errors.MarionetteException:
-                    return {"status": "ERROR",
-                            "message": "Failed to load url %s" % (full_url,)}
-                if url_type == "test":
-                    self.wait()
-                screenshot = self.marionette.screenshot()
-                # strip off the data:img/png, part of the url
-                if screenshot.startswith("data:image/png;base64,"):
-                    screenshot = screenshot.split(",", 1)[1]
-                hashes[url_type] = hashlib.sha1(screenshot).hexdigest()
-
-        self.ref_urls_by_hash[hashes["ref"]].add(ref_url)
-        self.ref_hashes[ref_url] = hashes["ref"]
-
-        if ref_type == "==":
-            passed = hashes["test"] == hashes["ref"]
-        elif ref_type == "!=":
-            passed = hashes["test"] != hashes["ref"]
-        else:
-            raise ValueError
+    def is_alive(self):
+        return self.protocol.is_alive()
 
-        return {"status": "PASS" if passed else "FAIL",
-                "message": None}
+    def do_test(self, test):
+        if self.close_after_done and self.has_window:
+            self.protocol.marionette.close()
+            self.protocol.marionette.switch_to_window(
+                self.protocol.marionette.window_handles[-1])
+            self.has_window = False
 
-    def wait(self):
-        self.marionette.execute_async_script(self.wait_script)
+        if not self.has_window:
+            self.protocol.marionette.execute_script(self.script)
+            self.protocol.marionette.switch_to_window(self.protocol.marionette.window_handles[-1])
+            self.has_window = True
 
-    def teardown(self):
-        count = 0
-        for hash_val, urls in self.ref_urls_by_hash.iteritems():
-            if len(urls) > 1:
-                self.logger.info("The following %i reference urls appear to be equivalent:\n %s" %
-                                 (len(urls), "\n ".join(urls)))
-                count += len(urls) - 1
-        MarionetteTestExecutor.teardown(self)
+        result = self.implementation.run_test(test)
+
+        return self.convert_result(test, result)
+
+    def screenshot(self, url, timeout):
+        timeout = timeout if self.debug_args is None else None
+
+        return MarionetteRun(self.logger,
+                             self._screenshot,
+                             self.protocol.marionette,
+                             url,
+                             timeout).run()
+
+    def _screenshot(self, marionette, url, timeout):
+        full_url = urlparse.urljoin(self.http_server_url, url)
+        try:
+            marionette.navigate(full_url)
+        except errors.MarionetteException:
+            raise ExecutorException("ERROR", "Failed to load url %s" % (full_url,))
+
+        marionette.execute_async_script(self.wait_script)
+
+        screenshot = marionette.screenshot()
+        # strip off the data:image/png;base64, part of the url
+        if screenshot.startswith("data:image/png;base64,"):
+            screenshot = screenshot.split(",", 1)[1]
+
+        return screenshot
diff --git a/wptrunner/executors/executorselenium.py b/wptrunner/executors/executorselenium.py
index 2519eb74873678..72fcc5797d96df 100644
--- a/wptrunner/executors/executorselenium.py
+++ b/wptrunner/executors/executorselenium.py
@@ -11,7 +11,14 @@
 import urlparse
 import uuid
 
-from .base import TestExecutor, testharness_result_converter
+from .base import (ExecutorException,
+                   Protocol,
+                   RefTestExecutor,
+                   RefTestImplementation,
+                   TestExecutor,
+                   TestharnessExecutor,
+                   testharness_result_converter,
+                   reftest_result_converter)
 from ..testrunner import Stop
 
@@ -23,6 +30,7 @@
 required_files = [("testharness_runner.html", "", False),
                   ("testharnessreport.js", "resources/", True)]
 
+extra_timeout = 5
 
 def do_delayed_imports():
     global webdriver
@@ -31,16 +39,14 @@ def do_delayed_imports():
     from selenium.common import exceptions
 
 
-class SeleniumTestExecutor(TestExecutor):
-    def __init__(self, browser, http_server_url, capabilities,
-                 timeout_multiplier=1, debug_args=None, **kwargs):
+class SeleniumProtocol(Protocol):
+    def __init__(self, executor, browser, http_server_url, capabilities, **kwargs):
         do_delayed_imports()
-        TestExecutor.__init__(self, browser, http_server_url, timeout_multiplier, debug_args)
+
+        Protocol.__init__(self, executor, browser, http_server_url)
         self.capabilities = capabilities
         self.url = browser.webdriver_url
         self.webdriver = None
-        self.timer = None
-        self.window_id = str(uuid.uuid4())
 
     def setup(self, runner):
         """Connect to browser via Selenium's WebDriver implementation."""
@@ -60,7 +66,7 @@ def setup(self, runner):
 
         if not session_started:
             self.logger.warning("Failed to connect to Selenium")
-            self.runner.send_message("init_failed")
+            self.executor.runner.send_message("init_failed")
         else:
             try:
                 self.after_connect()
@@ -68,9 +74,9 @@ def setup(self, runner):
                 print >> sys.stderr, traceback.format_exc()
                 self.logger.warning(
                     "Failed to connect to navigate initial page")
-                self.runner.send_message("init_failed")
+                self.executor.runner.send_message("init_failed")
             else:
-                self.runner.send_message("init_succeeded")
+                self.executor.runner.send_message("init_succeeded")
 
     def teardown(self):
         self.logger.debug("Hanging up on Selenium session")
@@ -96,90 +102,141 @@ def after_connect(self):
         self.webdriver.execute_script("document.title = '%s'" %
                                       threading.current_thread().name.replace("'", '"'))
 
-    def run_test(self, test):
-        """Run a single test.
-
-        This method is independent of the test type, and calls
-        do_test to implement the type-sepcific testing functionality.
- """ - # Lock to prevent races between timeouts and other results - # This might not be strictly necessary if we need to deal - # with the result changing post-hoc anyway (e.g. due to detecting - # a crash after we get the data back from webdriver) - result = None - result_flag = threading.Event() - result_lock = threading.Lock() - - timeout = test.timeout * self.timeout_multiplier +class SeleniumRun(object): + def __init__(self, func, webdriver, url, timeout): + self.func = func + self.result = None + self.webdriver = webdriver + self.url = url + self.timeout = timeout + self.result_flag = threading.Event() - def timeout_func(): - with result_lock: - if not result_flag.is_set(): - result_flag.set() - result = (test.result_cls("EXTERNAL-TIMEOUT", None), []) - self.runner.send_message("test_ended", test, result) - - self.timer = threading.Timer(timeout + 10, timeout_func) - self.timer.start() + def run(self): + timeout = self.timeout try: - self.webdriver.set_script_timeout((timeout + 5) * 1000) + self.webdriver.set_script_timeout((timeout + extra_timeout) * 1000) except exceptions.ErrorInResponseException: self.logger.error("Lost webdriver connection") - self.runner.send_message("restart_test", test) return Stop + executor = threading.Thread(target=self._run) + executor.start() + + flag = self.result_flag.wait(timeout + 2 * extra_timeout) + if self.result is None: + assert not flag + self.result = False, ("EXTERNAL-TIMEOUT", None) + + return self.result + + def _run(self): try: - result = self.convert_result(test, self.do_test(test, timeout)) + self.result = True, self.func(self.webdriver, self.url, self.timeout) except exceptions.TimeoutException: - with result_lock: - if not result_flag.is_set(): - result_flag.set() - result = (test.result_cls("EXTERNAL-TIMEOUT", None), []) - # Clean up any unclosed windows - # This doesn't account for the possibility the browser window - # is totally hung. 
-            # getting data from marionette, but it might be just as well
-            # to do a full restart in this case
-            # XXX - this doesn't work at the moment because window_handles
-            # only returns OS-level windows (see bug 907197)
-            # while True:
-            #     handles = self.marionette.window_handles
-            #     self.marionette.switch_to_window(handles[-1])
-            #     if len(handles) > 1:
-            #         self.marionette.close()
-            #     else:
-            #         break
-            # Now need to check if the browser is still responsive and restart it if not
-
-            # TODO: try to detect crash here
+            self.result = False, ("EXTERNAL-TIMEOUT", None)
         except (socket.timeout, exceptions.ErrorInResponseException):
-            # This can happen on a crash
-            # Also, should check after the test if the firefox process is still running
-            # and otherwise ignore any other result and set it to crash
-            with result_lock:
-                if not result_flag.is_set():
-                    result_flag.set()
-                    result = (test.result_cls("CRASH", None), [])
+            self.result = False, ("CRASH", None)
+        except Exception as e:
+            message = getattr(e, "message", "")
+            if message:
+                message += "\n"
+            message += traceback.format_exc(e)
+            self.result = False, ("ERROR", e)
         finally:
-            self.timer.cancel()
-
-        with result_lock:
-            if result:
-                self.runner.send_message("test_ended", test, result)
+            self.result_flag.set()
+
+
+class SeleniumTestharnessExecutor(TestharnessExecutor):
+    def __init__(self, browser, http_server_url, timeout_multiplier=1,
+                 close_after_done=True, capabilities=None, debug_args=None):
+        """Selenium-based executor for testharness.js tests"""
+        TestharnessExecutor.__init__(self, browser, http_server_url,
+                                     timeout_multiplier=timeout_multiplier,
+                                     debug_args=debug_args)
+        self.protocol = SeleniumProtocol(self, browser, http_server_url, capabilities)
+        with open(os.path.join(here, "testharness_webdriver.js")) as f:
+            self.script = f.read()
+        self.close_after_done = close_after_done
+        self.window_id = str(uuid.uuid4())
 
+    def is_alive(self):
+        return self.protocol.is_alive()
 
-class SeleniumTestharnessExecutor(SeleniumTestExecutor):
-    convert_result = testharness_result_converter
+    def do_test(self, test):
+        success, data = SeleniumRun(self.do_testharness, self.protocol.webdriver,
+                                    test.url, test.timeout * self.timeout_multiplier).run()
+        if success:
+            return self.convert_result(test, data)
 
-    def __init__(self, *args, **kwargs):
-        SeleniumTestExecutor.__init__(self, *args, **kwargs)
-        self.script = open(os.path.join(here, "testharness_webdriver.js")).read()
+        return (test.result_cls(*data), [])
 
-    def do_test(self, test, timeout):
-        return self.webdriver.execute_async_script(
-            self.script % {"abs_url": urlparse.urljoin(self.http_server_url, test.url),
-                           "url": test.url,
+    def do_testharness(self, webdriver, url, timeout):
+        return webdriver.execute_async_script(
+            self.script % {"abs_url": urlparse.urljoin(self.http_server_url, url),
+                           "url": url,
                            "window_id": self.window_id,
                            "timeout_multiplier": self.timeout_multiplier,
                            "timeout": timeout * 1000})
+
+
+class SeleniumRefTestExecutor(RefTestExecutor):
+    def __init__(self, browser, http_server_url, timeout_multiplier=1,
+                 screenshot_cache=None, close_after_done=True,
+                 debug_args=None, capabilities=None):
+        """Selenium WebDriver-based executor for reftests"""
+        RefTestExecutor.__init__(self,
+                                 browser,
+                                 http_server_url,
+                                 screenshot_cache=screenshot_cache,
+                                 timeout_multiplier=timeout_multiplier,
+                                 debug_args=debug_args)
+        self.protocol = SeleniumProtocol(self, browser, http_server_url,
+                                         capabilities=capabilities)
+        self.implementation = RefTestImplementation(self)
+        self.close_after_done = close_after_done
+        self.has_window = False
+
+        with open(os.path.join(here, "reftest.js")) as f:
+            self.script = f.read()
+        with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
+            self.wait_script = f.read()
+
+    def is_alive(self):
+        return self.protocol.is_alive()
+
+    def do_test(self, test):
+        self.logger.info("Test requires OS-level window focus")
+
+        if self.close_after_done and self.has_window:
+            self.protocol.webdriver.close()
+            self.protocol.webdriver.switch_to_window(
+                self.protocol.webdriver.window_handles[-1])
+            self.has_window = False
+
+        if not self.has_window:
+            self.protocol.webdriver.execute_script(self.script)
+            self.protocol.webdriver.switch_to_window(
+                self.protocol.webdriver.window_handles[-1])
+            self.has_window = True
+
+        result = self.implementation.run_test(test)
+
+        return self.convert_result(test, result)
+
+    def screenshot(self, url, timeout):
+        return SeleniumRun(self._screenshot, self.protocol.webdriver,
+                           url, timeout).run()
+
+    def _screenshot(self, webdriver, url, timeout):
+        full_url = urlparse.urljoin(self.http_server_url, url)
+        webdriver.get(full_url)
+
+        webdriver.execute_async_script(self.wait_script)
+
+        screenshot = webdriver.get_screenshot_as_base64()
+
+        # strip off the data:image/png;base64, part of the url
+        if screenshot.startswith("data:image/png;base64,"):
+            screenshot = screenshot.split(",", 1)[1]
+
+        return screenshot
diff --git a/wptrunner/executors/executorservo.py b/wptrunner/executors/executorservo.py
index c3cf8ffb8c2695..a737cd59a2ba53 100644
--- a/wptrunner/executors/executorservo.py
+++ b/wptrunner/executors/executorservo.py
@@ -14,19 +14,26 @@
 
 from mozprocess import ProcessHandler
 
-from .base import testharness_result_converter, reftest_result_converter
+from .base import (ExecutorException,
+                   Protocol,
+                   RefTestImplementation,
+                   testharness_result_converter,
+                   reftest_result_converter)
 from .process import ProcessTestExecutor
 
 
 class ServoTestharnessExecutor(ProcessTestExecutor):
     convert_result = testharness_result_converter
 
-    def __init__(self, *args, **kwargs):
-        ProcessTestExecutor.__init__(self, *args, **kwargs)
+    def __init__(self, browser, http_server_url, timeout_multiplier=1, debug_args=None):
+        ProcessTestExecutor.__init__(self, browser, http_server_url,
+                                     timeout_multiplier=timeout_multiplier,
+                                     debug_args=debug_args)
         self.result_data = None
         self.result_flag = None
+        self.protocol = Protocol(self, browser, http_server_url)
 
-    def run_test(self, test):
+    def do_test(self, test):
         self.result_data = None
         self.result_flag = threading.Event()
 
@@ -45,7 +52,11 @@ def run_test(self, test):
         timeout = test.timeout * self.timeout_multiplier
 
         # Now wait to get the output we expect, or until we reach the timeout
-        self.result_flag.wait(timeout + 5)
+        if self.debug_args is None:
+            wait_timeout = timeout + 5
+        else:
+            wait_timeout = None
+        self.result_flag.wait(wait_timeout)
 
         if self.result_flag.is_set() and self.result_data is not None:
             self.result_data["test"] = test.url
@@ -93,70 +104,53 @@ def __exit__(self, *args, **kwargs):
             pass
 
 
-class ServoReftestExecutor(ProcessTestExecutor):
+class ServoRefTestExecutor(ProcessTestExecutor):
     convert_result = reftest_result_converter
 
-    def __init__(self, *args, **kwargs):
-        ProcessTestExecutor.__init__(self, *args, **kwargs)
-        self.ref_hashes = {}
-        self.ref_urls_by_hash = defaultdict(set)
+    def __init__(self, browser, http_server_url, binary=None, timeout_multiplier=1,
+                 screenshot_cache=None, debug_args=None):
+        ProcessTestExecutor.__init__(self,
+                                     browser,
+                                     http_server_url,
+                                     timeout_multiplier=timeout_multiplier,
+                                     debug_args=debug_args)
+
+        self.protocol = Protocol(self, browser, http_server_url)
+        self.screenshot_cache = screenshot_cache
+        self.implementation = RefTestImplementation(self)
         self.tempdir = tempfile.mkdtemp()
 
     def teardown(self):
         os.rmdir(self.tempdir)
         ProcessTestExecutor.teardown(self)
 
-    def run_test(self, test):
-        test_url, ref_type, ref_url = test.url, test.ref_type, test.ref_url
-        hashes = {"test": None,
-                  "ref": self.ref_hashes.get(ref_url)}
-
-        status = None
-
-        for url_type, url in [("test", test_url), ("ref", ref_url)]:
-            if hashes[url_type] is None:
-                full_url = urlparse.urljoin(self.http_server_url, url)
-
-                with TempFilename(self.tempdir) as output_path:
-                    self.command = [self.binary, "--cpu", "--hard-fail", "--exit",
-                                    "--output=%s" % output_path, full_url]
-
-                    timeout = test.timeout * self.timeout_multiplier
-                    self.proc = ProcessHandler(self.command,
-                                               processOutputLine=[self.on_output])
-                    self.proc.run()
-                    rv = self.proc.wait(timeout=timeout)
-
-                if rv is None:
-                    status = "EXTERNAL-TIMEOUT"
-                    self.proc.kill()
-                    break
-
-                if rv < 0:
-                    status = "CRASH"
-                    break
-
-                with open(output_path) as f:
-                    # Might need to strip variable headers or something here
-                    data = f.read()
-                    hashes[url_type] = hashlib.sha1(data).hexdigest()
-
-        if status is None:
-            self.ref_urls_by_hash[hashes["ref"]].add(ref_url)
-            self.ref_hashes[ref_url] = hashes["ref"]
-
-            if ref_type == "==":
-                passed = hashes["test"] == hashes["ref"]
-            elif ref_type == "!=":
-                passed = hashes["test"] != hashes["ref"]
-            else:
-                raise ValueError
+    def screenshot(self, url, timeout):
+        full_url = urlparse.urljoin(self.http_server_url, url)
 
-            status = "PASS" if passed else "FAIL"
+        with TempFilename(self.tempdir) as output_path:
+            self.command = [self.binary, "--cpu", "--hard-fail", "--exit",
+                            "--output=%s" % output_path, full_url]
 
-        result = self.convert_result(test, {"status": status, "message": None})
-        self.runner.send_message("test_ended", test, result)
+            self.proc = ProcessHandler(self.command,
+                                       processOutputLine=[self.on_output])
+            self.proc.run()
+            rv = self.proc.wait(timeout=timeout)
 
+        if rv is None:
+            self.proc.kill()
+            return False, ("EXTERNAL-TIMEOUT", None)
+
+        if rv < 0:
+            return False, ("CRASH", None)
+
+        with open(output_path) as f:
+            # Might need to strip variable headers or something here
+            data = f.read()
+        return True, data
+
+    def do_test(self, test):
+        result = self.implementation.run_test(test)
+        return self.convert_result(test, result)
 
     def on_output(self, line):
         line = line.decode("utf8", "replace")
diff --git a/wptrunner/executors/process.py b/wptrunner/executors/process.py
index 8c0be981c22204..bff8b5bfb53620 100644
--- a/wptrunner/executors/process.py
+++ b/wptrunner/executors/process.py
@@ -19,5 +19,5 @@ def setup(self, runner):
     def is_alive(self):
         return True
 
-    def run_test(self, test):
+    def do_test(self, test):
         raise NotImplementedError
diff --git a/wptrunner/executors/reftest-wait.js b/wptrunner/executors/reftest-wait.js
index e70498fbbd5635..a2bc66565331b6 100644
--- a/wptrunner/executors/reftest-wait.js
+++ b/wptrunner/executors/reftest-wait.js
@@ -14,4 +14,9 @@ var root = document.documentElement;
 var observer = new MutationObserver(test);
 
 observer.observe(root, {attributes: true});
-test();
+
+if (document.readyState != "complete") {
+  onload = test;
+} else {
+  test();
+}
diff --git a/wptrunner/executors/reftest-wait_webdriver.js b/wptrunner/executors/reftest-wait_webdriver.js
new file mode 100644
index 00000000000000..187f5daac832e0
--- /dev/null
+++ b/wptrunner/executors/reftest-wait_webdriver.js
@@ -0,0 +1,23 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var callback = arguments[arguments.length - 1];
+
+function test(x) {
+  if (!root.classList.contains("reftest-wait")) {
+    observer.disconnect();
+    callback();
+  }
+}
+
+var root = document.documentElement;
+var observer = new MutationObserver(test);
+
+observer.observe(root, {attributes: true});
+
+if (document.readyState != "complete") {
+  onload = test;
+} else {
+  test();
+}
diff --git a/wptrunner/executors/testharness_marionette.js b/wptrunner/executors/testharness_marionette.js
index 115243e66b518a..9ca9ede01ce950 100644
--- a/wptrunner/executors/testharness_marionette.js
+++ b/wptrunner/executors/testharness_marionette.js
@@ -18,7 +18,10 @@ window.wrappedJSObject.done = function(tests, status) {
 
 window.wrappedJSObject.win = window.open("%(abs_url)s", "%(window_id)s");
 
-var timer = setTimeout(function() {
-    log("Timeout fired");
-    window.wrappedJSObject.win.timeout();
-}, %(timeout)s);
+var timer = null;
+if (%(timeout)s) {
+    timer = setTimeout(function() {
+        log("Timeout fired");
+        window.wrappedJSObject.win.timeout();
+    }, %(timeout)s);
+}
diff --git a/wptrunner/manifestexpected.py b/wptrunner/manifestexpected.py
index 11f3460ab3bdcd..7f5231d6af8941 100644
--- a/wptrunner/manifestexpected.py
+++ b/wptrunner/manifestexpected.py
@@ -89,8 +89,6 @@ def __init__(self, name):
     @property
     def is_empty(self):
         required_keys = set(["type"])
-        if self.test_type == "reftest":
-            required_keys |= set(["reftype", "refurl"])
         if set(self._data.keys()) != required_keys:
             return False
         return all(child.is_empty for child in self.children)
@@ -101,11 +99,7 @@ def test_type(self):
 
     @property
     def id(self):
-        url = urlparse.urljoin(self.parent.url, self.name)
-        if self.test_type == "reftest":
-            return (url, self.get("reftype"), self.get("refurl"))
-        else:
-            return url
+        return urlparse.urljoin(self.parent.url, self.name)
 
     def disabled(self):
         """Boolean indicating whether the test is disabled"""
diff --git a/wptrunner/manifestinclude.py b/wptrunner/manifestinclude.py
index e00a436867a7b0..62d206cdccc816 100644
--- a/wptrunner/manifestinclude.py
+++ b/wptrunner/manifestinclude.py
@@ -8,6 +8,7 @@
 representing the file and each subnode representing a subdirectory
 that should be included or excluded.
 """
+import os
 
 from wptmanifest.node import DataNode
 from wptmanifest.backends import conditional
@@ -68,7 +69,14 @@ def _get_path_components(self, test):
         assert test_url[0] == "/"
         return [item for item in reversed(test_url.split("/")) if item]
 
-    def _add_rule(self, url, direction):
+    def _add_rule(self, test_manifests, url, direction):
+        maybe_path = os.path.abspath(os.path.join(os.curdir, url))
+        if os.path.exists(maybe_path):
+            for manifest, data in test_manifests.iteritems():
+                rel_path = os.path.relpath(maybe_path, data["tests_path"])
+                if ".." not in rel_path.split(os.sep):
diff --git a/wptrunner/manifestexpected.py b/wptrunner/manifestexpected.py
index 11f3460ab3bdcd..7f5231d6af8941 100644
--- a/wptrunner/manifestexpected.py
+++ b/wptrunner/manifestexpected.py
@@ -89,8 +89,6 @@ def __init__(self, name):
     @property
     def is_empty(self):
         required_keys = set(["type"])
-        if self.test_type == "reftest":
-            required_keys |= set(["reftype", "refurl"])
         if set(self._data.keys()) != required_keys:
             return False
         return all(child.is_empty for child in self.children)
@@ -101,11 +99,7 @@ def test_type(self):
 
     @property
     def id(self):
-        url = urlparse.urljoin(self.parent.url, self.name)
-        if self.test_type == "reftest":
-            return (url, self.get("reftype"), self.get("refurl"))
-        else:
-            return url
+        return urlparse.urljoin(self.parent.url, self.name)
 
     def disabled(self):
         """Boolean indicating whether the test is disabled"""
diff --git a/wptrunner/manifestinclude.py b/wptrunner/manifestinclude.py
index e00a436867a7b0..62d206cdccc816 100644
--- a/wptrunner/manifestinclude.py
+++ b/wptrunner/manifestinclude.py
@@ -8,6 +8,7 @@ representing the file and each subnode representing a subdirectory that should
 be included or excluded.
 """
+import os
 
 from wptmanifest.node import DataNode
 from wptmanifest.backends import conditional
@@ -68,7 +69,14 @@ def _get_path_components(self, test):
         assert test_url[0] == "/"
         return [item for item in reversed(test_url.split("/")) if item]
 
-    def _add_rule(self, url, direction):
+    def _add_rule(self, test_manifests, url, direction):
+        maybe_path = os.path.abspath(os.path.join(os.curdir, url))
+        if os.path.exists(maybe_path):
+            for manifest, data in test_manifests.iteritems():
+                rel_path = os.path.relpath(maybe_path, data["tests_path"])
+                if ".." not in rel_path.split(os.sep):
+                    url = rel_path
+
         assert direction in ("include", "exclude")
 
         components = [item for item in reversed(url.split("/")) if item]
@@ -84,21 +92,21 @@ def _add_rule(self, url, direction):
         skip = False if direction == "include" else True
         node.set("skip", str(skip))
 
-    def add_include(self, url_prefix):
+    def add_include(self, test_manifests, url_prefix):
         """Add a rule indicating that tests under a url path
         should be included in test runs
 
         :param url_prefix: The url prefix to include
         """
-        return self._add_rule(url_prefix, "include")
+        return self._add_rule(test_manifests, url_prefix, "include")
 
-    def add_exclude(self, url_prefix):
+    def add_exclude(self, test_manifests, url_prefix):
         """Add a rule indicating that tests under a url path
         should be excluded from test runs
 
         :param url_prefix: The url prefix to exclude
         """
-        return self._add_rule(url_prefix, "exclude")
+        return self._add_rule(test_manifests, url_prefix, "exclude")
 
 
 def get_manifest(manifest_path):
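Editor's note: the _add_rule change above is what lets include/exclude rules be given as filesystem paths as well as URL prefixes — a path that exists locally is rebased against each manifest's tests_path. A standalone sketch of that translation (url_for_path is a hypothetical helper for illustration, not wptrunner API):

import os

def url_for_path(arg, tests_path):
    maybe_path = os.path.abspath(os.path.join(os.curdir, arg))
    rel_path = os.path.relpath(maybe_path, tests_path)
    if ".." not in rel_path.split(os.sep):
        # The path lies inside this manifest's test root; use it as the URL prefix
        return rel_path.replace(os.sep, "/")
    return arg  # not under this test root; treat the argument as a URL prefix

print url_for_path("dom/nodes", "/home/user/web-platform-tests")
# -> "dom/nodes" when invoked from the root of the tests checkout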
diff --git a/wptrunner/manifestupdate.py b/wptrunner/manifestupdate.py
index 16b176b3acd047..052de8f8469c25 100644
--- a/wptrunner/manifestupdate.py
+++ b/wptrunner/manifestupdate.py
@@ -116,26 +116,18 @@ def create(cls, test_type, test_id):
 
         :param test_type: The type of the test
         :param test_id: The id of the test"""
-        if test_type == "reftest":
-            url = test_id[0]
-        else:
-            url = test_id
+        url = test_id
         name = url.split("/")[-1]
         node = DataNode(name)
         self = cls(node)
         self.set("type", test_type)
-        if test_type == "reftest":
-            self.set("reftype", test_id[1])
-            self.set("refurl", test_id[2])
         self._from_file = False
         return self
 
     @property
     def is_empty(self):
         required_keys = set(["type"])
-        if self.test_type == "reftest":
-            required_keys |= set(["reftype", "refurl"])
         if set(self._data.keys()) != required_keys:
             return False
         return all(child.is_empty for child in self.children)
@@ -149,11 +141,7 @@ def test_type(self):
     @property
     def id(self):
         """The id of the test represented by this TestNode"""
-        url = urlparse.urljoin(self.parent.url, self.name)
-        if self.test_type == "reftest":
-            return (url, self.get("reftype", None), self.get("refurl", None))
-        else:
-            return url
+        return urlparse.urljoin(self.parent.url, self.name)
 
     def disabled(self, run_info):
         """Boolean indicating whether this test is disabled when run in an
diff --git a/wptrunner/testharnessreport.js b/wptrunner/testharnessreport.js
index 70b81fd3565c3d..046e8a5fe55e00 100644
--- a/wptrunner/testharnessreport.js
+++ b/wptrunner/testharnessreport.js
@@ -10,6 +10,7 @@ if (window.opener && "timeout_multiplier" in window.opener) {
 if (window.opener && window.opener.explicit_timeout) {
     props["explicit_timeout"] = window.opener.explicit_timeout;
 }
+
 setup(props);
 
 add_completion_callback(function() {
 add_completion_callback(function(tests, status) {
diff --git a/wptrunner/testloader.py b/wptrunner/testloader.py
index 7e8171803ba6d8..bddf1b905d4c89 100644
--- a/wptrunner/testloader.py
+++ b/wptrunner/testloader.py
@@ -12,11 +12,13 @@
 from mozlog import structured
 
 manifest = None
+manifest_update = None
 
 def do_delayed_imports():
     # This relies on an already loaded module having set the sys.path correctly :(
-    global manifest
-    import manifest
+    global manifest, manifest_update
+    from manifest import manifest
+    from manifest import update as manifest_update
 
 class TestChunker(object):
     def __init__(self, total_chunks, chunk_number):
@@ -187,20 +189,22 @@ def __call__(self, manifest_iter):
 
 class TestFilter(object):
-    def __init__(self, include=None, exclude=None, manifest_path=None):
+    def __init__(self, test_manifests, include=None, exclude=None, manifest_path=None):
+        test_manifests = test_manifests
+
         if manifest_path is not None and include is None:
             self.manifest = manifestinclude.get_manifest(manifest_path)
         else:
             self.manifest = manifestinclude.IncludeManifest.create()
 
-        if include is not None:
+        if include:
             self.manifest.set("skip", "true")
             for item in include:
-                self.manifest.add_include(item)
+                self.manifest.add_include(test_manifests, item)
 
-        if exclude is not None:
+        if exclude:
             for item in exclude:
-                self.manifest.add_exclude(item)
+                self.manifest.add_exclude(test_manifests, item)
 
     def __call__(self, manifest_iter):
         for test_path, tests in manifest_iter:
@@ -214,7 +218,6 @@ def __call__(self, manifest_iter):
 
 class ManifestLoader(object):
-
     def __init__(self, test_paths, force_manifest_update=False):
         do_delayed_imports()
         self.test_paths = test_paths
@@ -252,9 +255,9 @@ def update_manifest(self, manifest_path, tests_path, url_base="/",
         if not json_data:
             manifest_file = manifest.Manifest(None, url_base)
         else:
-            manifest_file = manifest.Manifest.from_json(json_data)
+            manifest_file = manifest.Manifest.from_json(tests_path, json_data)
 
-        manifest.update(tests_path, url_base, manifest_file)
+        manifest_update.update(tests_path, url_base, manifest_file)
 
         manifest.write(manifest_file, manifest_path)
 
@@ -262,7 +265,7 @@ def load_manifest(self, tests_path, metadata_path, url_base="/"):
         if (not os.path.exists(manifest_path) or
             self.force_manifest_update):
             self.update_manifest(manifest_path, tests_path, url_base)
-        manifest_file = manifest.load(manifest_path)
+        manifest_file = manifest.load(tests_path, manifest_path)
         if manifest_file.url_base != url_base:
             self.logger.info("Updating url_base in manifest from %s to %s" % (manifest_file.url_base,
                                                                               url_base))
@@ -273,7 +276,7 @@ class TestLoader(object):
 
 class TestLoader(object):
     def __init__(self,
-                 test_paths,
+                 test_manifests,
                  test_types,
                  test_filter,
                  run_info,
@@ -282,11 +285,10 @@ def __init__(self,
                  chunk_number=1,
                  force_manifest_update=False):
 
-        self.test_paths = test_paths
         self.test_types = test_types
         self.test_filter = test_filter
         self.run_info = run_info
-        self.manifests = ManifestLoader(test_paths, force_manifest_update).load()
+        self.manifests = test_manifests
         self.tests = None
         self.disabled_tests = None
@@ -316,6 +318,7 @@ def get_test(self, manifest_test, expected_file):
             expected = expected_file.get_test(manifest_test.id)
         else:
             expected = None
+
         return wpttest.from_manifest(manifest_test, expected)
 
     def load_expected_manifest(self, test_manifest, metadata_path, test_path):
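Editor's note: with the new signature above, TestFilter needs the loaded manifests up front so that path-style include/exclude arguments can be translated. A hypothetical usage sketch (test_paths and manifest_iter stand in for the dict and the iterable of (test_path, tests) pairs the loader normally provides):

test_manifests = testloader.ManifestLoader(test_paths).load()
test_filter = testloader.TestFilter(test_manifests,
                                    include=["/dom/", "dom/nodes/Document-createElement.html"],
                                    exclude=["/dom/interfaces.html"])
filtered = test_filter(manifest_iter)  # yields only the matching (test_path, tests) pairs

Note also that the include/exclude checks become plain truthiness tests, so an explicit empty list now behaves the same as passing nothing at all.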
diff --git a/wptrunner/testrunner.py b/wptrunner/testrunner.py
index a0bea5ae290c20..9c8980e0f01724 100644
--- a/wptrunner/testrunner.py
+++ b/wptrunner/testrunner.py
@@ -328,6 +328,7 @@ def init_failed():
         # remote control method
         if self.debug_args is None:
             self.init_timer = threading.Timer(self.browser.init_timeout, init_failed)
+
         test_queue = self.test_source.get_queue()
         if test_queue is None:
             self.logger.info("No more tests")
@@ -507,7 +508,8 @@ def test_ended(self, test, results):
         self.logger.test_end(test.id,
                              status,
                              message=file_result.message,
-                             expected=expected)
+                             expected=expected,
+                             extra=file_result.extra)
 
         self.test = None
diff --git a/wptrunner/update/sync.py b/wptrunner/update/sync.py
index 5231414abd238e..db36f1874aa954 100644
--- a/wptrunner/update/sync.py
+++ b/wptrunner/update/sync.py
@@ -128,10 +128,10 @@ class UpdateManifest(Step):
     provides = ["initial_rev"]
 
     def create(self, state):
-        import manifest
+        from manifest import manifest, update
         test_manifest = state.test_manifest
         state.initial_rev = test_manifest.rev
-        manifest.update(state.sync["path"], "/", test_manifest)
+        update.update(state.sync["path"], "/", test_manifest)
         manifest.write(test_manifest, os.path.join(state.metadata_path,
                                                    "MANIFEST.json"))
diff --git a/wptrunner/update/update.py b/wptrunner/update/update.py
index 9d9219ba6d8f53..a57146b4144a3b 100644
--- a/wptrunner/update/update.py
+++ b/wptrunner/update/update.py
@@ -9,11 +9,12 @@
 from sync import SyncFromUpstreamRunner
 from tree import GitTree, HgTree, NoVCSTree
 
+from .. import wptrunner
 from base import Step, StepRunner, exit_clean, exit_unclean
 from state import State
 
 def setup_paths(serve_root):
-    sys.path.insert(0, os.path.join(serve_root, "tools", "scripts"))
+    wptrunner.do_delayed_imports(serve_root)
 
 class LoadConfig(Step):
     """Step for loading configuration from the ini file and kwargs."""
@@ -82,9 +83,8 @@ def create(self, state):
             return
 
         kwargs = state.kwargs
-        with state.push(["local_tree", "sync_tree", "paths"]):
+        with state.push(["local_tree", "sync_tree", "paths", "serve_root"]):
             state.run_log = kwargs["run_log"]
-            state.serve_root = kwargs["serve_root"]
             state.ignore_existing = kwargs["ignore_existing"]
             state.no_patch = kwargs["no_patch"]
             runner = MetadataUpdateRunner(self.logger, state)
@@ -108,11 +108,9 @@ def __init__(self, logger, runner_cls=UpdateRunner, **kwargs):
         :param kwargs: Command line arguments
         """
         self.runner_cls = runner_cls
-        if kwargs["serve_root"] is None:
-            kwargs["serve_root"] = kwargs["test_paths"]["/"]["tests_path"]
-
+        self.serve_root = kwargs["test_paths"]["/"]["tests_path"]
         #This must be before we try to reload state
-        setup_paths(kwargs["serve_root"])
+        setup_paths(self.serve_root)
 
         self.state = State(logger)
         self.kwargs = kwargs
@@ -136,6 +134,8 @@ def run(self, **kwargs):
         else:
             self.state.kwargs = self.kwargs
 
+        self.state.serve_root = self.serve_root
+
         update_runner = self.runner_cls(self.logger, self.state)
         rv = update_runner.run()
         if rv in (exit_clean, None):
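Editor's note: the update runner above no longer takes --serve-root; it derives the serve root from the test paths and funnels delayed imports through wptrunner.do_delayed_imports. In sketch form, under the assumption of a placeholder checkout path (the try/except mirrors the failure handling the patch adds to do_delayed_imports):

import sys

test_paths = {"/": {"tests_path": "/path/to/web-platform-tests",
                    "metadata_path": "/path/to/metadata"}}  # placeholder paths
serve_root = test_paths["/"]["tests_path"]

# Roughly what the delayed import now does: put the checkout itself on
# sys.path and import serve as a package, instead of adding tools/scripts.
sys.path.insert(0, serve_root)
try:
    from tools.serve import serve
except ImportError:
    serve = None  # expected outside a real web-platform-tests checkout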
parser.add_argument("--processes", action="store", type=int, default=1, help="Number of simultaneous processes to use") - parser.add_argument("--include", action="append", type=slash_prefixed, - help="URL prefix to include") - parser.add_argument("--exclude", action="append", type=slash_prefixed, - help="URL prefix to exclude") - parser.add_argument("--include-manifest", type=abs_path, - help="Path to manifest listing tests to include") parser.add_argument("--run-by-dir", type=int, nargs="?", default=False, help="Split run into groups by directories. With a parameter," "limit the depth of splits e.g. --run-by-dir=1 to split by top-level" "directory") - parser.add_argument("--total-chunks", action="store", type=int, default=1, - help="Total number of chunks to use") - parser.add_argument("--this-chunk", action="store", type=int, default=1, - help="Chunk number to run") - parser.add_argument("--chunk-type", action="store", choices=["none", "equal_time", "hash"], - default=None, help="Chunking type to use") - - parser.add_argument("--list-test-groups", action="store_true", - default=False, - help="List the top level directories containing tests that will run.") - parser.add_argument("--list-disabled", action="store_true", - default=False, - help="List the tests that are disabled on the current platform") - parser.add_argument("--timeout-multiplier", action="store", type=float, default=None, help="Multiplier relative to standard test timeout to use") parser.add_argument("--repeat", action="store", type=int, default=1, @@ -114,37 +85,73 @@ def create_parser(product_choices=None): parser.add_argument("--product", action="store", choices=product_choices, default="firefox", help="Browser against which to run tests") - parser.add_argument('--debugger', - help="run under a debugger, e.g. gdb or valgrind") - parser.add_argument('--debugger-args', help="arguments to the debugger") - parser.add_argument('--pause-on-unexpected', action="store_true", - help="Halt the test runner when an unexpected result is encountered") - - parser.add_argument("--symbols-path", action="store", type=url_or_path, - help="Path or url to symbols file used to analyse crash minidumps.") - parser.add_argument("--stackwalk-binary", action="store", type=abs_path, - help="Path to stackwalker program used to analyse minidumps.") + parser.add_argument("--list-test-groups", action="store_true", + default=False, + help="List the top level directories containing tests that will run.") + parser.add_argument("--list-disabled", action="store_true", + default=False, + help="List the tests that are disabled on the current platform") - parser.add_argument("--ssl-type", action="store", default=None, + test_selection_group = parser.add_argument_group("Test Selection") + test_selection_group.add_argument("--test-types", action="store", + nargs="*", default=["testharness", "reftest"], + choices=["testharness", "reftest"], + help="Test types to run") + test_selection_group.add_argument("--include", action="append", type=slash_prefixed, + help="URL prefix to include") + test_selection_group.add_argument("--exclude", action="append", type=slash_prefixed, + help="URL prefix to exclude") + test_selection_group.add_argument("--include-manifest", type=abs_path, + help="Path to manifest listing tests to include") + + debugging_group = parser.add_argument_group("Debugging") + debugging_group.add_argument('--debugger', + help="run under a debugger, e.g. 
+    debugging_group = parser.add_argument_group("Debugging")
+    debugging_group.add_argument('--debugger',
+                                 help="run under a debugger, e.g. gdb or valgrind")
+    debugging_group.add_argument('--debugger-args', help="arguments to the debugger")
+    debugging_group.add_argument('--pause-on-unexpected', action="store_true",
+                                 help="Halt the test runner when an unexpected result is encountered")
+
+    debugging_group.add_argument("--symbols-path", action="store", type=url_or_path,
+                                 help="Path or url to symbols file used to analyse crash minidumps.")
+    debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
+                                 help="Path to stackwalker program used to analyse minidumps.")
+
+    chunking_group = parser.add_argument_group("Test Chunking")
+    chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
+                                help="Total number of chunks to use")
+    chunking_group.add_argument("--this-chunk", action="store", type=int, default=1,
+                                help="Chunk number to run")
+    chunking_group.add_argument("--chunk-type", action="store", choices=["none", "equal_time", "hash"],
+                                default=None, help="Chunking type to use")
+
+    ssl_group = parser.add_argument_group("SSL/TLS")
+    ssl_group.add_argument("--ssl-type", action="store", default=None,
                         choices=["openssl", "pregenerated", "none"],
                         help="Type of ssl support to enable (running without ssl may lead to spurious errors)")
 
-    parser.add_argument("--openssl-binary", action="store",
+    ssl_group.add_argument("--openssl-binary", action="store",
                         help="Path to openssl binary", default="openssl")
-    parser.add_argument("--certutil-binary", action="store",
+    ssl_group.add_argument("--certutil-binary", action="store",
                         help="Path to certutil binary for use with Firefox + ssl")
-
-    parser.add_argument("--ca-cert-path", action="store", type=abs_path,
+    ssl_group.add_argument("--ca-cert-path", action="store", type=abs_path,
                         help="Path to ca certificate when using pregenerated ssl certificates")
-    parser.add_argument("--host-key-path", action="store", type=abs_path,
+    ssl_group.add_argument("--host-key-path", action="store", type=abs_path,
                         help="Path to host private key when using pregenerated ssl certificates")
-    parser.add_argument("--host-cert-path", action="store", type=abs_path,
+    ssl_group.add_argument("--host-cert-path", action="store", type=abs_path,
                         help="Path to host certificate when using pregenerated ssl certificates")
 
+    gecko_group = parser.add_argument_group("Gecko-specific")
+    gecko_group.add_argument("--prefs-root", dest="prefs_root", action="store", type=abs_path,
+                             help="Path to the folder containing browser prefs")
 
-    parser.add_argument("--b2g-no-backup", action="store_true", default=False,
-                        help="Don't backup device before testrun with --product=b2g")
+    b2g_group = parser.add_argument_group("B2G-specific")
+    b2g_group.add_argument("--b2g-no-backup", action="store_true", default=False,
+                           help="Don't backup device before testrun with --product=b2g")
+
+    parser.add_argument("test_list", nargs="*",
+                        help="List of URLs for tests to run, or paths including tests to run. "
+                             "(equivalent to --include)")
" + "(equivalent to --include)") commandline.add_logging_group(parser) return parser @@ -159,8 +166,7 @@ def set_from_config(kwargs): kwargs["config_path"] = config_path kwargs["config"] = config.read(kwargs["config_path"]) - keys = {"paths": [("serve", "serve_root", True), - ("prefs", "prefs_root", True), + keys = {"paths": [("prefs", "prefs_root", True), ("run_info", "run_info", True)], "web-platform-tests": [("remote_url", "remote_url", False), ("branch", "branch", False), @@ -209,6 +215,9 @@ def get_test_paths(config): def exe_path(name): + if name is None: + return + path = find_executable(name) if os.access(path, os.X_OK): return path @@ -235,12 +244,11 @@ def check_args(kwargs): print "Fatal: %s path %s is not a directory" % (name, path) sys.exit(1) - if kwargs["serve_root"] is None: - if "/" in kwargs["test_paths"]: - kwargs["serve_root"] = kwargs["test_paths"]["/"]["tests_path"] - else: - print >> sys.stderr, "Unable to determine server root path" - sys.exit(1) + if kwargs["test_list"]: + if kwargs["include"] is not None: + kwargs["include"].extend(kwargs["test_list"]) + else: + kwargs["include"] = kwargs["test_list"] if kwargs["run_info"] is None: kwargs["run_info"] = kwargs["config_path"] @@ -313,9 +321,6 @@ def create_parser_update(): help="Path to web-platform-tests"), parser.add_argument("--sync-path", action="store", type=abs_path, help="Path to store git checkout of web-platform-tests during update"), - parser.add_argument("--serve-root", action="store", type=abs_path, dest="serve_root", - help="Path to web-platform-tests checkout containing serve.py and manifest.py" - " (defaults to test_root)") parser.add_argument("--remote_url", action="store", help="URL of web-platfrom-tests repository to sync against"), parser.add_argument("--branch", action="store", type=abs_path, diff --git a/wptrunner/wptrunner.py b/wptrunner/wptrunner.py index 9760a25b5b9487..1d70a714a9cba3 100644 --- a/wptrunner/wptrunner.py +++ b/wptrunner/wptrunner.py @@ -67,21 +67,17 @@ def setup_stdlib_logger(): def do_delayed_imports(serve_root): - global serve, manifest, sslutils + global serve, sslutils sys.path.insert(0, serve_root) - sys.path.insert(0, str(os.path.join(serve_root, "tools"))) - sys.path.insert(0, str(os.path.join(serve_root, "tools", "scripts"))) + failed = [] try: - import serve + from tools.serve import serve except ImportError: failed.append("serve") - try: - import manifest - except ImportError: - failed.append("manifest") + try: import sslutils except ImportError: @@ -123,10 +119,9 @@ def __call__(self, data): class TestEnvironment(object): - def __init__(self, serve_path, test_paths, ssl_env, options): + def __init__(self, test_paths, ssl_env, options): """Context manager that owns the test environment i.e. 
diff --git a/wptrunner/wptrunner.py b/wptrunner/wptrunner.py
index 9760a25b5b9487..1d70a714a9cba3 100644
--- a/wptrunner/wptrunner.py
+++ b/wptrunner/wptrunner.py
@@ -67,21 +67,17 @@ def setup_stdlib_logger():
 
 def do_delayed_imports(serve_root):
-    global serve, manifest, sslutils
+    global serve, sslutils
     sys.path.insert(0, serve_root)
-    sys.path.insert(0, str(os.path.join(serve_root, "tools")))
-    sys.path.insert(0, str(os.path.join(serve_root, "tools", "scripts")))
+
     failed = []
     try:
-        import serve
+        from tools.serve import serve
     except ImportError:
         failed.append("serve")
-    try:
-        import manifest
-    except ImportError:
-        failed.append("manifest")
+
     try:
         import sslutils
     except ImportError:
@@ -123,10 +119,9 @@ class TestEnvironment(object):
-    def __init__(self, serve_path, test_paths, ssl_env, options):
+    def __init__(self, test_paths, ssl_env, options):
         """Context manager that owns the test environment i.e.
         the http and websockets servers"""
-        self.serve_path = serve_path
         self.test_paths = test_paths
         self.ssl_env = ssl_env
         self.server = None
@@ -156,7 +151,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
             server.kill()
 
     def load_config(self):
-        default_config_path = os.path.join(self.serve_path, "config.default.json")
+        default_config_path = os.path.join(serve_path(self.test_paths), "config.default.json")
         local_config_path = os.path.join(here, "config.json")
 
         with open(default_config_path) as f:
@@ -172,7 +167,7 @@ def load_config(self):
         local_config["ssl"]["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)
 
         config = serve.merge_json(default_config, local_config)
-        config["doc_root"] = self.serve_path
+        config["doc_root"] = serve_path(self.test_paths)
 
         if not self.ssl_env.ssl_enabled:
             config["ports"]["https"] = [None]
@@ -195,9 +190,13 @@ def setup_server_logging(self):
             log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
             server_logger.component_filter = log_filter
 
-        serve.logger = server_logger
-        #Set as the default logger for wptserve
-        serve.set_logger(server_logger)
+        try:
+            #Set as the default logger for wptserve
+            serve.set_logger(server_logger)
+            serve.logger = server_logger
+        except Exception:
+            # This happens if logging has already been set up for wptserve
+            pass
 
     def setup_routes(self):
         for url, paths in self.test_paths.iteritems():
@@ -228,7 +227,7 @@ def copy_required_files(self):
         logger.info("Placing required files in server environment.")
         for source, destination, copy_if_exists in self.required_files:
             source_path = os.path.join(here, source)
-            dest_path = os.path.join(self.serve_path, destination, os.path.split(source)[1])
+            dest_path = os.path.join(serve_path(self.test_paths), destination, os.path.split(source)[1])
             dest_exists = os.path.exists(dest_path)
             if not dest_exists or copy_if_exists:
                 if dest_exists:
@@ -308,31 +307,42 @@ def write(self, data):
     def flush(self):
         pass
 
-def list_test_groups(serve_root, test_paths, test_types, product, **kwargs):
+def get_loader(test_paths, product, debug=False, **kwargs):
+    run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=debug)
 
-    do_delayed_imports(serve_root)
+    test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"]).load()
 
-    run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=False)
     test_filter = testloader.TestFilter(include=kwargs["include"],
                                         exclude=kwargs["exclude"],
-                                        manifest_path=kwargs["include_manifest"])
-    test_loader = testloader.TestLoader(test_paths,
-                                        test_types,
+                                        manifest_path=kwargs["include_manifest"],
+                                        test_manifests=test_manifests)
+
+    test_loader = testloader.TestLoader(test_manifests,
+                                        kwargs["test_types"],
                                         test_filter,
-                                        run_info)
+                                        run_info,
+                                        chunk_type=kwargs["chunk_type"],
+                                        total_chunks=kwargs["total_chunks"],
+                                        chunk_number=kwargs["this_chunk"])
+    return run_info, test_loader
+
+def list_test_groups(test_paths, product, **kwargs):
+
+    do_delayed_imports(serve_path(test_paths))
 
-    for item in sorted(test_loader.groups(test_types)):
+    run_info, test_loader = get_loader(test_paths, product,
+                                       **kwargs)
+
+    for item in sorted(test_loader.groups(kwargs["test_types"])):
         print item
 
-def list_disabled(serve_root, test_paths, test_types, product, **kwargs):
-    do_delayed_imports(serve_root)
+def list_disabled(test_paths, product, **kwargs):
+    do_delayed_imports(serve_path(test_paths))
 
     rv = []
-    run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=False)
-    test_loader = testloader.TestLoader(test_paths,
-                                        test_types,
-                                        testloader.TestFilter(),
-                                        run_info)
+
+    run_info, test_loader = get_loader(test_paths, product,
+                                       **kwargs)
 
     for test_type, tests in test_loader.disabled_tests.iteritems():
         for test in tests:
@@ -351,8 +361,10 @@ def get_ssl_kwargs(**kwargs):
         args = {}
     return args
 
+def serve_path(test_paths):
+    return test_paths["/"]["tests_path"]
 
-def run_tests(config, serve_root, test_paths, product, **kwargs):
+def run_tests(config, test_paths, product, **kwargs):
     logging_queue = None
     logging_thread = None
     original_stdio = (sys.stdout, sys.stderr)
@@ -366,9 +378,7 @@ def run_tests(config, serve_root, test_paths, product, **kwargs):
             sys.stderr = LoggingWrapper(logging_queue, prefix="STDERR")
             logging_thread.start()
 
-        do_delayed_imports(serve_root)
-
-        run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=False)
+        do_delayed_imports(serve_path(test_paths))
 
         (check_args,
          browser_cls, get_browser_kwargs,
@@ -383,19 +393,11 @@ def run_tests(config, serve_root, test_paths, product, **kwargs):
         unexpected_total = 0
 
         if "test_loader" in kwargs:
+            run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=False)
             test_loader = kwargs["test_loader"]
         else:
-            test_filter = testloader.TestFilter(include=kwargs["include"],
-                                                exclude=kwargs["exclude"],
-                                                manifest_path=kwargs["include_manifest"])
-            test_loader = testloader.TestLoader(test_paths,
-                                                kwargs["test_types"],
-                                                test_filter,
-                                                run_info,
-                                                kwargs["chunk_type"],
-                                                kwargs["total_chunks"],
-                                                kwargs["this_chunk"],
-                                                kwargs["manifest_update"])
+            run_info, test_loader = get_loader(test_paths, product,
+                                               **kwargs)
 
         if kwargs["run_by_dir"] is False:
             test_source_cls = testloader.SingleTestSource
@@ -407,8 +409,7 @@ def run_tests(config, serve_root, test_paths, product, **kwargs):
 
         logger.info("Using %i client processes" % kwargs["processes"])
 
-        with TestEnvironment(serve_root,
-                             test_paths,
+        with TestEnvironment(test_paths,
                              ssl_env,
                              env_options) as test_environment:
             try:
@@ -437,7 +438,8 @@ def run_tests(config, serve_root, test_paths, product, **kwargs):
                             logger.test_end(test.id, status="SKIP")
 
                     executor_cls = executor_classes.get(test_type)
-                    executor_kwargs = get_executor_kwargs(base_server,
+                    executor_kwargs = get_executor_kwargs(test_type,
+                                                          base_server,
                                                           **kwargs)
 
                     if executor_cls is None:
@@ -482,22 +484,28 @@ def run_tests(config, serve_root, test_paths, product, **kwargs):
         if logging_thread is not None:
             logging_thread.join(10)
         logging_queue.close()
+        logger.info("queue closed")
 
     return unexpected_total == 0
 
 def main():
     """Main entry point when calling from the command line"""
-    kwargs = wptcommandline.parse_args()
+    try:
+        kwargs = wptcommandline.parse_args()
 
-    if kwargs["prefs_root"] is None:
-        kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
+        if kwargs["prefs_root"] is None:
+            kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
 
-    setup_logging(kwargs, {"raw": sys.stdout})
+        setup_logging(kwargs, {"raw": sys.stdout})
 
-    if kwargs["list_test_groups"]:
-        list_test_groups(**kwargs)
-    elif kwargs["list_disabled"]:
-        list_disabled(**kwargs)
-    else:
-        return run_tests(**kwargs)
+        if kwargs["list_test_groups"]:
+            list_test_groups(**kwargs)
+        elif kwargs["list_disabled"]:
+            list_disabled(**kwargs)
+        else:
+            return run_tests(**kwargs)
+    except Exception:
+        import pdb, traceback
+        print traceback.format_exc()
+        pdb.post_mortem()
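Editor's note: get_loader above becomes the single place where run info, the test filter, and the loader are built, so run_tests, list_test_groups, and list_disabled stay in sync. A sketch of calling it directly, trimmed to the kwargs the function actually reads (values illustrative; do_delayed_imports must already have run):

run_info, test_loader = get_loader(test_paths,
                                   "firefox",
                                   run_info=None,
                                   manifest_update=False,
                                   include=["/dom/"],
                                   exclude=None,
                                   include_manifest=None,
                                   test_types=["testharness", "reftest"],
                                   chunk_type="none",
                                   total_chunks=1,
                                   this_chunk=1)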
diff --git a/wptrunner/wpttest.py b/wptrunner/wpttest.py
index 95c5f0eeb2f355..db79da2457bfd5 100644
--- a/wptrunner/wpttest.py
+++ b/wptrunner/wpttest.py
@@ -11,12 +11,13 @@
 
 class Result(object):
-    def __init__(self, status, message, expected=None):
+    def __init__(self, status, message, expected=None, extra=None):
         if status not in self.statuses:
             raise ValueError("Unrecognised status %s" % status)
         self.status = status
         self.message = message
         self.expected = expected
+        self.extra = extra
 
 class SubtestResult(object):
@@ -67,7 +68,7 @@ def _update_mozinfo(self, metadata_root):
     while path != os.path.expanduser('~'):
         if path in dirs:
             break
-        dirs.add(path)
+        dirs.add(str(path))
         path = os.path.split(path)[0]
     mozinfo.find_and_update_from_json(*dirs)
 
@@ -82,7 +83,7 @@ class Test(object):
     result_cls = None
     subtest_result_cls = None
 
-    def __init__(self, url, expected_metadata, timeout=None, path=None):
+    def __init__(self, url, expected_metadata, timeout=DEFAULT_TIMEOUT, path=None):
         self.url = url
         self._expected_metadata = expected_metadata
         self.timeout = timeout
@@ -91,6 +92,15 @@ def __init__(self, url, expected_metadata, timeout=None, path=None):
     def __eq__(self, other):
         return self.id == other.id
 
+    @classmethod
+    def from_manifest(cls, manifest_item, expected_metadata):
+        timeout = LONG_TIMEOUT if manifest_item.timeout == "long" else DEFAULT_TIMEOUT
+        return cls(manifest_item.url,
+                   expected_metadata,
+                   timeout=timeout,
+                   path=manifest_item.path)
+
+
     @property
     def id(self):
         return self.url
@@ -150,24 +160,75 @@ def id(self):
 
 class ReftestTest(Test):
     result_cls = ReftestResult
 
-    def __init__(self, url, ref_url, ref_type, expected, timeout=None, path=None):
+    def __init__(self, url, expected, references, timeout=DEFAULT_TIMEOUT, path=None):
         self.url = url
-        self.ref_url = ref_url
-        if ref_type not in ("==", "!="):
-            raise ValueError
-        self.ref_type = ref_type
+        for _, ref_type in references:
+            if ref_type not in ("==", "!="):
+                raise ValueError
         self._expected_metadata = expected
         self.timeout = timeout
         self.path = path
+        self.references = references
+
+    @classmethod
+    def from_manifest(cls, manifest_test,
+                      expected_metadata,
+                      nodes=None,
+                      references_seen=None):
+
+        timeout = LONG_TIMEOUT if manifest_test.timeout == "long" else DEFAULT_TIMEOUT
+
+        if nodes is None:
+            nodes = {}
+        if references_seen is None:
+            references_seen = set()
+
+        url = manifest_test.url
+
+        node = cls(manifest_test.url,
+                   expected_metadata,
+                   [],
+                   timeout=timeout,
+                   path=manifest_test.path)
+
+        nodes[url] = node
+
+        for ref_url, ref_type in manifest_test.references:
+            comparison_key = (ref_type,) + tuple(sorted([url, ref_url]))
+            if ref_url in nodes:
+                manifest_node = ref_url
+                if comparison_key in references_seen:
+                    # We have reached a cycle so stop here
+                    # Note that just seeing a node for the second time is not
+                    # enough to detect a cycle because
+                    # A != B != C != A must include C != A
+                    # but A == B == A should not include the redundant B == A.
+                    continue
+
+            references_seen.add(comparison_key)
+
+            manifest_node = manifest_test.manifest.get_reference(ref_url)
+            if manifest_node:
+                reference = ReftestTest.from_manifest(manifest_node,
+                                                      None,
+                                                      nodes,
+                                                      references_seen)
+            else:
+                reference = ReftestTest(ref_url, None, [])
+
+            node.references.append((reference, ref_type))
+
+        return node
 
     @property
     def id(self):
-        return self.url, self.ref_type, self.ref_url
+        return self.url
 
     @property
     def keys(self):
         return ("reftype", "refurl")
 
+
 manifest_test_cls = {"reftest": ReftestTest,
                      "testharness": TestharnessTest,
                      "manual": ManualTest}
@@ -176,17 +237,4 @@ def keys(self):
 
 def from_manifest(manifest_test, expected_metadata):
     test_cls = manifest_test_cls[manifest_test.item_type]
-    timeout = LONG_TIMEOUT if manifest_test.timeout == "long" else DEFAULT_TIMEOUT
-
-    if test_cls == ReftestTest:
-        return test_cls(manifest_test.url,
-                        manifest_test.ref_url,
-                        manifest_test.ref_type,
-                        expected_metadata,
-                        timeout=timeout,
-                        path=manifest_test.path)
-    else:
-        return test_cls(manifest_test.url,
-                        expected_metadata,
-                        timeout=timeout,
-                        path=manifest_test.path)
+    return test_cls.from_manifest(manifest_test, expected_metadata)
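Editor's note: the cycle detection in ReftestTest.from_manifest keys each comparison on the relation plus the unordered pair of URLs, which is why A == B == A collapses to one comparison while a genuine mismatch cycle keeps all of its edges. A small worked example of the key construction (URLs are hypothetical):

def comparison_key(ref_type, url, ref_url):
    return (ref_type,) + tuple(sorted([url, ref_url]))

print comparison_key("==", "/a.html", "/b.html")  # ('==', '/a.html', '/b.html')
print comparison_key("==", "/b.html", "/a.html")  # same key, so B == A is skipped as redundant
print comparison_key("!=", "/b.html", "/c.html")  # distinct keys keep every edge of
print comparison_key("!=", "/c.html", "/a.html")  # A != B != C != A in play, including C != A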