diff --git a/grizzly/common/runner.py b/grizzly/common/runner.py
index e64659ee..0aa975e0 100644
--- a/grizzly/common/runner.py
+++ b/grizzly/common/runner.py
@@ -248,7 +248,7 @@ def initial(self) -> bool:
         """
         return self._tests_run < 2
 
-    def post_launch(self, delay: int = -1) -> None:
+    def post_launch(self, delay: int = 0) -> None:
         """Perform actions after launching browser before loading test cases.
 
         Args:
@@ -257,41 +257,38 @@ def post_launch(self, delay: int = -1) -> None:
         Returns:
             None
         """
-        if delay >= 0 and not self.startup_failure:
-            with TestCase("post_launch_delay.html", "None") as content:
-                content.add_from_file(
-                    Path(__file__).parent / "post_launch_delay.html",
-                    file_name=content.entry_point,
-                    copy=True,
-                )
-                srv_map = ServerMap()
-                srv_map.set_redirect("grz_start", content.entry_point, required=False)
-                srv_map.set_redirect("grz_continue", "grz_start", required=True)
-                # temporarily override server timeout
-                org_timeout = self._server.timeout
-                # add time buffer to redirect delay
-                # in practice this should take a few seconds (~10s)
-                # in extreme cases ~40s (slow build + debugger)
-                self._server.timeout = delay + 180
-                if delay > 0:
-                    LOG.info("Browser launched, continuing in %ds...", delay)
-                # serve prompt page
-                server_status, _ = self._server.serve_path(
-                    content.root,
-                    continue_cb=self._target.monitor.is_healthy,
-                    server_map=srv_map,
-                )
-                # restore server timeout
-                self._server.timeout = org_timeout
-                if server_status != Served.ALL:
-                    self.startup_failure = True
-                if server_status == Served.TIMEOUT:
-                    # this should never happen with a correctly functioning build
-                    LOG.warning("Target hung after launch")
-
-        if self.startup_failure:
-            # TODO: we need a better way to handle delayed startup failures
-            LOG.warning("Post launch check failed!")
+        assert delay >= 0
+        with TestCase("post_launch_delay.html", "None") as content:
+            content.add_from_file(
+                Path(__file__).parent / "post_launch_delay.html",
+                file_name=content.entry_point,
+                copy=True,
+            )
+            srv_map = ServerMap()
+            srv_map.set_redirect("grz_start", content.entry_point, required=False)
+            srv_map.set_redirect("grz_continue", "grz_start", required=True)
+            # temporarily override server timeout
+            org_timeout = self._server.timeout
+            # add time buffer to redirect delay
+            # in practice this should take a few seconds (~10s)
+            # in extreme cases ~40s (slow build + debugger)
+            self._server.timeout = delay + 180
+            if delay > 0:
+                LOG.info("Browser launched, continuing in %ds...", delay)
+            # serve prompt page
+            server_status, _ = self._server.serve_path(
+                content.root,
+                continue_cb=self._target.monitor.is_healthy,
+                server_map=srv_map,
+            )
+            # restore server timeout
+            self._server.timeout = org_timeout
+            if server_status != Served.ALL:
+                self.startup_failure = True
+                if server_status == Served.TIMEOUT:
+                    # this should never happen with a correctly functioning build
+                    LOG.warning("Target hung after launch")
+                LOG.warning("Post launch check failed!")
 
     def run(
         self,
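Note: post_launch() no longer special-cases a negative delay to skip the post launch delay page; it now asserts delay >= 0, and both the skip decision and the startup-failure check move to the call sites (see the replay.py and session.py hunks below). A minimal sketch of the new caller-side contract, using unittest.mock stand-ins for the Sapphire server and the Target — the mocks, URL, and delay value here are illustrative assumptions, not part of the patch:

from unittest.mock import MagicMock

from grizzly.common.runner import Runner

server = MagicMock(timeout=10)  # stands in for a sapphire.Sapphire instance
target = MagicMock(launch_timeout=30)  # stands in for a grizzly Target
runner = Runner(server, target)
runner.launch("http://localhost/")

post_launch_delay = -1  # negative now means "skip the delay page entirely"
if post_launch_delay >= 0 and not runner.startup_failure:
    # post_launch() asserts delay >= 0, so callers filter out negative
    # values instead of passing them through as before
    runner.post_launch(delay=post_launch_delay)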
diff --git a/grizzly/common/test_runner.py b/grizzly/common/test_runner.py
index 8ffb6d5a..df61617d 100644
--- a/grizzly/common/test_runner.py
+++ b/grizzly/common/test_runner.py
@@ -44,7 +44,7 @@ def test_runner_01(mocker, coverage, scheme):
         testcase.add_from_bytes(b"", testcase.entry_point)
         serv_files = {"a.bin": testcase.root / "a.bin"}
         server.serve_path.return_value = (Served.ALL, serv_files)
-        result = runner.run([], serv_map, testcase, coverage=coverage)
+        result = runner.run(set(), serv_map, testcase, coverage=coverage)
         assert testcase.https == (scheme == "https")
     assert runner.initial
     assert runner._tests_run == 1
@@ -81,7 +81,7 @@ def test_runner_02(mocker):
     runner = Runner(server, target, relaunch=1)
     assert runner._relaunch == 1
     smap = ServerMap()
-    result = runner.run([], smap, testcase)
+    result = runner.run(set(), smap, testcase)
     assert runner.initial
     assert result.attempted
     assert target.close.call_count == 1
@@ -100,7 +100,7 @@ def test_runner_02(mocker):
     target.monitor.is_idle.return_value = True
     runner = Runner(server, target, relaunch=1)
     assert runner._relaunch == 1
-    result = runner.run([], ServerMap(), testcase)
+    result = runner.run(set(), ServerMap(), testcase)
     assert result.attempted
     assert target.close.call_count == 1
     assert target.monitor.is_healthy.call_count > 0
@@ -111,7 +111,7 @@ def test_runner_02(mocker):
     target.monitor.is_healthy.return_value = False
     for _ in range(2):
         smap = ServerMap()
-        result = runner.run([], smap, testcase)
+        result = runner.run(set(), smap, testcase)
         assert result.attempted
         assert target.close.call_count == 0
         assert target.monitor.is_healthy.call_count == 0
@@ -154,7 +154,7 @@ def test_runner_03(mocker, srv_result, served):
     target.check_result.return_value = Result.NONE
     test = mocker.Mock(spec_set=TestCase, entry_point="x", required=["x"])
     runner = Runner(server, target)
-    result = runner.run([], ServerMap(), test)
+    result = runner.run(set(), ServerMap(), test)
     assert runner.initial
     assert runner.startup_failure
     assert result
@@ -224,7 +224,7 @@ def test_runner_05(mocker, served, attempted, target_result, status):
     testcase = mocker.Mock(spec_set=TestCase, entry_point="a.bin", required=["a.bin"])
     runner = Runner(server, target)
     runner.launch("http://a/")
-    result = runner.run([], ServerMap(), testcase)
+    result = runner.run(set(), ServerMap(), testcase)
     assert result.attempted == attempted
     assert result.status == status
     assert not result.timeout
@@ -242,7 +242,7 @@ def test_runner_06(mocker):
     runner = Runner(server, target, idle_threshold=0.01, idle_delay=0.01, relaunch=10)
     assert runner._idle is not None
     result = runner.run(
-        [],
+        set(),
         ServerMap(),
         mocker.Mock(spec_set=TestCase, entry_point="a.bin", required=tuple(serv_files)),
     )
@@ -357,7 +357,7 @@ def test_runner_10(mocker, tmp_path):
         "test/inc_file3.txt": inc3,
     }
     server.serve_path.return_value = (Served.ALL, serv_files)
-    result = runner.run([], smap, test)
+    result = runner.run(set(), smap, test)
     assert result.attempted
     assert result.status == Result.NONE
     assert "inc_file.bin" in test
@@ -386,7 +386,7 @@ def test_runner_11(mocker):
             "extra.js": test.root / "extra.js",
         },
    )
-    result = runner.run([], ServerMap(), test)
+    result = runner.run(set(), ServerMap(), test)
     assert result.attempted
     assert result.status == Result.NONE
     assert "test.html" in test
@@ -401,8 +401,6 @@
         (10, (Served.ALL, None), False),
         # continue immediately
         (0, (Served.ALL, None), False),
-        # skip post launch delay page
-        (-1, None, False),
         # startup failure
         (0, (Served.NONE, None), True),
         # target hang while loading content
@@ -412,10 +410,7 @@
 def test_runner_12(mocker, delay, srv_result, startup_failure):
     """test Runner.post_launch()"""
     srv_timeout = 1
-    server = mocker.Mock(
-        spec_set=Sapphire,
-        timeout=srv_timeout,
-    )
+    server = mocker.Mock(spec_set=Sapphire, timeout=srv_timeout)
     server.serve_path.return_value = srv_result
     runner = Runner(server, mocker.Mock(spec_set=Target, launch_timeout=30))
     runner.launch("http://a/")
diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py
index e3b4e888..43561d28 100644
--- a/grizzly/replay/replay.py
+++ b/grizzly/replay/replay.py
@@ -293,7 +293,7 @@ def run(
         idle_threshold: int = 0,
         launch_attempts: int = 3,
         on_iteration_cb: Optional[Callable[[], None]] = None,
-        post_launch_delay: int = 0,
+        post_launch_delay: int = -1,
     ) -> List[ReplayResult]:
         """Run testcase replay.
 
@@ -313,7 +313,7 @@ def run(
             launch_attempts: Number of attempts to launch the browser.
             on_iteration_cb: Called every time a single iteration is run.
             post_launch_delay: Number of seconds to wait before continuing after the
-                browser is launched.
+                browser is launched. A negative number skips redirect.
 
         Returns:
             ReplayResults that were found running provided testcases.
@@ -379,7 +379,8 @@ def harness_fn(_: str) -> bytes:  # pragma: no cover
                     time_limit=time_limit if self._harness else None,
                 )
                 runner.launch(location, max_retries=launch_attempts)
-                runner.post_launch(delay=post_launch_delay)
+                if post_launch_delay >= 0 and not runner.startup_failure:
+                    runner.post_launch(delay=post_launch_delay)
                 # TODO: avoid running test case if runner.startup_failure is True
                 # run tests
                 durations: List[float] = []
diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py
index 721b62c5..38177c11 100644
--- a/grizzly/replay/test_replay.py
+++ b/grizzly/replay/test_replay.py
@@ -33,24 +33,21 @@ def _fake_save_logs(result_logs):
         log_fp.write("    #1 0x1337dd in bar /file2.c:1806:19\n")
 
 
-def test_replay_01(mocker, server, tmp_path):
+@mark.parametrize("post_launch_delay", [0, -1])
+def test_replay_01(mocker, server, tmp_path, post_launch_delay):
     """test ReplayManager.run() - no repro"""
     target = mocker.Mock(spec_set=Target, closed=True, launch_timeout=30)
     target.check_result.return_value = Result.NONE
     target.monitor.is_healthy.return_value = False
-    iter_cb = mocker.Mock()
-    (tmp_path / "test.html").touch()
-    server.serve_path.return_value = (
-        Served.ALL,
-        {"test.html": str(tmp_path / "test.html")},
-    )
+    (tmp_path / "a.html").touch()
+    server.serve_path.return_value = (Served.ALL, {"a.html": str(tmp_path / "a.html")})
     with TestCase.load(tmp_path) as testcase:
         with ReplayManager([], server, target, use_harness=True, relaunch=1) as replay:
-            assert not replay.run([testcase], 10, on_iteration_cb=iter_cb)
+            assert not replay.run([testcase], 10, post_launch_delay=post_launch_delay)
             assert replay.signature is None
             assert replay.status
             assert replay.status.ignored == 0
-            assert replay.status.iteration == iter_cb.call_count == 1
+            assert replay.status.iteration == 1
             assert replay.status.results.total == 0
     assert target.monitor.is_healthy.call_count == 1
     assert target.close.call_count == 2
@@ -66,16 +63,11 @@ def test_replay_02(mocker, server, tmp_path):
     target.check_result.return_value = Result.NONE
     target.monitor.is_healthy.return_value = False
     iter_cb = mocker.Mock()
-    (tmp_path / "test.html").touch()
-    server.serve_path.return_value = (
-        Served.ALL,
-        {"test.html": str(tmp_path / "test.html")},
-    )
+    (tmp_path / "a.html").touch()
+    server.serve_path.return_value = (Served.ALL, {"a.html": str(tmp_path / "a.html")})
     with TestCase.load(tmp_path) as testcase:
         with ReplayManager([], server, target, use_harness=True, relaunch=20) as replay:
-            assert not replay.run(
-                [testcase], 10, repeat=10, min_results=1, on_iteration_cb=iter_cb
-            )
+            assert not replay.run([testcase], 10, repeat=10, on_iteration_cb=iter_cb)
             assert replay.signature is None
             assert replay.status
             assert replay.status.ignored == 0
@@ -97,11 +89,8 @@ def test_replay_03(mocker, server, tmp_path):
     )
     target = mocker.Mock(spec_set=Target, closed=False, launch_timeout=30)
     target.check_result.return_value = Result.NONE
-    (tmp_path / "test.html").touch()
-    server.serve_path.return_value = (
-        Served.ALL,
-        {"test.html": str(tmp_path / "test.html")},
-    )
+    (tmp_path / "a.html").touch()
+    server.serve_path.return_value = (Served.ALL, {"a.html": str(tmp_path / "a.html")})
     with TestCase.load(tmp_path) as testcase:
         with ReplayManager([], server, target, use_harness=True, relaunch=20) as replay:
             assert not replay.run([testcase], 10, repeat=10, min_results=1)
@@ -114,21 +103,14 @@ def test_replay_03(mocker, server, tmp_path):
     assert target.close.call_count == 1
 
 
-@mark.parametrize(
-    "good_sig",
-    [
-        # success - FM parsed signature
-        True,
-        # signature could not be parsed
-        False,
-    ],
-)
-def test_replay_04(mocker, server, tmp_path, good_sig):
+@mark.parametrize("sig_parsed", [True, False])
+@mark.parametrize("post_launch_delay", [0, -1])
+def test_replay_04(mocker, server, tmp_path, sig_parsed, post_launch_delay):
     """test ReplayManager.run() - successful repro"""
     target = mocker.Mock(spec_set=Target, binary=Path("bin"), launch_timeout=30)
     target.check_result.return_value = Result.FOUND
     target.monitor.is_healthy.return_value = False
-    if good_sig:
+    if sig_parsed:
         target.save_logs = _fake_save_logs
     else:
 
@@ -139,16 +121,13 @@ def _save_logs(result_logs):
             (log_path / "log_stdout.txt").write_text("STDOUT log\n")
 
         target.save_logs = _save_logs
-    (tmp_path / "test.html").touch()
-    server.serve_path.return_value = (
-        Served.ALL,
-        {"test.html": str(tmp_path / "test.html")},
-    )
+    (tmp_path / "a.html").touch()
+    server.serve_path.return_value = (Served.ALL, {"a.html": str(tmp_path / "a.html")})
     with TestCase.load(tmp_path) as testcase:
         with ReplayManager([], server, target, relaunch=10) as replay:
             assert replay.signature is None
-            results = replay.run([testcase], 10)
-            if good_sig:
+            results = replay.run([testcase], 10, post_launch_delay=post_launch_delay)
+            if sig_parsed:
                 assert replay.signature is not None
             else:
                 assert replay.signature is None
@@ -216,7 +195,7 @@ def test_replay_06(mocker, server):
         (Served.REQUEST, {"x": "/fake/path"}),
     )
     with ReplayManager([], server, target, use_harness=True, relaunch=10) as replay:
-        assert replay.run(tests, 10, repeat=2, post_launch_delay=-1)
+        assert replay.run(tests, 10, repeat=2)
         assert replay.status
         assert replay.status.ignored == 0
         assert replay.status.iteration == 2
@@ -500,7 +479,7 @@ def test_replay_15(mocker, server):
         [], server, target, any_crash=True, use_harness=True, relaunch=2
     ) as replay:
         with raises(KeyboardInterrupt):
-            replay.run(tests, 10, repeat=3, min_results=2, post_launch_delay=-1)
+            replay.run(tests, 10, repeat=3, min_results=2)
         assert replay.signature is None
         assert replay.status
         assert replay.status.iteration == 2
@@ -542,7 +521,7 @@ def test_replay_17(mocker, server):
         mocker.MagicMock(spec_set=TestCase, entry_point="a.html") for _ in range(3)
     ]
     with ReplayManager([], server, target, use_harness=True, relaunch=2) as replay:
-        assert not replay.run(tests, 10, repeat=10, post_launch_delay=-1)
+        assert not replay.run(tests, 10, repeat=10)
         assert server.serve_path.call_count == 30
         assert target.close.call_count == 6
         assert target.launch.call_count == 5
@@ -572,7 +551,7 @@ def test_replay_18(mocker, server):
         mocker.MagicMock(spec_set=TestCase, entry_point=f"{i}.html") for i in range(3)
     ]
     with ReplayManager([], server, target, use_harness=True) as replay:
-        results = replay.run(tests, 30, post_launch_delay=-1)
+        results = replay.run(tests, 30)
         assert target.close.call_count == 2
         assert replay.status
         assert replay.status.ignored == 0
@@ -595,13 +574,13 @@ def test_replay_19(mocker, server, tmp_path):
     )
     with TestCase.load(tmp_path) as testcase:
         with ReplayManager([], server, target, use_harness=True) as replay:
-            assert not replay.run([testcase], 30, post_launch_delay=-1)
+            assert not replay.run([testcase], 30)
             assert replay.status
             assert replay.status.iteration == 1
-            assert not replay.run([testcase], 30, post_launch_delay=-1)
+            assert not replay.run([testcase], 30)
             assert replay.status
             assert replay.status.iteration == 1
-            assert not replay.run([testcase], 30, post_launch_delay=-1)
+            assert not replay.run([testcase], 30)
             assert replay.status
             assert replay.status.iteration == 1
     assert server.serve_path.call_count == 3
@@ -736,7 +715,7 @@ def test_replay_23(
     target.monitor.is_healthy.return_value = False
     test = mocker.MagicMock(spec_set=TestCase, entry_point="a.html", hang=is_hang)
     with ReplayManager([], server, target, signature=signature, relaunch=10) as replay:
-        found = replay.run([test], 10, expect_hang=expect_hang, post_launch_delay=-1)
+        found = replay.run([test], 10, expect_hang=expect_hang)
         assert replay.status
         assert replay.status.iteration == 1
         assert replay.status.ignored == ignored
@@ -810,11 +789,8 @@ def _save_logs_variation(result_logs):
 
     target.save_logs.side_effect = _save_logs_variation
 
-    (tmp_path / "test.html").touch()
-    server.serve_path.return_value = (
-        Served.ALL,
-        {"test.html": str(tmp_path / "test.html")},
-    )
+    (tmp_path / "a.html").touch()
+    server.serve_path.return_value = (Served.ALL, {"a.html": str(tmp_path / "a.html")})
     with TestCase.load(tmp_path) as testcase:
         with ReplayManager([], server, target, relaunch=10, signature=sig) as replay:
             results = replay.run([testcase], 10, min_results=2, repeat=2)
@@ -879,11 +855,8 @@ def _save_logs_variation(result_logs):
 
     target.save_logs.side_effect = _save_logs_variation
     has_sig = include_stack[0]
-    (tmp_path / "test.html").touch()
-    server.serve_path.return_value = (
-        Served.ALL,
-        {"test.html": str(tmp_path / "test.html")},
-    )
+    (tmp_path / "a.html").touch()
+    server.serve_path.return_value = (Served.ALL, {"a.html": str(tmp_path / "a.html")})
    with TestCase.load(tmp_path) as testcase:
         with ReplayManager([], server, target, relaunch=10) as replay:
             results = replay.run([testcase], 10, min_results=2, repeat=iters)
@@ -924,7 +897,7 @@ def test_replay_27(mocker, server, tmp_path):
     )
     with ReplayManager([], server, target, use_harness=True) as replay:
         assert "include.js" not in test
-        results = replay.run([test], 30, post_launch_delay=-1)
+        results = replay.run([test], 30)
         assert replay.status
         assert replay.status.ignored == 0
         assert replay.status.iteration == 1
diff --git a/grizzly/session.py b/grizzly/session.py
index ea922275..12acc045 100644
--- a/grizzly/session.py
+++ b/grizzly/session.py
@@ -215,7 +215,8 @@ def run(
             )
             with self.status.measure("launch"):
                 runner.launch(location, max_retries=launch_attempts, retry_delay=0)
-            runner.post_launch(delay=post_launch_delay)
+            if post_launch_delay >= 0 and not runner.startup_failure:
+                runner.post_launch(delay=post_launch_delay)
             # TODO: avoid running test case if runner.startup_failure is True
             # especially if it is a hang!
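A related API note: every Runner.run() call site in the tests now passes set() instead of [] as its first argument, suggesting the ignored-token collection is typed as a set. A hedged sketch of the updated call shape — the parameter name "ignore" and the mock setup are assumptions for illustration; the patch itself only shows the [] -> set() change:

from unittest.mock import MagicMock

from grizzly.common.runner import Runner
from grizzly.target import Result
from sapphire import Served, ServerMap

server = MagicMock(timeout=10)  # Sapphire stand-in
target = MagicMock(launch_timeout=30)  # Target stand-in
target.check_result.return_value = Result.NONE
runner = Runner(server, target, relaunch=10)
runner.launch("http://localhost/")

testcase = MagicMock(entry_point="a.bin", required=["a.bin"])
server.serve_path.return_value = (Served.ALL, {"a.bin": "a.bin"})
# the first positional argument (assumed to be "ignore") is now a set
# of tokens to ignore rather than a list
result = runner.run(set(), ServerMap(), testcase)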