From 9a389a012fa78de851241490dbe5595f06ee56e2 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 19 Aug 2020 16:18:30 -0700 Subject: [PATCH 001/531] Add TestCase.load_archive() --- grizzly/common/storage.py | 35 +++++++++++++++++++++++++++++++- grizzly/common/test_storage.py | 37 ++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index 25e856f1..350a0f9e 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -8,7 +8,9 @@ import json import os import shutil -from tempfile import SpooledTemporaryFile +from tempfile import mkdtemp, SpooledTemporaryFile +from zipfile import BadZipfile, ZipFile +from zlib import error as zlib_error from ..target import sanitizer_opts from .utils import grz_tmp @@ -236,6 +238,37 @@ def dump(self, out_path, include_details=False): for meta_file in self._files.meta: meta_file.dump(out_path) + @classmethod + def load_archive(cls, archive, working_path=None): + """Unpack and load TestCases from an archive. + + Args: + archive (str): Path to archive file containing testcase data. + working_path (str): Location to unpack testcase data files. + + Yields: + TestCase: A TestCase. + """ + if archive.lower().endswith(".zip"): + unpacked = mkdtemp(prefix="test_unpack_", dir=working_path) + try: + with ZipFile(archive) as zip_fp: + zip_fp.extractall(path=unpacked) + except (BadZipfile, zlib_error): + raise TestCaseLoadFailure("Testcase archive is corrupted") + else: + raise TestCaseLoadFailure("Unsupported archive type") + try: + for entry in os.listdir(unpacked): + tc_path = os.path.join(unpacked, entry) + if os.path.isdir(tc_path): + try: + yield cls.load_path(tc_path) + except TestCaseLoadFailure: + pass + finally: + shutil.rmtree(unpacked, ignore_errors=True) + def load_environ(self, path, env_data): # sanity check environment variable data for name, value in env_data.items(): diff --git a/grizzly/common/test_storage.py b/grizzly/common/test_storage.py index 2bc35b35..f96883e8 100644 --- a/grizzly/common/test_storage.py +++ b/grizzly/common/test_storage.py @@ -7,6 +7,7 @@ import json import re import os +import zipfile import pytest @@ -266,6 +267,42 @@ def test_testcase_11(tmp_path): with pytest.raises(TestFileExists, match="'file.bin' exists in test"): tcase.add_batch(str(include), [str(inc_1)]) +def test_testcase_12(tmp_path): + """test TestCase.load_archive()""" + # build archive containing multiple testcases + with TestCase("target.bin", None, "test-adapter") as src: + src.add_from_data("test", "target.bin") + src.dump(str(tmp_path / "src-0"), include_details=True) + src.dump(str(tmp_path / "src-1"), include_details=True) + src.dump(str(tmp_path / "src-2"), include_details=True) + (tmp_path / "log_dummy.txt").touch() + (tmp_path / "not_a_tc").mkdir() + (tmp_path / "not_a_tc" / "file.txt").touch() + archive = str(tmp_path / "testcase.zip") + with zipfile.ZipFile(archive, mode="w", compression=zipfile.ZIP_DEFLATED) as zfp: + for dir_name, _, dir_files in os.walk(str(tmp_path)): + arc_path = os.path.relpath(dir_name, str(tmp_path)) + for file_name in dir_files: + zfp.write( + os.path.join(dir_name, file_name), + arcname=os.path.join(arc_path, file_name)) + # load archive + tests = tuple(TestCase.load_archive(archive, working_path=str(tmp_path))) + try: + assert len(tests) == 3 + finally: + for test in tests: + test.cleanup() + # load unsupported archive + with pytest.raises(TestCaseLoadFailure, match="Unsupported archive type"): + 
any(TestCase.load_archive("somefile.test")) + # load broken archive + archive = (tmp_path / "fake.zip") + archive.write_bytes(b"x") + with pytest.raises(TestCaseLoadFailure, match="Testcase archive is corrupted"): + any(TestCase.load_archive(str(archive))) + + def test_testfile_01(): """test simple TestFile""" with TestFile("test_file.txt") as tfile: From dda3f5ac061d6a867cbce9a9d72e25b8b8e168c7 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 19 Aug 2020 16:26:38 -0700 Subject: [PATCH 002/531] Add TestCase.timestamp --- grizzly/common/storage.py | 15 ++++++++------- grizzly/common/test_storage.py | 3 +++ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index 350a0f9e..da0aead0 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -9,6 +9,7 @@ import os import shutil from tempfile import mkdtemp, SpooledTemporaryFile +from time import time from zipfile import BadZipfile, ZipFile from zlib import error as zlib_error @@ -35,15 +36,16 @@ class TestFileExists(Exception): class TestCase(object): __slots__ = ( "adapter_name", "duration", "env_vars", "input_fname", "landing_page", - "redirect_page", "_existing_paths", "_files") + "redirect_page", "timestamp", "_existing_paths", "_files") - def __init__(self, landing_page, redirect_page, adapter_name, input_fname=None): + def __init__(self, landing_page, redirect_page, adapter_name, input_fname=None, timestamp=None): self.adapter_name = adapter_name self.duration = None self.env_vars = dict() # environment variables self.input_fname = input_fname # file that was used to create the test case self.landing_page = landing_page self.redirect_page = redirect_page + self.timestamp = time() if timestamp is None else timestamp self._existing_paths = list() # file paths in use self._files = TestFileMap( meta=list(), # environment files such as prefs.js, etc... 
@@ -231,7 +233,8 @@ def dump(self, out_path, include_details=False): "duration": self.duration, "env": self.env_vars, "input": os.path.basename(self.input_fname) if self.input_fname else None, - "target": self.landing_page} + "target": self.landing_page, + "timestamp": self.timestamp} with open(os.path.join(out_path, "test_info.json"), "w") as out_fp: json.dump(info, out_fp, indent=2, sort_keys=True) # save meta files @@ -316,16 +319,14 @@ def load_path(cls, path, full_scan=False, prefs=True): entry_point = os.path.basename(info["target"]) if not os.path.isfile(os.path.join(path, entry_point)): raise TestCaseLoadFailure("entry_point '%s' not found in '%s'" % (entry_point, path)) - adapter = info.get("adapter", None) full_scan = True elif os.path.isfile(path): - adapter = None + info = dict() entry_point = os.path.basename(path) path = os.path.dirname(path) - info = None else: raise TestCaseLoadFailure("Cannot find %r" % (path,)) - test = cls(None, None, adapter) + test = cls(None, None, info.get("adapter", None), timestamp=info.get("timestamp", 0)) if full_scan: # load all files from directory as test for dpath, _, files in os.walk(path): diff --git a/grizzly/common/test_storage.py b/grizzly/common/test_storage.py index f96883e8..81315064 100644 --- a/grizzly/common/test_storage.py +++ b/grizzly/common/test_storage.py @@ -26,6 +26,7 @@ def test_testcase_01(tmp_path): assert tcase.duration is None assert tcase.data_size == 0 assert tcase.input_fname is None + assert tcase.timestamp > 0 assert not tcase.env_vars assert not tcase._existing_paths assert not tcase._files.meta @@ -188,6 +189,7 @@ def test_testcase_08(tmp_path): assert "x.bin" in (x.file_name for x in dst._files.optional) assert os.path.join("nested", "x.bin") in (x.file_name for x in dst._files.optional) assert dst.env_vars["TEST_ENV_VAR"] == "100" + assert dst.timestamp > 0 def test_testcase_09(tmp_path): """test TestCase.load_path() using a file""" @@ -207,6 +209,7 @@ def test_testcase_09(tmp_path): assert "prefs.js" not in (x.file_name for x in tcase._files.meta) assert "target.bin" in (x.file_name for x in tcase._files.required) assert "optional.bin" not in (x.file_name for x in tcase._files.optional) + assert tcase.timestamp == 0 # load full test case with TestCase.load_path(str(entry_point), full_scan=True, prefs=True) as tcase: assert tcase.landing_page == "target.bin" From ac4d5f525b9557cfb201701e94e42c4a65d8a7fc Mon Sep 17 00:00:00 2001 From: Jesse Schwartzentruber Date: Fri, 21 Aug 2020 16:01:55 -0400 Subject: [PATCH 003/531] Move argparse epilog to CommonArgs --- grizzly/args.py | 6 +++--- grizzly/reduce/args.py | 3 --- grizzly/replay/args.py | 3 --- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/grizzly/args.py b/grizzly/args.py index d466e3e3..62c0d274 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -102,6 +102,9 @@ def __init__(self): "--tool", help="Override tool name used when reporting issues to FuzzManager") + self.parser.epilog = "For addition help check out the wiki:" \ + " https://github.com/MozillaSecurity/grizzly/wiki" + def parse_args(self, argv=None): args = self.parser.parse_args(argv) self.sanity_check(args) @@ -193,9 +196,6 @@ def __init__(self): "--s3-fuzzmanager", action="store_true", help="Report large attachments (if any) to S3 and then the crash & S3 link to FuzzManager") - self.parser.epilog = "For addition help check out the wiki:" \ - " https://github.com/MozillaSecurity/grizzly/wiki" - def sanity_check(self, args): super(GrizzlyArgs, self).sanity_check(args) diff --git 
a/grizzly/reduce/args.py b/grizzly/reduce/args.py index 4f555cdb..2c25c3c8 100644 --- a/grizzly/reduce/args.py +++ b/grizzly/reduce/args.py @@ -64,9 +64,6 @@ def __init__(self): help="One or more strategies (space-separated). Available: %s (default: %s)" % (" ".join(sorted(strategies_by_name())), " ".join(ReductionJob.DEFAULT_STRATEGIES))) - self.parser.epilog = "For addition help check out the wiki:" \ - " https://github.com/MozillaSecurity/grizzly/wiki" - def sanity_check(self, args): super(ReducerArgs, self).sanity_check(args) diff --git a/grizzly/replay/args.py b/grizzly/replay/args.py index 36d8237d..a2a1f3e2 100644 --- a/grizzly/replay/args.py +++ b/grizzly/replay/args.py @@ -53,9 +53,6 @@ def __init__(self): "--include-test", action="store_true", help="Include the testcase when reporting results.") - self.parser.epilog = "For addition help check out the wiki:" \ - " https://github.com/MozillaSecurity/grizzly/wiki" - def sanity_check(self, args): super(ReplayArgs, self).sanity_check(args) From df64c654603d7944b1acfc4b51bfda986aab926d Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 20 Aug 2020 14:34:14 -0700 Subject: [PATCH 004/531] [replay] Add zip archive and multi-testcase support --- grizzly/common/storage.py | 60 +++++-------- grizzly/common/test_storage.py | 50 ++++------- grizzly/replay/args.py | 5 +- grizzly/replay/replay.py | 137 +++++++++++++++++++++------- grizzly/replay/test_main.py | 23 +++++ grizzly/replay/test_replay.py | 159 ++++++++++++++++++++++++++++----- 6 files changed, 306 insertions(+), 128 deletions(-) diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index da0aead0..8bcc0998 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -8,10 +8,8 @@ import json import os import shutil -from tempfile import mkdtemp, SpooledTemporaryFile +from tempfile import SpooledTemporaryFile from time import time -from zipfile import BadZipfile, ZipFile -from zlib import error as zlib_error from ..target import sanitizer_opts from .utils import grz_tmp @@ -241,37 +239,6 @@ def dump(self, out_path, include_details=False): for meta_file in self._files.meta: meta_file.dump(out_path) - @classmethod - def load_archive(cls, archive, working_path=None): - """Unpack and load TestCases from an archive. - - Args: - archive (str): Path to archive file containing testcase data. - working_path (str): Location to unpack testcase data files. - - Yields: - TestCase: A TestCase. - """ - if archive.lower().endswith(".zip"): - unpacked = mkdtemp(prefix="test_unpack_", dir=working_path) - try: - with ZipFile(archive) as zip_fp: - zip_fp.extractall(path=unpacked) - except (BadZipfile, zlib_error): - raise TestCaseLoadFailure("Testcase archive is corrupted") - else: - raise TestCaseLoadFailure("Unsupported archive type") - try: - for entry in os.listdir(unpacked): - tc_path = os.path.join(unpacked, entry) - if os.path.isdir(tc_path): - try: - yield cls.load_path(tc_path) - except TestCaseLoadFailure: - pass - finally: - shutil.rmtree(unpacked, ignore_errors=True) - def load_environ(self, path, env_data): # sanity check environment variable data for name, value in env_data.items(): @@ -288,7 +255,7 @@ def load_environ(self, path, env_data): self.env_vars[opt_key] = ":".join("=".join((k, v)) for k, v in opts.items()) @classmethod - def load_path(cls, path, full_scan=False, prefs=True): + def load_path(cls, path, full_scan=False, load_prefs=True): """Load contents of a TestCase from disk. If `path` is a directory it must contain a valid test_info.json file. 
@@ -299,7 +266,7 @@ def load_path(cls, path, full_scan=False, prefs=True): subdirectories. This is always the case when loading a directory. WARNING: This should be used with caution! - prefs (bool): Include prefs.js file in the test case. + load_prefs (bool): Include prefs.js file in the test case. Returns: TestCase: A TestCase. @@ -335,7 +302,7 @@ def load_path(cls, path, full_scan=False, prefs=True): continue if dpath == path: if fname == "prefs.js": - if prefs: + if load_prefs: test.add_meta(TestFile.from_file(os.path.join(dpath, fname))) continue if fname == entry_point: @@ -398,6 +365,25 @@ def purge_optional(self, keep): for idx in reversed(to_remove): self._files.optional.pop(idx).close() + @staticmethod + def scan_path(path): + """Check path and subdirectories for potential test cases. + + Args: + path (str): Path to scan. + + Yields: + str: Path to what appears to be a valid testcase. + """ + contents = os.listdir(path) + if "test_info.json" in contents: + yield path + else: + for entry in contents: + tc_path = os.path.join(path, entry) + if os.path.isfile(os.path.join(tc_path, "test_info.json")): + yield tc_path + class TestFile(object): CACHE_LIMIT = 0x80000 # data cache limit per file: 512KB diff --git a/grizzly/common/test_storage.py b/grizzly/common/test_storage.py index 81315064..e45913d5 100644 --- a/grizzly/common/test_storage.py +++ b/grizzly/common/test_storage.py @@ -7,7 +7,6 @@ import json import re import os -import zipfile import pytest @@ -211,7 +210,7 @@ def test_testcase_09(tmp_path): assert "optional.bin" not in (x.file_name for x in tcase._files.optional) assert tcase.timestamp == 0 # load full test case - with TestCase.load_path(str(entry_point), full_scan=True, prefs=True) as tcase: + with TestCase.load_path(str(entry_point), full_scan=True, load_prefs=True) as tcase: assert tcase.landing_page == "target.bin" assert "prefs.js" in (x.file_name for x in tcase._files.meta) assert "target.bin" in (x.file_name for x in tcase._files.required) @@ -271,40 +270,21 @@ def test_testcase_11(tmp_path): tcase.add_batch(str(include), [str(inc_1)]) def test_testcase_12(tmp_path): - """test TestCase.load_archive()""" - # build archive containing multiple testcases + """test TestCase.scan_path()""" + # empty path + (tmp_path / "not-test").mkdir() + assert not tuple(TestCase.scan_path(str(tmp_path))) + # multiple test case directories + paths = [str(tmp_path / ("test-%d" % i)) for i in range(3)] with TestCase("target.bin", None, "test-adapter") as src: - src.add_from_data("test", "target.bin") - src.dump(str(tmp_path / "src-0"), include_details=True) - src.dump(str(tmp_path / "src-1"), include_details=True) - src.dump(str(tmp_path / "src-2"), include_details=True) - (tmp_path / "log_dummy.txt").touch() - (tmp_path / "not_a_tc").mkdir() - (tmp_path / "not_a_tc" / "file.txt").touch() - archive = str(tmp_path / "testcase.zip") - with zipfile.ZipFile(archive, mode="w", compression=zipfile.ZIP_DEFLATED) as zfp: - for dir_name, _, dir_files in os.walk(str(tmp_path)): - arc_path = os.path.relpath(dir_name, str(tmp_path)) - for file_name in dir_files: - zfp.write( - os.path.join(dir_name, file_name), - arcname=os.path.join(arc_path, file_name)) - # load archive - tests = tuple(TestCase.load_archive(archive, working_path=str(tmp_path))) - try: - assert len(tests) == 3 - finally: - for test in tests: - test.cleanup() - # load unsupported archive - with pytest.raises(TestCaseLoadFailure, match="Unsupported archive type"): - any(TestCase.load_archive("somefile.test")) - # load broken 
archive - archive = (tmp_path / "fake.zip") - archive.write_bytes(b"x") - with pytest.raises(TestCaseLoadFailure, match="Testcase archive is corrupted"): - any(TestCase.load_archive(str(archive))) - + src.add_from_data("test", "test.htm") + for path in paths: + src.dump(path, include_details=True) + tc_paths = list(TestCase.scan_path(str(tmp_path))) + assert len(tc_paths) == 3 + # single test case directory + tc_paths = list(TestCase.scan_path(str(paths[0]))) + assert len(tc_paths) == 1 def test_testfile_01(): """test simple TestFile""" diff --git a/grizzly/replay/args.py b/grizzly/replay/args.py index a2a1f3e2..0cc88c6d 100644 --- a/grizzly/replay/args.py +++ b/grizzly/replay/args.py @@ -13,7 +13,10 @@ def __init__(self): super(ReplayArgs, self).__init__() self.parser.add_argument( "input", - help="Directory containing test case data or file to use as a test case." \ + help="Accepted input includes: " \ + "1) A directory containing testcase data. " \ + "2) A file to use as a testcase. " \ + "3) A zip archive containing testcases. " \ "When using a directory it must contain a 'test_info.json' file.") replay_args = self.parser.add_argument_group("Replay Arguments") diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 67cbefc1..e449a015 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -4,9 +4,12 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. from logging import getLogger -from os.path import dirname, join as pathjoin +from os.path import dirname, isfile, join as pathjoin +from shutil import copyfile, rmtree from tempfile import mkdtemp from time import sleep +from zipfile import BadZipfile, ZipFile +from zlib import error as zlib_error from FTB.Signatures.CrashInfo import CrashSignature from sapphire import Sapphire, ServerMap @@ -28,21 +31,22 @@ class ReplayManager(object): HARNESS_FILE = pathjoin(dirname(__file__), "..", "common", "harness.html") - __slots__ = ("ignore", "server", "status", "target", "testcase", "_any_crash", + __slots__ = ("ignore", "server", "status", "target", "testcases", "_any_crash", "_harness", "_reports_expected", "_reports_other", "_runner", "_signature") - def __init__(self, ignore, server, target, testcase, any_crash=False, signature=None, use_harness=True): + def __init__(self, ignore, server, target, testcases, any_crash=False, signature=None, use_harness=True): self.ignore = ignore self.server = server self.status = None self.target = target - self.testcase = testcase + self.testcases = testcases self._any_crash = any_crash self._harness = None self._reports_expected = dict() self._reports_other = dict() self._runner = Runner(self.server, self.target) + # TODO: make signature a property self._signature = signature if use_harness: @@ -73,6 +77,55 @@ def cleanup(self): if self.status is not None: self.status.cleanup() + @staticmethod + def load_testcases(path, load_prefs): + """Load TestCases from disk. + + Args: + path (str): Path to a file, directory or zip archive containing + testcase data. + load_prefs (bool): Load prefs.js file if available. + + Returns: + tuple: TestCases (list) and path to unpacked testcase data (str). 
+ """ + unpacked = None + try: + if path.lower().endswith(".zip"): + unpacked = mkdtemp(prefix="unpack_", dir=grz_tmp("replay")) + try: + with ZipFile(path) as zip_fp: + zip_fp.extractall(path=unpacked) + except (BadZipfile, zlib_error): + raise TestCaseLoadFailure("Testcase archive is corrupted") + tc_paths = tuple(TestCase.scan_path(unpacked)) + testcases = list() + for tc_path in tc_paths: + try: + testcases.append(TestCase.load_path(tc_path, load_prefs=load_prefs)) + except TestCaseLoadFailure: # pragma: no cover + pass + testcases.sort(key=lambda tc: tc.timestamp) + if load_prefs: + # attempt to unpack prefs.js + for tc_path in tc_paths: + try: + copyfile( + pathjoin(tc_path, "prefs.js"), + pathjoin(unpacked, "prefs.js")) + except IOError: # pragma: no cover + continue + break + else: + testcases = [TestCase.load_path(path, load_prefs=load_prefs)] + if not testcases: + raise TestCaseLoadFailure("Failed to load TestCases") + except TestCaseLoadFailure: + if unpacked is not None: + rmtree(unpacked, ignore_errors=True) + raise + return testcases, unpacked + @property def other_reports(self): """Reports from results that do not match: @@ -100,7 +153,7 @@ def reports(self): return self._reports_expected.values() @staticmethod - def report_to_filesystem(path, reports, other_reports=None, test=None): + def report_to_filesystem(path, reports, other_reports=None, tests=None): """Use FilesystemReporter to write reports and testcase to disk in a known location. @@ -108,13 +161,11 @@ def report_to_filesystem(path, reports, other_reports=None, test=None): path (str): Location to write data. reports (iterable): Reports to output. other_reports (iterable): Reports to output. - test (TestCase): Testcase to output. + tests (iterable): Testcases to output. Returns: None """ - assert test is None or isinstance(test, TestCase) - tests = [test] if test else tuple() if reports: reporter = FilesystemReporter( report_path=pathjoin(path, "reports"), @@ -154,10 +205,9 @@ def _dyn_close(): # pragma: no cover return b"
<h1>Close Browser</h1>
" server_map.set_dynamic_response("grz_close_browser", _dyn_close, mime_type="text/html") server_map.set_dynamic_response("grz_harness", lambda: self._harness, mime_type="text/html") - server_map.set_redirect("grz_next_test", self.testcase.landing_page, required=True) - server_map.set_redirect("grz_current_test", self.testcase.landing_page, required=False) success = False + test_count = len(self.testcases) for _ in range(repeat): self.status.iteration += 1 if self.target.closed: @@ -170,10 +220,15 @@ def _dyn_close(): # pragma: no cover location = self._runner.location( "/grz_harness", self.server.port, - close_after=self.target.rl_reset, + close_after=self.target.rl_reset * test_count, forced_close=self.target.forced_close) try: - self._runner.launch(location, env_mod=self.testcase.env_vars) + # The environment from the initial testcase is used because + # a sequence of testcases is expected to be run without + # relaunching the Target to match the functionality of + # Grizzly. If this is not the case each TestCase should + # be run individually. + self._runner.launch(location, env_mod=self.testcases[0].env_vars) except TargetLaunchError: LOG.error("Target launch error. Check browser logs for details.") log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) @@ -182,12 +237,20 @@ def _dyn_close(): # pragma: no cover raise self.target.step() LOG.info("Performing replay (%d/%d)...", self.status.iteration, repeat) - # run test case - self._runner.run( - self.ignore, - server_map, - self.testcase, - wait_for_callback=self._harness is None) + # run test cases + for test_idx in range(test_count): + LOG.debug("running test: %d of %d", test_idx + 1, test_count) + if self._harness is not None: + next_idx = (test_idx + 1) % test_count + server_map.set_redirect("grz_next_test", self.testcases[next_idx].landing_page, required=True) + server_map.set_redirect("grz_current_test", self.testcases[test_idx].landing_page, required=False) + self._runner.run( + self.ignore, + server_map, + self.testcases[test_idx], + wait_for_callback=self._harness is None) + if self._runner.result != self._runner.COMPLETE: + break # process results if self._runner.result == self._runner.FAILED: log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) @@ -288,22 +351,26 @@ def main(cls, args): else: signature = None + LOG.debug("loading the TestCases") try: - LOG.debug("loading the TestCase") - testcase = TestCase.load_path(args.input, prefs=args.prefs is None) - # prioritize specified prefs.js file over included file - if args.prefs is not None: - prefs = args.prefs - testcase.add_meta(TestFile.from_file(args.prefs, "prefs.js")) - LOG.info("Using specified prefs.js") - elif testcase.contains("prefs.js"): - prefs = pathjoin(args.input, "prefs.js") - LOG.info("Using prefs.js from testcase") - else: - prefs = None + testcases, unpacked = cls.load_testcases(args.input, args.prefs is None) except TestCaseLoadFailure as exc: LOG.error("Error: %s", str(exc)) return 1 + # prioritize specified prefs.js file over included file + if args.prefs is not None: + prefs = args.prefs + for testcase in testcases: + testcase.add_meta(TestFile.from_file(args.prefs, "prefs.js")) + LOG.info("Using specified prefs.js") + elif unpacked and isfile(pathjoin(unpacked, "prefs.js")): + prefs = pathjoin(unpacked, "prefs.js") + LOG.info("Using prefs.js from testcase") + elif isfile(pathjoin(args.input, "prefs.js")): + prefs = pathjoin(args.input, "prefs.js") + LOG.info("Using prefs.js from testcase") + else: + prefs = None replay = None target = None @@ 
-322,7 +389,7 @@ def main(cls, args): xvfb=args.xvfb) if prefs is not None: target.prefs = prefs - if testcase.env_vars.get("GRZ_FORCED_CLOSE") == "0": + if testcases[0].env_vars.get("GRZ_FORCED_CLOSE") == "0": LOG.debug("setting target.forced_close=False") target.forced_close = False @@ -340,7 +407,7 @@ def main(cls, args): args.ignore, server, target, - testcase, + testcases, any_crash=args.any_crash, signature=signature, use_harness=not args.no_harness) @@ -350,7 +417,7 @@ def main(cls, args): args.logs, replay.reports, replay.other_reports, - replay.testcase if args.include_test else None) + replay.testcases if args.include_test else None) # TODO: add fuzzmanager reporting return 0 if success else 1 @@ -371,6 +438,8 @@ def main(cls, args): replay.cleanup() if target is not None: target.cleanup() - if testcase is not None: + for testcase in testcases: testcase.cleanup() + if unpacked is not None: + rmtree(unpacked, ignore_errors=True) LOG.info("Done.") diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index cb764c12..32a79900 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -262,3 +262,26 @@ def _fake_save_logs(result_logs): assert log_path.is_dir() prefs = tuple(log_path.glob('**/prefs.js')) assert prefs[0].read_bytes() == b"specified" + +def test_main_05(mocker, tmp_path): + """test ReplayManager.main() - unpacked test case - prefs and cleanup """ + unpacked = (tmp_path / "unpacked") + unpacked.mkdir() + # prefs.js from unpacked path + (unpacked / "prefs.js").touch() + fake_load_testcases = mocker.patch("grizzly.replay.replay.ReplayManager.load_testcases") + fake_load_testcases.return_value = ([mocker.Mock(env_vars=dict())], str(unpacked)) + mocker.patch("grizzly.replay.replay.ReplayManager.run") + mocker.patch("grizzly.replay.replay.load_target") + mocker.patch("grizzly.replay.replay.Sapphire") + args = mocker.Mock( + fuzzmanager=False, + ignore=None, + min_crashes=1, + relaunch=1, + repeat=1, + prefs=None, + sig=None, + timeout=1) + assert ReplayManager.main(args) == 0 + assert not unpacked.is_dir() diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 79a496f8..5abc8f78 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -6,12 +6,14 @@ """ unit tests for grizzly.replay """ -from os.path import join as pathjoin +from os import walk +from os.path import isfile, join as pathjoin, relpath +from zipfile import ZIP_DEFLATED, ZipFile from pytest import raises from sapphire import Sapphire, SERVED_ALL, SERVED_REQUEST -from .replay import ReplayManager +from .replay import ReplayManager, TestCaseLoadFailure from ..common import Report, Status, TestCase from ..target import Target, TargetLaunchError @@ -30,7 +32,7 @@ def _fake_save_logs_result(result_logs, meta=False): # pylint: disable=unused-a def test_replay_01(mocker): """test ReplayManager.cleanup()""" - replay = ReplayManager([], mocker.Mock(spec=Sapphire), mocker.Mock(spec=Target), mocker.Mock()) + replay = ReplayManager([], mocker.Mock(spec=Sapphire), mocker.Mock(spec=Target), [mocker.Mock()]) replay._reports_expected = {"A": mocker.Mock(spec=Report)} replay._reports_other = {"B": mocker.Mock(spec=Report)} replay.status = mocker.Mock(spec=Status) @@ -51,8 +53,8 @@ def test_replay_02(mocker): target.detect_failure.return_value = Target.RESULT_NONE target.forced_close = True target.rl_reset = 1 - testcase = mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html") - with ReplayManager([], server, target, testcase, 
use_harness=True) as replay: + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + with ReplayManager([], server, target, testcases, use_harness=True) as replay: assert not replay.run() assert replay.status.ignored == 0 assert replay.status.iteration == 1 @@ -68,8 +70,8 @@ def test_replay_03(mocker): target.binary = "C:\\fake_bin" target.detect_failure.return_value = Target.RESULT_FAILURE target.save_logs = _fake_save_logs_result - testcase = mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html") - with ReplayManager([], server, target, testcase, use_harness=False) as replay: + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + with ReplayManager([], server, target, testcases, use_harness=False) as replay: assert replay.run() assert replay.status.ignored == 0 assert replay.status.iteration == 1 @@ -84,8 +86,8 @@ def test_replay_04(mocker): target = mocker.Mock(spec=Target) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE - testcase = mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html") - with ReplayManager([], server, target, testcase, use_harness=False) as replay: + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + with ReplayManager([], server, target, testcases, use_harness=False) as replay: assert not replay.run(repeat=2) assert replay.status.ignored == 0 assert replay.status.iteration == 1 @@ -101,8 +103,8 @@ def test_replay_05(mocker): target = mocker.Mock(spec=Target) target.RESULT_IGNORED = Target.RESULT_IGNORED target.detect_failure.return_value = Target.RESULT_IGNORED - testcase = mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html") - with ReplayManager([], server, target, testcase, use_harness=False) as replay: + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + with ReplayManager([], server, target, testcases, use_harness=False) as replay: assert not replay.run() assert replay.status.ignored == 1 assert replay.status.iteration == 1 @@ -120,10 +122,10 @@ def test_replay_06(mocker): target.RESULT_NONE = Target.RESULT_NONE target.binary = "path/fake_bin" target.save_logs = _fake_save_logs_result - testcase = mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html") + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] # early failure target.detect_failure.side_effect = [Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_NONE] - with ReplayManager([], server, target, testcase, use_harness=False) as replay: + with ReplayManager([], server, target, testcases, use_harness=False) as replay: assert not replay.run(repeat=4, min_results=3) assert replay.status.iteration == 3 assert replay.status.results == 1 @@ -131,7 +133,7 @@ def test_replay_06(mocker): assert len(replay.reports) == 1 # early success target.detect_failure.side_effect = [Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_FAILURE] - with ReplayManager([], server, target, testcase, use_harness=False) as replay: + with ReplayManager([], server, target, testcases, use_harness=False) as replay: assert replay.run(repeat=4, min_results=2) assert replay.status.iteration == 3 assert replay.status.results == 2 @@ -164,8 +166,8 @@ def test_replay_07(mocker, tmp_path): target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.binary = "fake_bin" - testcase = mocker.Mock(spec=TestCase, env_vars=[], 
landing_page="index.html") - with ReplayManager([], server, target, testcase, signature=signature, use_harness=False) as replay: + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + with ReplayManager([], server, target, testcases, signature=signature, use_harness=False) as replay: assert not replay.run(repeat=3, min_results=2) assert replay._signature == signature assert report.from_path.call_count == 3 @@ -202,8 +204,8 @@ def test_replay_08(mocker, tmp_path): target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.binary = "fake_bin" - testcase = mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html") - with ReplayManager([], server, target, testcase, any_crash=True, use_harness=False) as replay: + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + with ReplayManager([], server, target, testcases, any_crash=True, use_harness=False) as replay: assert replay.run(repeat=3, min_results=2) assert replay._signature is None assert report.from_path.call_count == 3 @@ -240,7 +242,7 @@ def test_replay_09(mocker, tmp_path): reports_other[-1].path = str(tmp_path / "report_other2") test = mocker.Mock(spec=TestCase) path = tmp_path / "dest" - ReplayManager.report_to_filesystem(str(path), reports_expected, reports_other, test=test) + ReplayManager.report_to_filesystem(str(path), reports_expected, reports_other, tests=[test]) assert test.dump.call_count == 3 # called once per report assert not (tmp_path / "report_expected").is_dir() assert not (tmp_path / "report_other1").is_dir() @@ -260,10 +262,125 @@ def test_replay_10(mocker, tmp_path): server = mocker.Mock(spec=Sapphire, port=0x1337) target = mocker.Mock(spec=Target) target.launch.side_effect = TargetLaunchError - testcase = mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html") - with ReplayManager([], server, target, testcase, use_harness=False) as replay: + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + with ReplayManager([], server, target, testcases, use_harness=False) as replay: with raises(TargetLaunchError): replay.run() assert not any(replay.reports) assert any(replay.other_reports) assert "STARTUP" in replay._reports_other + +def test_replay_11(mocker): + """test ReplayManager.run() - multiple TestCases - no repro""" + server = mocker.Mock(spec=Sapphire, port=0x1337) + server.serve_testcase.return_value = (SERVED_ALL, ["index.html"]) + target = mocker.Mock(spec=Target) + target.RESULT_NONE = Target.RESULT_NONE + target.closed = True + target.detect_failure.return_value = Target.RESULT_NONE + target.forced_close = True + target.rl_reset = 1 + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html"), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html"), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + with ReplayManager([], server, target, testcases, use_harness=True) as replay: + assert not replay.run() + assert replay.status.ignored == 0 + assert replay.status.iteration == 1 + assert replay.status.results == 0 + assert not replay.reports + +def test_replay_12(mocker): + """test ReplayManager.run() - multiple TestCases - successful repro""" + server = mocker.Mock(spec=Sapphire, port=0x1337) + server.serve_testcase.return_value = (SERVED_ALL, ["index.html"]) + target = mocker.Mock(spec=Target, binary="fake_bin", rl_reset=1) + target.RESULT_FAILURE = Target.RESULT_FAILURE + target.RESULT_NONE = 
Target.RESULT_NONE + target.detect_failure.side_effect = ( + Target.RESULT_NONE, + Target.RESULT_NONE, + Target.RESULT_FAILURE) + target.save_logs = _fake_save_logs_result + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html"), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html"), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + with ReplayManager([], server, target, testcases, use_harness=True) as replay: + assert replay.run() + assert replay.status.ignored == 0 + assert replay.status.iteration == 1 + assert replay.status.results == 1 + assert len(replay.reports) == 1 + assert not replay.other_reports + +def test_replay_13(tmp_path): + """test ReplayManager.load_testcases() - error cases""" + # test missing + with raises(TestCaseLoadFailure, match="Cannot find"): + ReplayManager.load_testcases("missing", False) + # test empty path + with raises(TestCaseLoadFailure, match="Missing 'test_info.json'"): + ReplayManager.load_testcases(str(tmp_path), False) + # test broken archive + archive = (tmp_path / "fake.zip") + archive.write_bytes(b"x") + with raises(TestCaseLoadFailure, match="Testcase archive is corrupted"): + ReplayManager.load_testcases(str(archive), False) + +def test_replay_14(tmp_path): + """test ReplayManager.load_testcases() - single file""" + tfile = (tmp_path / "testcase.html") + tfile.touch() + testcases, unpacked = ReplayManager.load_testcases(str(tfile), False) + try: + assert unpacked is None + assert len(tuple(testcases)) == 1 + finally: + map(lambda x: x.cleanup, testcases) + +def test_replay_15(tmp_path): + """test ReplayManager.load_testcases() - single directory""" + with TestCase("target.bin", None, "test-adapter") as src: + src.add_from_data("test", "target.bin") + src.dump(str(tmp_path), include_details=True) + testcases, unpacked = ReplayManager.load_testcases(str(tmp_path), False) + try: + assert unpacked is None + assert len(tuple(testcases)) == 1 + finally: + map(lambda x: x.cleanup, testcases) + +def test_replay_16(tmp_path): + """test ReplayManager.load_testcases() - archive""" + # build archive containing multiple testcases + with TestCase("target.bin", None, "test-adapter") as src: + src.add_from_data("test", "target.bin") + src.dump(str(tmp_path / "src-0"), include_details=True) + src.dump(str(tmp_path / "src-1"), include_details=True) + src.dump(str(tmp_path / "src-2"), include_details=True) + (tmp_path / "src-1" / "prefs.js").write_bytes(b"fake_prefs") + (tmp_path / "log_dummy.txt").touch() + (tmp_path / "not_a_tc").mkdir() + (tmp_path / "not_a_tc" / "file.txt").touch() + archive = str(tmp_path / "testcase.zip") + with ZipFile(archive, mode="w", compression=ZIP_DEFLATED) as zfp: + for dir_name, _, dir_files in walk(str(tmp_path)): + arc_path = relpath(dir_name, str(tmp_path)) + for file_name in dir_files: + zfp.write( + pathjoin(dir_name, file_name), + arcname=pathjoin(arc_path, file_name)) + testcases, unpacked = ReplayManager.load_testcases(str(archive), True) + try: + assert unpacked is not None + assert isfile(pathjoin(unpacked, "prefs.js")) + assert len(tuple(testcases)) == 3 + finally: + map(lambda x: x.cleanup, testcases) + # empty archive + with ZipFile(archive, mode="w", compression=ZIP_DEFLATED) as zfp: + zfp.write(str(tmp_path / "not_a_tc"), arcname="not_a_tc") + with raises(TestCaseLoadFailure, match="Failed to load TestCases"): + ReplayManager.load_testcases(str(archive), True) From e821fce280fe712026940d6f7deadd0f3ccac532 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: 
Fri, 21 Aug 2020 19:50:00 -0700 Subject: [PATCH 005/531] Refactor TestCase loading * Move zip archive handling to storage.py * Add support for directories containing multiple testcases --- grizzly/common/storage.py | 148 ++++++++++++++++++++++----------- grizzly/common/test_storage.py | 124 ++++++++++++++++++++++----- grizzly/replay/args.py | 11 +-- grizzly/replay/replay.py | 96 +++++---------------- grizzly/replay/test_main.py | 82 ++++++------------ grizzly/replay/test_replay.py | 76 +---------------- 6 files changed, 256 insertions(+), 281 deletions(-) diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index 8bcc0998..239dec32 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -8,8 +8,10 @@ import json import os import shutil -from tempfile import SpooledTemporaryFile +from tempfile import mkdtemp, SpooledTemporaryFile from time import time +from zipfile import BadZipfile, ZipFile +from zlib import error as zlib_error from ..target import sanitizer_opts from .utils import grz_tmp @@ -239,6 +241,67 @@ def dump(self, out_path, include_details=False): for meta_file in self._files.meta: meta_file.dump(out_path) + def get_file(self, file_name): + """Look up and return the TestFile with the specified file name. + + Args: + file_name (str): Name of file to retrieve. + + Returns: + TestFile: TestFile with matching file name otherwise None. + """ + for tfile in chain(self._files.meta, self._files.optional, self._files.required): + if tfile.file_name == file_name: + return tfile + return None + + @classmethod + def load(cls, path, load_prefs, adjacent=False): + """Load TestCases from disk. + + Args: + path (str): Path can be: + 1) A directory containing `test_info.json` and data. + 2) A directory with one or more subdirectories of 1. + 3) A zip archive containing testcase data or + subdirectories containing testcase data. + 4) A single file to be used as a test case. + load_prefs (bool): Load prefs.js file if available. + adjacent (str): Load adjacent files as part of the test case. + This is always the case when loading a directory. + WARNING: This should be used with caution! + + Returns: + list: TestCases successfully loaded from path. + """ + # unpack archive if needed + if path.lower().endswith(".zip"): + unpacked = mkdtemp(prefix="unpack_", dir=grz_tmp("storage")) + try: + with ZipFile(path) as zip_fp: + zip_fp.extractall(path=unpacked) + except (BadZipfile, zlib_error): + shutil.rmtree(unpacked, ignore_errors=True) + raise TestCaseLoadFailure("Testcase archive is corrupted") + path = unpacked + else: + unpacked = None + # load testcase data from disk + try: + if os.path.isfile(path): + tests = [cls.load_single(path, load_prefs, adjacent=adjacent)] + elif os.path.isdir(path): + tests = list() + for tc_path in TestCase.scan_path(path): + tests.append(cls.load_single(tc_path, load_prefs)) + tests.sort(key=lambda tc: tc.timestamp) + else: + raise TestCaseLoadFailure("Invalid TestCase path") + finally: + if unpacked is not None: + shutil.rmtree(unpacked, ignore_errors=True) + return tests + def load_environ(self, path, env_data): # sanity check environment variable data for name, value in env_data.items(): @@ -255,25 +318,23 @@ def load_environ(self, path, env_data): self.env_vars[opt_key] = ":".join("=".join((k, v)) for k, v in opts.items()) @classmethod - def load_path(cls, path, full_scan=False, load_prefs=True): + def load_single(cls, path, load_prefs, adjacent=False): """Load contents of a TestCase from disk. 
If `path` is a directory it must - contain a valid test_info.json file. + contain a valid 'test_info.json' file. Args: path (str): Path to the directory or file to load. - full_scan (bool): Include all files in the directory containing the - test case entry point as well as the contents of - subdirectories. This is always the case when - loading a directory. - WARNING: This should be used with caution! - load_prefs (bool): Include prefs.js file in the test case. + load_prefs (bool): Load prefs.js file if available. + adjacent (str): Load adjacent files as part of the test case. + This is always the case when loading a directory. + WARNING: This should be used with caution! Returns: TestCase: A TestCase. """ path = os.path.abspath(path) if os.path.isdir(path): - # load a directory using test_info.json + # load using test_info.json try: with open(os.path.join(path, "test_info.json"), "r") as in_fp: info = json.load(in_fp) @@ -281,51 +342,25 @@ def load_path(cls, path, full_scan=False, load_prefs=True): raise TestCaseLoadFailure("Missing 'test_info.json'") except ValueError: raise TestCaseLoadFailure("Invalid 'test_info.json'") - if "target" not in info: - raise TestCaseLoadFailure("'test_info.json' missing 'target' entry") + if not isinstance(info.get("target"), str): + raise TestCaseLoadFailure("'test_info.json' has invalid 'target' entry") entry_point = os.path.basename(info["target"]) if not os.path.isfile(os.path.join(path, entry_point)): - raise TestCaseLoadFailure("entry_point '%s' not found in '%s'" % (entry_point, path)) - full_scan = True + raise TestCaseLoadFailure("Entry point %r not found in '%s'" % (entry_point, path)) + # always load all contents of a directory if a 'test_info.json' is loaded + adjacent = True elif os.path.isfile(path): - info = dict() entry_point = os.path.basename(path) + info = dict() path = os.path.dirname(path) else: - raise TestCaseLoadFailure("Cannot find %r" % (path,)) + raise TestCaseLoadFailure("Missing or invalid TestCase %r" % (path,)) + # create testcase and add data test = cls(None, None, info.get("adapter", None), timestamp=info.get("timestamp", 0)) - if full_scan: - # load all files from directory as test - for dpath, _, files in os.walk(path): - for fname in files: - if fname == "test_info.json": - continue - if dpath == path: - if fname == "prefs.js": - if load_prefs: - test.add_meta(TestFile.from_file(os.path.join(dpath, fname))) - continue - if fname == entry_point: - test.add_from_file(os.path.join(dpath, fname)) - # set entry point - test.landing_page = fname - continue - location = None - else: - # handle nested directories - location = "/".join((dpath.split(path, 1)[-1], fname)) - test.add_from_file( - os.path.join(dpath, fname), - file_name=location, - required=False) - else: - # load single file as test - test.add_from_file(os.path.join(path, entry_point)) - test.landing_page = entry_point - if test.landing_page is None: # pragma: no cover - # this should not be possible - test.cleanup() - raise AssertionError("Scanning for test case 'entry point' failed") + if load_prefs and os.path.isfile(os.path.join(path, "prefs.js")): + test.add_meta(TestFile.from_file(os.path.join(path, "prefs.js"))) + test.add_from_file(os.path.join(path, entry_point)) + test.landing_page = entry_point # load environment variables if info: try: @@ -333,6 +368,18 @@ def load_path(cls, path, full_scan=False, load_prefs=True): except TestCaseLoadFailure: test.cleanup() raise + # load all adjacent data from directory + if adjacent: + for dpath, _, files in 
os.walk(path): + for fname in files: + # ignore files that have been previously loaded + if fname in (entry_point, "prefs.js", "test_info.json"): + continue + location = "/".join((dpath.split(path, 1)[-1], fname)) + test.add_from_file( + os.path.join(dpath, fname), + file_name=location, + required=False) return test @property @@ -407,7 +454,10 @@ def __init__(self, file_name): or file_name.startswith("../"): raise TypeError("file_name is invalid %r" % (file_name,)) self._file_name = os.path.normpath(file_name) # name including path relative to wwwroot - self._fp = SpooledTemporaryFile(max_size=self.CACHE_LIMIT, dir=grz_tmp(), prefix="grz_tf_") + self._fp = SpooledTemporaryFile( + dir=grz_tmp("storage"), + max_size=self.CACHE_LIMIT, + prefix="testfile_") def __enter__(self): return self diff --git a/grizzly/common/test_storage.py b/grizzly/common/test_storage.py index e45913d5..983673b0 100644 --- a/grizzly/common/test_storage.py +++ b/grizzly/common/test_storage.py @@ -7,6 +7,7 @@ import json import re import os +import zipfile import pytest @@ -127,18 +128,18 @@ def test_testcase_06(): assert tcase.data_size == 6 def test_testcase_07(tmp_path): - """test TestCase.load_path() using a directory fail cases""" + """test TestCase.load_single() using a directory fail cases""" # missing test_info.json with pytest.raises(TestCaseLoadFailure, match="Missing 'test_info.json'"): - TestCase.load_path(str(tmp_path)) + TestCase.load_single(str(tmp_path), True) # invalid test_info.json (tmp_path / "test_info.json").write_bytes(b"X") with pytest.raises(TestCaseLoadFailure, match="Invalid 'test_info.json'"): - TestCase.load_path(str(tmp_path)) + TestCase.load_single(str(tmp_path), True) # test_info.json missing 'target' entry (tmp_path / "test_info.json").write_bytes(b"{}") - with pytest.raises(TestCaseLoadFailure, match="'test_info.json' missing 'target' entry"): - TestCase.load_path(str(tmp_path)) + with pytest.raises(TestCaseLoadFailure, match="'test_info.json' has invalid 'target' entry"): + TestCase.load_single(str(tmp_path), True) # build a test case src_dir = (tmp_path / "src") src_dir.mkdir() @@ -148,20 +149,20 @@ def test_testcase_07(tmp_path): with TestCase("target.bin", None, "test-adapter") as src: src.add_from_file(str(entry_point)) src.dump(str(src_dir), include_details=True) - # bad test_info.json 'target' entry + # bad 'target' entry in test_info.json entry_point.unlink() - with pytest.raises(TestCaseLoadFailure, match="entry_point 'target.bin' not found in"): - TestCase.load_path(str(src_dir)) - # bad test_info.json 'env' entry + with pytest.raises(TestCaseLoadFailure, match="Entry point 'target.bin' not found in"): + TestCase.load_single(str(src_dir), True) + # bad 'env' entry in test_info.json entry_point.touch() with TestCase("target.bin", None, "test-adapter") as src: src.add_environ_var("TEST_ENV_VAR", 100) src.dump(str(src_dir), include_details=True) with pytest.raises(TestCaseLoadFailure, match="'env_data' contains invalid 'env' entries"): - TestCase.load_path(str(src_dir)) + TestCase.load_single(str(src_dir), True) def test_testcase_08(tmp_path): - """test TestCase.load_path() using a directory""" + """test TestCase.load_single() using a directory""" # build a valid test case src_dir = (tmp_path / "src") src_dir.mkdir() @@ -180,7 +181,7 @@ def test_testcase_08(tmp_path): src.add_from_file(str(entry_point)) src.dump(str(src_dir), include_details=True) # load test case from test_info.json - with TestCase.load_path(str(src_dir)) as dst: + with TestCase.load_single(str(src_dir), True) 
as dst: assert dst.landing_page == "target.bin" assert "prefs.js" in (x.file_name for x in dst._files.meta) assert "target.bin" in (x.file_name for x in dst._files.required) @@ -191,10 +192,10 @@ def test_testcase_08(tmp_path): assert dst.timestamp > 0 def test_testcase_09(tmp_path): - """test TestCase.load_path() using a file""" + """test TestCase.load_single() using a file""" # invalid entry_point specified - with pytest.raises(TestCaseLoadFailure, match="Cannot find"): - TestCase.load_path(str(tmp_path / "missing_file")) + with pytest.raises(TestCaseLoadFailure, match="Missing or invalid TestCase"): + TestCase.load_single(str(tmp_path / "missing_file"), False) # valid test case src_dir = (tmp_path / "src") src_dir.mkdir() @@ -203,20 +204,96 @@ def test_testcase_09(tmp_path): entry_point.touch() (src_dir / "optional.bin").touch() # load single file test case - with TestCase.load_path(str(entry_point)) as tcase: + with TestCase.load_single(str(entry_point), False) as tcase: assert tcase.landing_page == "target.bin" assert "prefs.js" not in (x.file_name for x in tcase._files.meta) assert "target.bin" in (x.file_name for x in tcase._files.required) assert "optional.bin" not in (x.file_name for x in tcase._files.optional) assert tcase.timestamp == 0 # load full test case - with TestCase.load_path(str(entry_point), full_scan=True, load_prefs=True) as tcase: + with TestCase.load_single(str(entry_point), True, adjacent=True) as tcase: assert tcase.landing_page == "target.bin" assert "prefs.js" in (x.file_name for x in tcase._files.meta) assert "target.bin" in (x.file_name for x in tcase._files.required) assert "optional.bin" in (x.file_name for x in tcase._files.optional) def test_testcase_10(tmp_path): + """test TestCase.load() - missing file and empty directory""" + # missing file + with pytest.raises(TestCaseLoadFailure, match="Invalid TestCase path"): + TestCase.load("missing", False) + # empty path + assert not TestCase.load(str(tmp_path), True) + +def test_testcase_11(tmp_path): + """test TestCase.load() - single file""" + tfile = (tmp_path / "testcase.html") + tfile.touch() + testcases = TestCase.load(str(tfile), False) + try: + assert len(testcases) == 1 + finally: + map(lambda x: x.cleanup, testcases) + +def test_testcase_12(tmp_path): + """test TestCase.load() - single directory""" + with TestCase("target.bin", None, "test-adapter") as src: + src.add_from_data("test", "target.bin") + src.dump(str(tmp_path), include_details=True) + testcases = TestCase.load(str(tmp_path), False) + try: + assert len(testcases) == 1 + finally: + map(lambda x: x.cleanup, testcases) + +def test_testcase_13(tmp_path): + """test TestCase.load() - multiple directories""" + nested = (tmp_path / "nested") + nested.mkdir() + with TestCase("target.bin", None, "test-adapter") as src: + src.add_from_data("test", "target.bin") + src.dump(str(nested / "test-1"), include_details=True) + src.dump(str(nested / "test-2"), include_details=True) + src.dump(str(nested / "test-3"), include_details=True) + testcases = TestCase.load(str(nested), False) + try: + assert len(testcases) == 3 + finally: + map(lambda x: x.cleanup, testcases) + # try loading testcases that are nested too deep + assert not TestCase.load(str(tmp_path), False) + +def test_testcase_14(tmp_path): + """test TestCase.load() - archive""" + archive = tmp_path / "testcase.zip" + # bad archive + archive.write_bytes(b"x") + with pytest.raises(TestCaseLoadFailure, match="Testcase archive is corrupted"): + TestCase.load(str(archive), True) + # build archive 
containing multiple testcases + with TestCase("target.bin", None, "test-adapter") as src: + src.add_from_data("test", "target.bin") + src.dump(str(tmp_path / "test-0"), include_details=True) + src.dump(str(tmp_path / "test-1"), include_details=True) + src.dump(str(tmp_path / "test-2"), include_details=True) + (tmp_path / "test-1" / "prefs.js").write_bytes(b"fake_prefs") + (tmp_path / "log_dummy.txt").touch() + (tmp_path / "not_a_tc").mkdir() + (tmp_path / "not_a_tc" / "file.txt").touch() + with zipfile.ZipFile(str(archive), mode="w", compression=zipfile.ZIP_DEFLATED) as zfp: + for dir_name, _, dir_files in os.walk(str(tmp_path)): + arc_path = os.path.relpath(dir_name, str(tmp_path)) + for file_name in dir_files: + zfp.write( + os.path.join(dir_name, file_name), + arcname=os.path.join(arc_path, file_name)) + testcases = TestCase.load(str(archive), True) + try: + assert len(tuple(testcases)) == 3 + finally: + map(lambda x: x.cleanup, testcases) + +def test_testcase_15(tmp_path): """test TestCase.load_environ()""" (tmp_path / "ubsan.supp").touch() (tmp_path / "other_file").touch() @@ -233,7 +310,7 @@ def test_testcase_10(tmp_path): assert "b=2" in opts assert len(opts) == 3 -def test_testcase_11(tmp_path): +def test_testcase_16(tmp_path): """test TestCase.add_batch()""" include = (tmp_path / "inc_path") include.mkdir() @@ -269,14 +346,14 @@ def test_testcase_11(tmp_path): with pytest.raises(TestFileExists, match="'file.bin' exists in test"): tcase.add_batch(str(include), [str(inc_1)]) -def test_testcase_12(tmp_path): +def test_testcase_17(tmp_path): """test TestCase.scan_path()""" # empty path (tmp_path / "not-test").mkdir() assert not tuple(TestCase.scan_path(str(tmp_path))) # multiple test case directories paths = [str(tmp_path / ("test-%d" % i)) for i in range(3)] - with TestCase("target.bin", None, "test-adapter") as src: + with TestCase("test.htm", None, "test-adapter") as src: src.add_from_data("test", "test.htm") for path in paths: src.dump(path, include_details=True) @@ -286,6 +363,13 @@ def test_testcase_12(tmp_path): tc_paths = list(TestCase.scan_path(str(paths[0]))) assert len(tc_paths) == 1 +def test_testcase_18(): + """test TestCase.get_file()""" + with TestCase("test.htm", None, "test-adapter") as src: + src.add_from_data("test", "test.htm") + assert src.get_file("missing") is None + assert src.get_file("test.htm").data == b"test" + def test_testfile_01(): """test simple TestFile""" with TestFile("test_file.txt") as tfile: diff --git a/grizzly/replay/args.py b/grizzly/replay/args.py index 0cc88c6d..1983f127 100644 --- a/grizzly/replay/args.py +++ b/grizzly/replay/args.py @@ -2,7 +2,7 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -from os.path import isfile, isdir, join as pathjoin +from os.path import isfile from ..args import CommonArgs @@ -15,8 +15,9 @@ def __init__(self): "input", help="Accepted input includes: " \ "1) A directory containing testcase data. " \ - "2) A file to use as a testcase. " \ - "3) A zip archive containing testcases. " \ + "2) A directory with one or more subdirectories containing testcase data. " \ + "3) A zip archive containing testcase data or subdirectories containing testcase data. " \ + "4) A single file to be used as a testcase. 
" \ "When using a directory it must contain a 'test_info.json' file.") replay_args = self.parser.add_argument_group("Replay Arguments") @@ -59,10 +60,6 @@ def __init__(self): def sanity_check(self, args): super(ReplayArgs, self).sanity_check(args) - if "input" not in self._sanity_skip and isdir(args.input): - if not isfile(pathjoin(args.input, "test_info.json")): - self.parser.error("Test case folder must contain 'test_info.json'") - if args.any_crash and args.sig is not None: self.parser.error("signature is ignored when running with '--any-crash'") diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index e449a015..d3fbc621 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -4,12 +4,10 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. from logging import getLogger -from os.path import dirname, isfile, join as pathjoin -from shutil import copyfile, rmtree +from os.path import dirname, join as pathjoin from tempfile import mkdtemp from time import sleep -from zipfile import BadZipfile, ZipFile -from zlib import error as zlib_error +from shutil import rmtree from FTB.Signatures.CrashInfo import CrashSignature from sapphire import Sapphire, ServerMap @@ -77,55 +75,6 @@ def cleanup(self): if self.status is not None: self.status.cleanup() - @staticmethod - def load_testcases(path, load_prefs): - """Load TestCases from disk. - - Args: - path (str): Path to a file, directory or zip archive containing - testcase data. - load_prefs (bool): Load prefs.js file if available. - - Returns: - tuple: TestCases (list) and path to unpacked testcase data (str). - """ - unpacked = None - try: - if path.lower().endswith(".zip"): - unpacked = mkdtemp(prefix="unpack_", dir=grz_tmp("replay")) - try: - with ZipFile(path) as zip_fp: - zip_fp.extractall(path=unpacked) - except (BadZipfile, zlib_error): - raise TestCaseLoadFailure("Testcase archive is corrupted") - tc_paths = tuple(TestCase.scan_path(unpacked)) - testcases = list() - for tc_path in tc_paths: - try: - testcases.append(TestCase.load_path(tc_path, load_prefs=load_prefs)) - except TestCaseLoadFailure: # pragma: no cover - pass - testcases.sort(key=lambda tc: tc.timestamp) - if load_prefs: - # attempt to unpack prefs.js - for tc_path in tc_paths: - try: - copyfile( - pathjoin(tc_path, "prefs.js"), - pathjoin(unpacked, "prefs.js")) - except IOError: # pragma: no cover - continue - break - else: - testcases = [TestCase.load_path(path, load_prefs=load_prefs)] - if not testcases: - raise TestCaseLoadFailure("Failed to load TestCases") - except TestCaseLoadFailure: - if unpacked is not None: - rmtree(unpacked, ignore_errors=True) - raise - return testcases, unpacked - @property def other_reports(self): """Reports from results that do not match: @@ -353,27 +302,16 @@ def main(cls, args): LOG.debug("loading the TestCases") try: - testcases, unpacked = cls.load_testcases(args.input, args.prefs is None) + testcases = TestCase.load(args.input, args.prefs is None) + if not testcases: + raise TestCaseLoadFailure("Failed to load TestCases") except TestCaseLoadFailure as exc: LOG.error("Error: %s", str(exc)) return 1 - # prioritize specified prefs.js file over included file - if args.prefs is not None: - prefs = args.prefs - for testcase in testcases: - testcase.add_meta(TestFile.from_file(args.prefs, "prefs.js")) - LOG.info("Using specified prefs.js") - elif unpacked and isfile(pathjoin(unpacked, "prefs.js")): - prefs = pathjoin(unpacked, "prefs.js") - LOG.info("Using prefs.js from testcase") - elif 
isfile(pathjoin(args.input, "prefs.js")): - prefs = pathjoin(args.input, "prefs.js") - LOG.info("Using prefs.js from testcase") - else: - prefs = None replay = None target = None + tmp_prefs = None try: relaunch = min(args.relaunch, args.repeat) LOG.debug("initializing the Target") @@ -387,12 +325,24 @@ def main(cls, args): rr=args.rr, valgrind=args.valgrind, xvfb=args.xvfb) - if prefs is not None: - target.prefs = prefs + # prioritize specified prefs.js file over included file + if args.prefs is not None: + for testcase in testcases: + testcase.add_meta(TestFile.from_file(args.prefs, "prefs.js")) + LOG.info("Using specified prefs.js") + target.prefs = args.prefs + else: + for testcase in testcases: + prefs_tf = testcase.get_file("prefs.js") + if prefs_tf: + tmp_prefs = mkdtemp(prefix="prefs_", dir=grz_tmp("replay")) + prefs_tf.dump(tmp_prefs) + LOG.info("Using prefs.js from testcase") + target.prefs = pathjoin(tmp_prefs, "prefs.js") + break if testcases[0].env_vars.get("GRZ_FORCED_CLOSE") == "0": LOG.debug("setting target.forced_close=False") target.forced_close = False - LOG.debug("starting sapphire server") # launch HTTP server used to serve test cases with Sapphire(auto_close=1, timeout=args.timeout) as server: @@ -440,6 +390,6 @@ def main(cls, args): target.cleanup() for testcase in testcases: testcase.cleanup() - if unpacked is not None: - rmtree(unpacked, ignore_errors=True) + if tmp_prefs is not None: + rmtree(tmp_prefs, ignore_errors=True) LOG.info("Done.") diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 32a79900..5f7c3d13 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -22,45 +22,33 @@ def test_args_01(capsys, tmp_path): # missing args tests with raises(SystemExit): ReplayArgs().parse_args([]) - - # test case directory missing test_info.json + # specified prefs.js missing exe = tmp_path / "binary" exe.touch() inp = tmp_path / "input" inp.mkdir() (inp / "somefile").touch() - with raises(SystemExit): - ReplayArgs().parse_args([str(exe), str(inp)]) - assert "error: Test case folder must contain 'test_info.json'" in capsys.readouterr()[-1] - (inp / "test_info.json").touch() - # specified prefs.js missing with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp / "somefile"), "--prefs", "missing"]) assert "error: -p/--prefs not found 'missing'" in capsys.readouterr()[-1] - # test case directory (inp / "prefs.js").touch() ReplayArgs().parse_args([str(exe), str(inp)]) - # test case file ReplayArgs().parse_args([str(exe), str(inp / "somefile"), "--prefs", str(inp / "prefs.js")]) - # test negative min-crashes value with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--min-crashes", "-1"]) assert "error: '--min-crashes' value must be positive" in capsys.readouterr()[-1] - # test negative repeat value with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--repeat", "-1"]) assert "error: '--repeat' value must be positive" in capsys.readouterr()[-1] - # test missing signature file with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--sig", "missing"]) assert "error: signature file not found" in capsys.readouterr()[-1] - # test any crash and signature with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--any-crash", "--sig", "x"]) @@ -133,14 +121,14 @@ def test_main_02(mocker): mocker.patch("grizzly.replay.replay.Sapphire", autospec=True) mocker.patch("grizzly.replay.replay.TestCase", autospec=True) # setup args - args = mocker.Mock() - args.ignore = 
None - args.input = "test" - args.min_crashes = 1 - args.prefs = None - args.relaunch = 1 - args.repeat = 1 - args.sig = None + args = mocker.Mock( + ignore=None, + input="test", + min_crashes=1, + prefs=None, + relaunch=1, + repeat=1, + sig=None) mocker.patch("grizzly.replay.replay.ReplayManager.run", side_effect=TargetLaunchError) assert ReplayManager.main(args) == 1 @@ -151,7 +139,10 @@ def test_main_02(mocker): mocker.patch("grizzly.replay.replay.load_target", side_effect=KeyboardInterrupt) assert ReplayManager.main(args) == 1 - mocker.patch("grizzly.replay.replay.TestCase.load_path", side_effect=TestCaseLoadFailure) + mocker.patch("grizzly.replay.replay.TestCase.load", side_effect=TestCaseLoadFailure) + assert ReplayManager.main(args) == 1 + + mocker.patch("grizzly.replay.replay.TestCase.load", return_value=list()) assert ReplayManager.main(args) == 1 def test_main_03(mocker): @@ -164,20 +155,18 @@ def test_main_03(mocker): spec=TestCase, env_vars={"GRZ_FORCED_CLOSE": "0"}, landing_page="x.html") - mocker.patch("grizzly.replay.replay.TestCase.load_path", return_value=testcase) + mocker.patch("grizzly.replay.replay.TestCase.load", return_value=[testcase]) # setup args - args = mocker.Mock() - args.fuzzmanager = False - args.ignore = None - args.input = "test" - args.logs = None - args.min_crashes = 1 - args.prefs = None - args.relaunch = 1 - args.repeat = 1 - args.sig = None - args.timeout = 1 - + args = mocker.Mock( + fuzzmanager=False, + ignore=None, + input="test", + min_crashes=1, + prefs=None, + relaunch=1, + repeat=1, + sig=None, + timeout=1) ReplayManager.main(args) assert testcase.cleanup.call_count == 1 assert target.cleanup.call_count == 1 @@ -262,26 +251,3 @@ def _fake_save_logs(result_logs): assert log_path.is_dir() prefs = tuple(log_path.glob('**/prefs.js')) assert prefs[0].read_bytes() == b"specified" - -def test_main_05(mocker, tmp_path): - """test ReplayManager.main() - unpacked test case - prefs and cleanup """ - unpacked = (tmp_path / "unpacked") - unpacked.mkdir() - # prefs.js from unpacked path - (unpacked / "prefs.js").touch() - fake_load_testcases = mocker.patch("grizzly.replay.replay.ReplayManager.load_testcases") - fake_load_testcases.return_value = ([mocker.Mock(env_vars=dict())], str(unpacked)) - mocker.patch("grizzly.replay.replay.ReplayManager.run") - mocker.patch("grizzly.replay.replay.load_target") - mocker.patch("grizzly.replay.replay.Sapphire") - args = mocker.Mock( - fuzzmanager=False, - ignore=None, - min_crashes=1, - relaunch=1, - repeat=1, - prefs=None, - sig=None, - timeout=1) - assert ReplayManager.main(args) == 0 - assert not unpacked.is_dir() diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 5abc8f78..47bb4e8f 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -6,14 +6,12 @@ """ unit tests for grizzly.replay """ -from os import walk -from os.path import isfile, join as pathjoin, relpath -from zipfile import ZIP_DEFLATED, ZipFile +from os.path import join as pathjoin from pytest import raises from sapphire import Sapphire, SERVED_ALL, SERVED_REQUEST -from .replay import ReplayManager, TestCaseLoadFailure +from .replay import ReplayManager from ..common import Report, Status, TestCase from ..target import Target, TargetLaunchError @@ -314,73 +312,3 @@ def test_replay_12(mocker): assert replay.status.results == 1 assert len(replay.reports) == 1 assert not replay.other_reports - -def test_replay_13(tmp_path): - """test ReplayManager.load_testcases() - error cases""" - # test missing - with 
raises(TestCaseLoadFailure, match="Cannot find"): - ReplayManager.load_testcases("missing", False) - # test empty path - with raises(TestCaseLoadFailure, match="Missing 'test_info.json'"): - ReplayManager.load_testcases(str(tmp_path), False) - # test broken archive - archive = (tmp_path / "fake.zip") - archive.write_bytes(b"x") - with raises(TestCaseLoadFailure, match="Testcase archive is corrupted"): - ReplayManager.load_testcases(str(archive), False) - -def test_replay_14(tmp_path): - """test ReplayManager.load_testcases() - single file""" - tfile = (tmp_path / "testcase.html") - tfile.touch() - testcases, unpacked = ReplayManager.load_testcases(str(tfile), False) - try: - assert unpacked is None - assert len(tuple(testcases)) == 1 - finally: - map(lambda x: x.cleanup, testcases) - -def test_replay_15(tmp_path): - """test ReplayManager.load_testcases() - single directory""" - with TestCase("target.bin", None, "test-adapter") as src: - src.add_from_data("test", "target.bin") - src.dump(str(tmp_path), include_details=True) - testcases, unpacked = ReplayManager.load_testcases(str(tmp_path), False) - try: - assert unpacked is None - assert len(tuple(testcases)) == 1 - finally: - map(lambda x: x.cleanup, testcases) - -def test_replay_16(tmp_path): - """test ReplayManager.load_testcases() - archive""" - # build archive containing multiple testcases - with TestCase("target.bin", None, "test-adapter") as src: - src.add_from_data("test", "target.bin") - src.dump(str(tmp_path / "src-0"), include_details=True) - src.dump(str(tmp_path / "src-1"), include_details=True) - src.dump(str(tmp_path / "src-2"), include_details=True) - (tmp_path / "src-1" / "prefs.js").write_bytes(b"fake_prefs") - (tmp_path / "log_dummy.txt").touch() - (tmp_path / "not_a_tc").mkdir() - (tmp_path / "not_a_tc" / "file.txt").touch() - archive = str(tmp_path / "testcase.zip") - with ZipFile(archive, mode="w", compression=ZIP_DEFLATED) as zfp: - for dir_name, _, dir_files in walk(str(tmp_path)): - arc_path = relpath(dir_name, str(tmp_path)) - for file_name in dir_files: - zfp.write( - pathjoin(dir_name, file_name), - arcname=pathjoin(arc_path, file_name)) - testcases, unpacked = ReplayManager.load_testcases(str(archive), True) - try: - assert unpacked is not None - assert isfile(pathjoin(unpacked, "prefs.js")) - assert len(tuple(testcases)) == 3 - finally: - map(lambda x: x.cleanup, testcases) - # empty archive - with ZipFile(archive, mode="w", compression=ZIP_DEFLATED) as zfp: - zfp.write(str(tmp_path / "not_a_tc"), arcname="not_a_tc") - with raises(TestCaseLoadFailure, match="Failed to load TestCases"): - ReplayManager.load_testcases(str(archive), True) From 4318e742d157eafa946a0142b8d28d8c08b60044 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 25 Aug 2020 11:47:34 -0700 Subject: [PATCH 006/531] Remove Sapphire.serve_testcase() --- grizzly/common/runner.py | 33 ++++++++++++++++------ grizzly/common/test_runner.py | 42 +++++++++++++++++----------- grizzly/reduce/test_interesting.py | 3 +- grizzly/replay/test_main.py | 27 +++++++++--------- grizzly/replay/test_replay.py | 44 +++++++++++++++--------------- grizzly/test_session.py | 20 ++++++-------- sapphire/core.py | 30 -------------------- sapphire/test_sapphire.py | 33 ++++++++-------------- 8 files changed, 108 insertions(+), 124 deletions(-) diff --git a/grizzly/common/runner.py b/grizzly/common/runner.py index b1c20381..46630f6c 100644 --- a/grizzly/common/runner.py +++ b/grizzly/common/runner.py @@ -3,6 +3,8 @@ # License, v. 2.0. 
If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from logging import getLogger +from shutil import rmtree +from tempfile import mkdtemp from time import sleep, time from sapphire import SERVED_TIMEOUT @@ -149,13 +151,14 @@ def location(srv_path, srv_port, close_after=None, forced_close=True, timeout=No return "?".join([location, "&".join(args)]) return location - def run(self, ignore, server_map, testcase, coverage=False, wait_for_callback=False): + def run(self, ignore, server_map, testcase, test_path=None, coverage=False, wait_for_callback=False): """Serve a testcase and monitor the target for results. Args: ignore (list): List of failure types to ignore. server_map (sapphire.ServerMap): A ServerMap. testcase (grizzly.TestCase): The test case that will be served. + test_path (str): Location of test case data on the filesystem. coverage (bool): Trigger coverage dump. wait_for_callback: (bool): Use `_keep_waiting()` to indicate when framework should move on. @@ -169,13 +172,27 @@ def run(self, ignore, server_map, testcase, coverage=False, wait_for_callback=Fa self.timeout = False if self._idle is not None: self._idle.schedule_poll(initial=True) - # serve the test case - server_status, self.served = self._server.serve_testcase( - testcase, - continue_cb=self._keep_waiting, - forever=wait_for_callback, - server_map=server_map, - working_path=grz_tmp("serve")) + try: + # unpack test case + if test_path is None: + wwwdir = mkdtemp(prefix="test_", dir=grz_tmp("serve")) + testcase.dump(wwwdir) + else: + wwwdir = test_path + # serve the test case + serve_start = time() + server_status, self.served = self._server.serve_path( + wwwdir, + continue_cb=self._keep_waiting, + forever=wait_for_callback, + optional_files=tuple(testcase.optional), + server_map=server_map) + testcase.duration = time() - serve_start + finally: + # remove temporary files + if test_path is None: + rmtree(wwwdir) + # TODO: fix calling TestCase.add_batch() for multi-test replay # add all include files that were served for url, resource in server_map.include.items(): testcase.add_batch(resource.target, self.served, prefix=url) diff --git a/grizzly/common/test_runner.py b/grizzly/common/test_runner.py index 0a369a52..1d537054 100644 --- a/grizzly/common/test_runner.py +++ b/grizzly/common/test_runner.py @@ -13,7 +13,7 @@ from .storage import TestCase from ..target import Target, TargetLaunchError, TargetLaunchTimeout -def test_runner_01(mocker): +def test_runner_01(mocker, tmp_path): """test Runner()""" server = mocker.Mock(spec=Sapphire) target = mocker.Mock(spec=Target) @@ -24,33 +24,43 @@ def test_runner_01(mocker): assert runner.served is None assert not runner.timeout serv_files = ["a.bin", "/another/file.bin"] - testcase = mocker.Mock(spec=TestCase) - testcase.landing_page = serv_files[0] + testcase = mocker.Mock(spec=TestCase, landing_page=serv_files[0], optional=[]) # all files served - server.serve_testcase.return_value = (SERVED_ALL, serv_files) + server.serve_path.return_value = (SERVED_ALL, serv_files) runner.run([], ServerMap(), testcase) assert runner.result == runner.COMPLETE assert runner.served == serv_files assert not runner.timeout assert target.close.call_count == 0 assert target.dump_coverage.call_count == 0 + assert testcase.dump.call_count == 1 # some files served - server.serve_testcase.return_value = (SERVED_REQUEST, serv_files) + server.serve_path.return_value = (SERVED_REQUEST, serv_files) runner.run([], ServerMap(), testcase, coverage=True) 
assert runner.result == runner.COMPLETE assert runner.served == serv_files assert not runner.timeout assert target.close.call_count == 0 assert target.dump_coverage.call_count == 1 + # existing test path + testcase.reset_mock() + tc_path = (tmp_path / "tc") + tc_path.mkdir() + server.serve_path.return_value = (SERVED_ALL, serv_files) + runner.run([], ServerMap(), testcase, test_path=str(tc_path)) + assert runner.result == runner.COMPLETE + assert target.close.call_count == 0 + assert testcase.dump.call_count == 0 + assert tc_path.is_dir() def test_runner_02(mocker): """test Runner() errors""" server = mocker.Mock(spec=Sapphire) target = mocker.Mock(spec=Target) - testcase = mocker.Mock(spec=TestCase, landing_page="x") + testcase = mocker.Mock(spec=TestCase, landing_page="x", optional=[]) runner = Runner(server, target) # no files served - server.serve_testcase.return_value = (SERVED_NONE, []) + server.serve_path.return_value = (SERVED_NONE, []) target.detect_failure.return_value = target.RESULT_NONE runner.run([], ServerMap(), testcase) assert runner.result == runner.ERROR @@ -59,7 +69,7 @@ def test_runner_02(mocker): assert target.close.call_count == 1 target.reset_mock() # landing page not served - server.serve_testcase.return_value = (SERVED_REQUEST, ["harness"]) + server.serve_path.return_value = (SERVED_REQUEST, ["harness"]) runner.run([], ServerMap(), testcase) assert runner.result == runner.ERROR assert runner.served @@ -70,10 +80,10 @@ def test_runner_03(mocker): server = mocker.Mock(spec=Sapphire) target = mocker.Mock(spec=Target) serv_files = ["a.bin", "/another/file.bin"] - server.serve_testcase.return_value = (SERVED_TIMEOUT, serv_files) + server.serve_path.return_value = (SERVED_TIMEOUT, serv_files) runner = Runner(server, target) target.detect_failure.return_value = target.RESULT_FAILURE - runner.run([], ServerMap(), mocker.Mock(spec=TestCase, landing_page="x")) + runner.run([], ServerMap(), mocker.Mock(spec=TestCase, landing_page="x", optional=[])) assert runner.result == runner.FAILED assert runner.served == serv_files assert runner.timeout @@ -83,8 +93,8 @@ def test_runner_04(mocker): server = mocker.Mock(spec=Sapphire) target = mocker.Mock(spec=Target) serv_files = ["file.bin"] - server.serve_testcase.return_value = (SERVED_REQUEST, serv_files) - testcase = mocker.Mock(spec=TestCase, landing_page=serv_files[0]) + server.serve_path.return_value = (SERVED_REQUEST, serv_files) + testcase = mocker.Mock(spec=TestCase, landing_page=serv_files[0], optional=[]) runner = Runner(server, target) # test FAILURE target.detect_failure.return_value = target.RESULT_FAILURE @@ -99,7 +109,7 @@ def test_runner_04(mocker): assert runner.served == serv_files assert not runner.timeout # failure before serving landing page - server.serve_testcase.return_value = (SERVED_REQUEST, ["harness"]) + server.serve_path.return_value = (SERVED_REQUEST, ["harness"]) target.detect_failure.return_value = target.RESULT_FAILURE runner.run([], ServerMap(), testcase) assert runner.result == runner.FAILED @@ -112,10 +122,10 @@ def test_runner_05(mocker): target = mocker.Mock(spec=Target) target.detect_failure.return_value = target.RESULT_NONE serv_files = ["/fake/file", "/another/file.bin"] - server.serve_testcase.return_value = (SERVED_REQUEST, serv_files) + server.serve_path.return_value = (SERVED_REQUEST, serv_files) runner = Runner(server, target, idle_threshold=0.01, idle_delay=0.01) assert runner._idle is not None - 
runner.run([], ServerMap(), mocker.Mock(spec=TestCase, landing_page=serv_files[0], optional=[])) assert runner.result == runner.COMPLETE assert target.close.call_count == 0 @@ -203,7 +213,7 @@ def test_runner_09(mocker, tmp_path): smap.set_include("/", str(inc_path1)) smap.set_include("/test", str(inc_path2)) serv_files = ["a.b", str(inc1), str(inc2), str(inc3)] - server.serve_testcase.return_value = (SERVED_ALL, serv_files) + server.serve_path.return_value = (SERVED_ALL, serv_files) with TestCase("a.b", "x", "x") as tcase: runner.run([], smap, tcase) assert runner.result == runner.COMPLETE diff --git a/grizzly/reduce/test_interesting.py b/grizzly/reduce/test_interesting.py index c9a6fc5e..859469aa 100644 --- a/grizzly/reduce/test_interesting.py +++ b/grizzly/reduce/test_interesting.py @@ -32,8 +32,7 @@ def add_dynamic_response(self, *args, **kwds): def set_redirect(self, *args, **kwds): pass - def serve_testcase(self, testcase, **kwds): # pylint: disable=no-self-use,unused-argument - testcase.duration = 0.1 + def serve_path(self, *args, **kwds): # pylint: disable=no-self-use,unused-argument return sapphire.SERVED_ALL, ["test.html"] @property diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 5f7c3d13..cb8c99b2 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -59,9 +59,9 @@ def test_main_01(mocker, tmp_path): # This is a typical scenario - a test that reproduces results ~50% of the time. # Of the four attempts only the first and third will 'reproduce' the result # and the forth attempt should be skipped. - # mock Sapphire.serve_testcase only - serve_testcase = mocker.patch("grizzly.replay.replay.Sapphire.serve_testcase", autospec=True) - serve_testcase.return_value = (None, ["test.html"]) # passed to mocked Target.detect_failure + # mock Sapphire.serve_path only + serve_path = mocker.patch("grizzly.replay.replay.Sapphire.serve_path", autospec=True) + serve_path.return_value = (None, ["test.html"]) # passed to mocked Target.detect_failure # setup Target load_target = mocker.patch("grizzly.replay.replay.load_target") target = mocker.Mock(spec=Target, binary="bin", forced_close=True) @@ -104,7 +104,7 @@ def _fake_save_logs(result_logs): assert target.launch.call_count == 3 assert target.step.call_count == 3 assert target.detect_failure.call_count == 3 - assert serve_testcase.call_count == 3 + assert serve_path.call_count == 3 assert load_target.call_count == 1 assert target.close.call_count == 1 assert target.cleanup.call_count == 1 @@ -147,14 +147,15 @@ def test_main_02(mocker): def test_main_03(mocker): """test ReplayManager.main() loading GRZ_FORCED_CLOSE from test case""" - mocker.patch("grizzly.replay.replay.Sapphire.serve_testcase", return_value=(None, ["x.html"])) + mocker.patch("grizzly.replay.replay.Sapphire.serve_path", return_value=(None, ["x.html"])) target = mocker.Mock(spec=Target, forced_close=True) load_target = mocker.patch("grizzly.replay.replay.load_target", autospec=True) load_target.return_value.return_value = target testcase = mocker.Mock( spec=TestCase, env_vars={"GRZ_FORCED_CLOSE": "0"}, - landing_page="x.html") + landing_page="x.html", + optional=[]) mocker.patch("grizzly.replay.replay.TestCase.load", return_value=[testcase]) # setup args args = mocker.Mock( @@ -174,8 +175,8 @@ def test_main_03(mocker): def test_main_04(mocker, tmp_path): """test ReplayManager.main() loading/generating prefs.js""" - serve_testcase = mocker.patch("grizzly.replay.replay.Sapphire.serve_testcase", autospec=True) - 
serve_testcase.return_value = (None, ["test.html"]) # passed to mocked Target.detect_failure + serve_path = mocker.patch("grizzly.replay.replay.Sapphire.serve_path", autospec=True) + serve_path.return_value = (None, ["test.html"]) # passed to mocked Target.detect_failure # setup Target target = mocker.Mock(spec=Target, binary="bin", forced_close=True) target.RESULT_FAILURE = Target.RESULT_FAILURE @@ -219,12 +220,12 @@ def _fake_save_logs(result_logs): assert ReplayManager.main(args) == 0 assert target.launch.call_count == 1 assert target.detect_failure.call_count == 1 - assert serve_testcase.call_count == 1 + assert serve_path.call_count == 1 assert log_path.is_dir() assert not any(log_path.glob('**/prefs.js')) target.reset_mock() - serve_testcase.reset_mock() + serve_path.reset_mock() rmtree(str(log_path), ignore_errors=True) # test included prefs.js @@ -232,13 +233,13 @@ def _fake_save_logs(result_logs): assert ReplayManager.main(args) == 0 assert target.launch.call_count == 1 assert target.detect_failure.call_count == 1 - assert serve_testcase.call_count == 1 + assert serve_path.call_count == 1 assert log_path.is_dir() prefs = tuple(log_path.glob('**/prefs.js')) assert prefs[0].read_bytes() == b"included" target.reset_mock() - serve_testcase.reset_mock() + serve_path.reset_mock() rmtree(str(log_path), ignore_errors=True) # test specified prefs.js @@ -247,7 +248,7 @@ def _fake_save_logs(result_logs): assert ReplayManager.main(args) == 0 assert target.launch.call_count == 1 assert target.detect_failure.call_count == 1 - assert serve_testcase.call_count == 1 + assert serve_path.call_count == 1 assert log_path.is_dir() prefs = tuple(log_path.glob('**/prefs.js')) assert prefs[0].read_bytes() == b"specified" diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 47bb4e8f..efa85854 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -44,14 +44,14 @@ def test_replay_01(mocker): def test_replay_02(mocker): """test ReplayManager.run() - no repro""" server = mocker.Mock(spec=Sapphire, port=0x1337) - server.serve_testcase.return_value = (SERVED_ALL, ["index.html"]) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target) target.RESULT_NONE = Target.RESULT_NONE target.closed = True target.detect_failure.return_value = Target.RESULT_NONE target.forced_close = True target.rl_reset = 1 - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, testcases, use_harness=True) as replay: assert not replay.run() assert replay.status.ignored == 0 @@ -62,13 +62,13 @@ def test_replay_02(mocker): def test_replay_03(mocker): """test ReplayManager.run() - successful repro""" server = mocker.Mock(spec=Sapphire, port=0x1337) - server.serve_testcase.return_value = (SERVED_ALL, ["index.html"]) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target) target.RESULT_FAILURE = Target.RESULT_FAILURE target.binary = "C:\\fake_bin" target.detect_failure.return_value = Target.RESULT_FAILURE target.save_logs = _fake_save_logs_result - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, testcases, use_harness=False) as replay: assert replay.run() assert replay.status.ignored 
== 0 @@ -80,11 +80,11 @@ def test_replay_03(mocker): def test_replay_04(mocker): """test ReplayManager.run() - Error (landing page not requested/served)""" server = mocker.Mock(spec=Sapphire, port=0x1337) - server.serve_testcase.return_value = (SERVED_REQUEST, ["x"]) + server.serve_path.return_value = (SERVED_REQUEST, ["x"]) target = mocker.Mock(spec=Target) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, testcases, use_harness=False) as replay: assert not replay.run(repeat=2) assert replay.status.ignored == 0 @@ -97,11 +97,11 @@ def test_replay_04(mocker): def test_replay_05(mocker): """test ReplayManager.run() - ignored""" server = mocker.Mock(spec=Sapphire, port=0x1337) - server.serve_testcase.return_value = (SERVED_ALL, ["index.html"]) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target) target.RESULT_IGNORED = Target.RESULT_IGNORED target.detect_failure.return_value = Target.RESULT_IGNORED - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, testcases, use_harness=False) as replay: assert not replay.run() assert replay.status.ignored == 1 @@ -113,14 +113,14 @@ def test_replay_05(mocker): def test_replay_06(mocker): """test ReplayManager.run() - early exit""" server = mocker.Mock(spec=Sapphire, port=0x1337) - server.serve_testcase.return_value = (SERVED_ALL, ["index.html"]) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_IGNORED = Target.RESULT_IGNORED target.RESULT_NONE = Target.RESULT_NONE target.binary = "path/fake_bin" target.save_logs = _fake_save_logs_result - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] # early failure target.detect_failure.side_effect = [Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_NONE] with ReplayManager([], server, target, testcases, use_harness=False) as replay: @@ -157,14 +157,14 @@ def test_replay_07(mocker, tmp_path): report.from_path.side_effect = (report_0, report_1, report_2) server = mocker.Mock(spec=Sapphire) server.port = 0x1337 - server.serve_testcase.return_value = (SERVED_ALL, ["index.html"]) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) signature = mocker.Mock() signature.matches.side_effect = (True, False) target = mocker.Mock(spec=Target) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.binary = "fake_bin" - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, testcases, signature=signature, use_harness=False) as replay: assert not replay.run(repeat=3, min_results=2) assert replay._signature == signature @@ -197,12 +197,12 @@ def test_replay_08(mocker, tmp_path): report_2.minor = "abcd9876" report.from_path.side_effect = (report_0, report_1, report_2) server = 
mocker.Mock(spec=Sapphire, port=0x1337) - server.serve_testcase.return_value = (SERVED_ALL, ["index.html"]) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.binary = "fake_bin" - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, testcases, any_crash=True, use_harness=False) as replay: assert replay.run(repeat=3, min_results=2) assert replay._signature is None @@ -271,7 +271,7 @@ def test_replay_10(mocker, tmp_path): def test_replay_11(mocker): """test ReplayManager.run() - multiple TestCases - no repro""" server = mocker.Mock(spec=Sapphire, port=0x1337) - server.serve_testcase.return_value = (SERVED_ALL, ["index.html"]) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target) target.RESULT_NONE = Target.RESULT_NONE target.closed = True @@ -279,9 +279,9 @@ def test_replay_11(mocker): target.forced_close = True target.rl_reset = 1 testcases = [ - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html"), - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html"), - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, testcases, use_harness=True) as replay: assert not replay.run() assert replay.status.ignored == 0 @@ -292,7 +292,7 @@ def test_replay_11(mocker): def test_replay_12(mocker): """test ReplayManager.run() - multiple TestCases - successful repro""" server = mocker.Mock(spec=Sapphire, port=0x1337) - server.serve_testcase.return_value = (SERVED_ALL, ["index.html"]) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target, binary="fake_bin", rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_NONE = Target.RESULT_NONE @@ -302,9 +302,9 @@ def test_replay_12(mocker): Target.RESULT_FAILURE) target.save_logs = _fake_save_logs_result testcases = [ - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html"), - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html"), - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, testcases, use_harness=True) as replay: assert replay.run() assert replay.status.ignored == 0 diff --git a/grizzly/test_session.py b/grizzly/test_session.py index 8318fe71..0ee4b227 100644 --- a/grizzly/test_session.py +++ b/grizzly/test_session.py @@ -36,9 +36,7 @@ def generate(self, testcase, server_map): # set target.log_size to test warning code path fake_target.log_size.return_value = Session.TARGET_LOG_SIZE_WARN + 1 with IOManager() as iomgr: - def fake_serve_tc(tcase, **_): - return (SERVED_ALL, [tcase.landing_page]) - fake_serv.serve_testcase = fake_serve_tc + fake_serv.serve_path = lambda *a, **kv: (SERVED_ALL, 
[iomgr.page_name(offset=-1)]) with Session(adapter, iomgr, None, fake_serv, fake_target) as session: session.run([]) assert session.status.iteration == 5 @@ -56,7 +54,6 @@ def generate(self, testcase, server_map): adapter = FuzzAdapter() adapter.setup(None, None) fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) - fake_serv.serve_testcase.side_effect = lambda tc, **_: (SERVED_ALL, [tc.landing_page]) prefs = tmp_path / "prefs.js" prefs.touch() fake_target = mocker.Mock(spec=Target, prefs=str(prefs), rl_reset=10) @@ -64,6 +61,7 @@ def generate(self, testcase, server_map): fake_target.monitor.launches = 1 with IOManager() as iomgr: iomgr.harness = adapter.get_harness() + fake_serv.serve_path = lambda *a, **kv: (SERVED_ALL, [iomgr.page_name(offset=-1)]) with Session(adapter, iomgr, None, fake_serv, fake_target) as session: session.run([], iteration_limit=10) assert session.status.iteration == 10 @@ -74,13 +72,13 @@ def test_session_03(tmp_path, mocker): Status.PATH = str(tmp_path) adapter = mocker.Mock(spec=Adapter, remaining=None) fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) - fake_serv.serve_testcase.side_effect = lambda tc, **_: (SERVED_ALL, [tc.landing_page]) fake_target = mocker.Mock(spec=Target, prefs=None, rl_reset=2) fake_target.log_size.return_value = 1000 fake_target.monitor.launches = 1 # target.launch() call will be skipped fake_target.closed = False with IOManager() as iomgr: + fake_serv.serve_path = lambda *a, **kv: (SERVED_ALL, [iomgr.page_name(offset=-1)]) with Session(adapter, iomgr, None, fake_serv, fake_target, coverage=True) as session: session.run([], iteration_limit=2) assert session.status.iteration == 2 @@ -101,13 +99,13 @@ def generate(self, testcase, server_map): fake_target = mocker.Mock(spec=Target, prefs=None) fake_target.monitor.launches = 1 with IOManager() as iomgr: - fake_serv.serve_testcase.return_value = (SERVED_NONE, []) + fake_serv.serve_path.return_value = (SERVED_NONE, []) # test error on first iteration with Session(adapter, iomgr, None, fake_serv, fake_target) as session: with raises(SessionError, match="Please check Adapter and Target"): session.run([], iteration_limit=10) # test that we continue if error happens later on - fake_serv.serve_testcase.return_value = (SERVED_REQUEST, ["x"]) + fake_serv.serve_path.return_value = (SERVED_REQUEST, ["x"]) with Session(adapter, iomgr, None, fake_serv, fake_target) as session: session.status.iteration = 2 session.run([], iteration_limit=3) @@ -118,7 +116,7 @@ def test_session_05(tmp_path, mocker): fake_adapter = mocker.Mock(spec=Adapter) fake_adapter.TEST_DURATION = 10 fake_adapter.remaining = None - fake_testcase = mocker.Mock(spec=TestCase, landing_page="page.htm") + fake_testcase = mocker.Mock(spec=TestCase, landing_page="page.htm", optional=[]) fake_iomgr = mocker.Mock(spec=IOManager) fake_iomgr.server_map = ServerMap() fake_iomgr.create_testcase.return_value = fake_testcase @@ -126,7 +124,7 @@ def test_session_05(tmp_path, mocker): fake_iomgr.tests = mocker.Mock(spec=deque) fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) # return SERVED_TIMEOUT to test IGNORE_UNSERVED code path - fake_serv.serve_testcase.return_value = (SERVED_TIMEOUT, [fake_testcase.landing_page]) + fake_serv.serve_path.return_value = (SERVED_TIMEOUT, [fake_testcase.landing_page]) fake_target = mocker.Mock(spec=Target, prefs=None) fake_target.monitor.launches = 1 with Session(fake_adapter, fake_iomgr, None, fake_serv, fake_target) as session: @@ -139,7 +137,7 @@ def test_session_05(tmp_path, mocker): assert 
fake_iomgr.create_testcase.call_count == 1 assert fake_iomgr.tests.pop.call_count == 0 assert fake_testcase.purge_optional.call_count == 1 - assert fake_serv.serve_testcase.call_count == 1 + assert fake_serv.serve_path.call_count == 1 assert fake_target.launch.call_count == 1 assert fake_target.detect_failure.call_count == 1 assert fake_target.step.call_count == 1 @@ -183,7 +181,6 @@ def test_session_07(tmp_path, mocker): fake_target = mocker.Mock(spec=Target, prefs="prefs.js") fake_target.monitor.launches = 1 with Session(fake_adapter, fake_iomgr, fake_reporter, fake_serv, fake_target) as session: - session.server.serve_testcase.return_value = SERVED_REQUEST fake_runner.return_value.result = fake_runner.return_value.FAILED fake_runner.return_value.served = ["/fake/file"] fake_runner.return_value.timeout = False @@ -218,7 +215,6 @@ def test_session_08(tmp_path, mocker): # ignored results should not be reported so raise AssertionError if report_result is called mocker.patch.object(Session, 'report_result', side_effect=AssertionError) with Session(fake_adapter, fake_iomgr, None, fake_serv, fake_target) as session: - session.server.serve_testcase.return_value = SERVED_REQUEST fake_runner.return_value.result = fake_runner.return_value.IGNORED fake_runner.return_value.served = [] fake_runner.return_value.timeout = False diff --git a/sapphire/core.py b/sapphire/core.py index df28e7a7..0efd5372 100644 --- a/sapphire/core.py +++ b/sapphire/core.py @@ -9,9 +9,7 @@ import logging import os import random -import shutil import socket -import tempfile import time from .sapphire_job import SapphireJob @@ -116,34 +114,6 @@ def serve_path(self, path, continue_cb=None, forever=False, optional_files=None, LOG.debug("status: %r, timeout: %r", job.status, was_timeout) return (SERVED_TIMEOUT if was_timeout else job.status, tuple(job.served)) - def serve_testcase(self, testcase, continue_cb=None, forever=False, working_path=None, server_map=None): - """ - serve_testcase() -> tuple - testcase is the Grizzly TestCase to serve. The callback continue_cb should - be a function that returns True or False. If continue_cb is specified and returns False - the server serve loop will exit. working_path is where the testcase will be unpacked - temporary. 
- - returns a tuple (server status, files served) - see serve_path() for more info - """ - LOG.debug("serve_testcase() called") - wwwdir = tempfile.mkdtemp(prefix="sphr_test_", dir=working_path) - try: - testcase.dump(wwwdir) - serve_start = time.time() - result = self.serve_path( - wwwdir, - continue_cb=continue_cb, - forever=forever, - optional_files=tuple(testcase.optional), - server_map=server_map) - testcase.duration = time.time() - serve_start - finally: - # remove test case working directory - shutil.rmtree(wwwdir, ignore_errors=True) - return result - @property def timeout(self): return self._timeout diff --git a/sapphire/test_sapphire.py b/sapphire/test_sapphire.py index 24719e97..0d82cd24 100644 --- a/sapphire/test_sapphire.py +++ b/sapphire/test_sapphire.py @@ -550,20 +550,7 @@ def test_sapphire_27(client, tmp_path): assert status == SERVED_TIMEOUT assert len(files_served) < len(files_to_serve) -def test_sapphire_28(client, tmp_path): - """test Sapphire.serve_testcase()""" - with TestCase("test.html", "none.test", "foo") as test: - test.add_from_data(b"test", "test.html") - t_file = _create_test(test.landing_page, tmp_path) - with Sapphire(timeout=10) as serv: - client.launch("127.0.0.1", serv.port, [t_file]) - assert test.duration is None - status, files_served = serv.serve_testcase(test) - assert status == SERVED_ALL - assert files_served - assert test.duration >= 0 - -def test_sapphire_29(client_factory, tmp_path): +def test_sapphire_28(client_factory, tmp_path): """test Sapphire.serve_path() with forever=True""" clients = list() with Sapphire(timeout=10) as serv: @@ -584,7 +571,7 @@ def _test_callback(): assert test.code == 200 assert test.len_srv == test.len_org -def test_sapphire_30(client, tmp_path): +def test_sapphire_29(client, tmp_path): """test interesting file names""" to_serve = [ # space in file name @@ -595,10 +582,9 @@ def test_sapphire_30(client, tmp_path): client.launch("127.0.0.1", serv.port, to_serve) assert serv.serve_path(str(tmp_path))[0] == SERVED_ALL assert client.wait(timeout=10) - for t_file in to_serve: - assert t_file.code == 200 + assert all(t_file.code == 200 for t_file in to_serve) -def test_sapphire_31(client, tmp_path): +def test_sapphire_30(client, tmp_path): """test interesting path string""" all_bytes = "".join(chr(i) for i in range(256)) to_serve = [ @@ -610,10 +596,9 @@ def test_sapphire_31(client, tmp_path): client.launch("127.0.0.1", serv.port, to_serve, in_order=True) assert serv.serve_path(str(tmp_path), optional_files=[all_bytes])[0] == SERVED_ALL assert client.wait(timeout=10) - for t_file in to_serve: - assert t_file.code is not None + assert all(t_file.code is not None for t_file in to_serve) -def test_sapphire_32(mocker): +def test_sapphire_31(mocker): """test Sapphire._create_listening_socket()""" fake_sleep = mocker.patch("sapphire.core.time.sleep", autospec=True) fake_sock = mocker.patch("sapphire.core.socket.socket", autospec=True) @@ -648,4 +633,10 @@ def test_main_01(mocker, tmp_path): port=4536, remote=False, timeout=None) + fake_srv = mocker.patch("sapphire.core.Sapphire.serve_path", autospec=True) + fake_srv.return_value = (SERVED_ALL, None) + Sapphire.main(args) + fake_srv.return_value = (SERVED_NONE, None) + Sapphire.main(args) + fake_srv.side_effect = KeyboardInterrupt Sapphire.main(args) From 3fd861ab7502ab4223b49896d5a5a4037ef8272e Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 25 Aug 2020 12:54:15 -0700 Subject: [PATCH 007/531] [replay] Unpack testcase data once --- grizzly/replay/replay.py | 24 
++++++++++++++++++++++-- grizzly/replay/test_replay.py | 15 +++++++++++++++ 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index d3fbc621..4cb24261 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -31,7 +31,7 @@ class ReplayManager(object): __slots__ = ("ignore", "server", "status", "target", "testcases", "_any_crash", "_harness", "_reports_expected", "_reports_other", "_runner", - "_signature") + "_signature", "_unpacked") def __init__(self, ignore, server, target, testcases, any_crash=False, signature=None, use_harness=True): self.ignore = ignore @@ -46,7 +46,7 @@ def __init__(self, ignore, server, target, testcases, any_crash=False, signature self._runner = Runner(self.server, self.target) # TODO: make signature a property self._signature = signature - + self._unpacked = list() if use_harness: with open(self.HARNESS_FILE, "rb") as in_fp: self._harness = in_fp.read() @@ -57,6 +57,21 @@ def __enter__(self): def __exit__(self, *exc): self.cleanup() + def _unpack_tests(self): + """Unpack testcase data to known locations. + + Args: + None + + Returns: + None + """ + assert not self._unpacked + for test in self.testcases: + dst_path = mkdtemp(prefix="tc_", dir=grz_tmp("serve")) + test.dump(dst_path) + self._unpacked.append(dst_path) + def cleanup(self): """Remove temporary files from disk. @@ -72,6 +87,8 @@ def cleanup(self): for report in self._reports_other.values(): report.cleanup() self._reports_other.clear() + for tc_path in self._unpacked: + rmtree(tc_path) if self.status is not None: self.status.cleanup() @@ -144,6 +161,8 @@ def run(self, repeat=1, min_results=1): assert min_results <= repeat self.status = Status.start() + self._unpack_tests() + server_map = ServerMap() if self._harness is not None: def _dyn_close(): # pragma: no cover @@ -197,6 +216,7 @@ def _dyn_close(): # pragma: no cover self.ignore, server_map, self.testcases[test_idx], + test_path=self._unpacked[test_idx], wait_for_callback=self._harness is None) if self._runner.result != self._runner.COMPLETE: break diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index efa85854..a99015f4 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -33,6 +33,7 @@ def test_replay_01(mocker): replay = ReplayManager([], mocker.Mock(spec=Sapphire), mocker.Mock(spec=Target), [mocker.Mock()]) replay._reports_expected = {"A": mocker.Mock(spec=Report)} replay._reports_other = {"B": mocker.Mock(spec=Report)} + assert not replay._unpacked replay.status = mocker.Mock(spec=Status) ereport = tuple(replay.reports)[0] oreport = tuple(replay.other_reports)[0] @@ -53,7 +54,9 @@ def test_replay_02(mocker): target.rl_reset = 1 testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, testcases, use_harness=True) as replay: + assert not replay._unpacked assert not replay.run() + assert replay._unpacked assert replay.status.ignored == 0 assert replay.status.iteration == 1 assert replay.status.results == 0 @@ -312,3 +315,15 @@ def test_replay_12(mocker): assert replay.status.results == 1 assert len(replay.reports) == 1 assert not replay.other_reports + +def test_replay_13(mocker, tmp_path): + """test ReplayManager._unpacked()""" + server = mocker.Mock(spec=Sapphire, port=0x1337) + testcase = mocker.Mock(spec=TestCase, env_vars=[], optional=[]) + mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) + with 
ReplayManager([], server, mocker.Mock(spec=Target), [testcase]) as replay: + assert not replay._unpacked + replay._unpack_tests() + assert replay._unpacked + assert replay._unpacked[0] == str(tmp_path) + assert testcase.dump.call_count == 1 From e9ded587969285a4ae1c773501e102257f7b15cf Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 26 Aug 2020 16:46:19 -0700 Subject: [PATCH 008/531] [replay] Add more sanity checks * Fix setting target.relaunch. Thanks @jschwartzentruber. --- grizzly/replay/args.py | 5 ++++- grizzly/replay/replay.py | 21 ++++++++++++--------- grizzly/replay/test_main.py | 34 ++++++++++++++++++++++++---------- 3 files changed, 40 insertions(+), 20 deletions(-) diff --git a/grizzly/replay/args.py b/grizzly/replay/args.py index 1983f127..78696a14 100644 --- a/grizzly/replay/args.py +++ b/grizzly/replay/args.py @@ -40,7 +40,7 @@ def __init__(self): " Helpful for intermittent testcases (default: %(default)sx)") replay_args.add_argument( "--no-harness", action="store_true", - help="Don't use the harness for redirection") + help="Don't use the harness for redirection. Implies '--relaunch=1'.") replay_args.add_argument( "--repeat", type=int, default=1, help="Run the testcase n times." \ @@ -66,6 +66,9 @@ def sanity_check(self, args): if args.min_crashes < 1: self.parser.error("'--min-crashes' value must be positive") + if args.no_harness: + args.relaunch = 1 + if args.repeat < 1: self.parser.error("'--repeat' value must be positive") diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 4cb24261..8d74ef27 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -38,6 +38,7 @@ def __init__(self, ignore, server, target, testcases, any_crash=False, signature self.server = server self.status = None self.target = target + # TODO: make testcases getter and setter self.testcases = testcases self._any_crash = any_crash self._harness = None @@ -161,6 +162,7 @@ def run(self, repeat=1, min_results=1): assert min_results <= repeat self.status = Status.start() + # TODO: should only be called if needed self._unpack_tests() server_map = ServerMap() @@ -333,7 +335,14 @@ def main(cls, args): target = None tmp_prefs = None try: - relaunch = min(args.relaunch, args.repeat) + if args.no_harness and len(testcases) > 1: + LOG.error("'--no-harness' cannot be used with multiple testcases") + return 1 + repeat = max(args.min_crashes, args.repeat) + relaunch = min(args.relaunch, repeat) + assert not args.no_harness or (args.no_harness and relaunch == 1) + LOG.info("Repeat: %d, Minimum crashes: %d, Relaunch %d", + repeat, args.min_crashes, relaunch) LOG.debug("initializing the Target") target = load_target(args.platform)( args.binary, @@ -367,12 +376,6 @@ def main(cls, args): # launch HTTP server used to serve test cases with Sapphire(auto_close=1, timeout=args.timeout) as server: target.reverse(server.port, server.port) - if args.no_harness: - LOG.debug("--no-harness specified relaunch set to 1") - args.relaunch = 1 - args.repeat = max(args.min_crashes, args.repeat) - LOG.info("Repeat: %d, Minimum crashes: %d, Relaunch %d", - args.repeat, args.min_crashes, relaunch) replay = ReplayManager( args.ignore, server, @@ -381,7 +384,7 @@ def main(cls, args): any_crash=args.any_crash, signature=signature, use_harness=not args.no_harness) - success = replay.run(repeat=args.repeat, min_results=args.min_crashes) + success = replay.run(repeat=repeat, min_results=args.min_crashes) if args.logs: replay.report_to_filesystem( args.logs, @@ -395,7 +398,7 @@ def main(cls, args): return 1 
except (TargetLaunchError, TargetLaunchTimeout): - if args.logs: + if args.logs and replay is not None: replay.report_to_filesystem( args.logs, replay.reports, diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index cb8c99b2..b6b404c4 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -53,6 +53,10 @@ def test_args_01(capsys, tmp_path): with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--any-crash", "--sig", "x"]) assert "error: signature is ignored when running with '--any-crash'" in capsys.readouterr()[-1] + # force relaunch == 1 with --no-harness + args = ReplayArgs().parse_args([str(exe), str(inp), "--no-harness"]) + assert args.relaunch == 1 + def test_main_01(mocker, tmp_path): """test ReplayManager.main()""" @@ -117,33 +121,43 @@ def _fake_save_logs(result_logs): def test_main_02(mocker): """test ReplayManager.main() failure cases""" mocker.patch("grizzly.replay.replay.FuzzManagerReporter", autospec=True) - mocker.patch("grizzly.replay.replay.load_target", autospec=True) + fake_load_target = mocker.patch("grizzly.replay.replay.load_target", autospec=True) mocker.patch("grizzly.replay.replay.Sapphire", autospec=True) - mocker.patch("grizzly.replay.replay.TestCase", autospec=True) + fake_tc = mocker.patch("grizzly.replay.replay.TestCase", autospec=True) # setup args args = mocker.Mock( ignore=None, input="test", min_crashes=1, + no_harness=True, prefs=None, relaunch=1, repeat=1, sig=None) - + # target launch error mocker.patch("grizzly.replay.replay.ReplayManager.run", side_effect=TargetLaunchError) assert ReplayManager.main(args) == 1 - + # target launch timeout mocker.patch("grizzly.replay.replay.ReplayManager.run", side_effect=TargetLaunchTimeout) assert ReplayManager.main(args) == 1 - - mocker.patch("grizzly.replay.replay.load_target", side_effect=KeyboardInterrupt) + # user abort + fake_load_target.side_effect = KeyboardInterrupt assert ReplayManager.main(args) == 1 - - mocker.patch("grizzly.replay.replay.TestCase.load", side_effect=TestCaseLoadFailure) + # invalid test case + fake_load_target.reset_mock() + fake_tc.load.side_effect = TestCaseLoadFailure assert ReplayManager.main(args) == 1 - - mocker.patch("grizzly.replay.replay.TestCase.load", return_value=list()) + assert fake_load_target.call_count == 0 + # no test cases + fake_tc.load.side_effect = None + fake_tc.load.return_value = list() assert ReplayManager.main(args) == 1 + assert fake_load_target.call_count == 0 + # multiple test cases with --no-harness + fake_load_target.reset_mock() + fake_tc.load.return_value = [mocker.Mock(), mocker.Mock()] assert ReplayManager.main(args) == 1 + assert fake_load_target.call_count == 0 From 43a3076348689e640b1c60f1f205bd153787e54c Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 27 Aug 2020 12:18:59 -0700 Subject: [PATCH 009/531] [replay] Pass testcases directly to ReplayManager.run() --- grizzly/replay/replay.py | 281 +++++++++++++++++----------------- grizzly/replay/test_replay.py | 119 ++++++-------- 2 files changed, 188 insertions(+), 212 deletions(-) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 8d74ef27..b906e204 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -29,17 +29,15 @@ class ReplayManager(object): HARNESS_FILE = pathjoin(dirname(__file__), "..", "common", "harness.html") - __slots__ = ("ignore", "server", "status", "target", "testcases", "_any_crash", 
+ __slots__ = ("ignore", "server", "status", "target", "_any_crash", "_harness", "_reports_expected", "_reports_other", "_runner", "_signature", "_unpacked") - def __init__(self, ignore, server, target, testcases, any_crash=False, signature=None, use_harness=True): + def __init__(self, ignore, server, target, any_crash=False, signature=None, use_harness=True): self.ignore = ignore self.server = server self.status = None self.target = target - # TODO: make testcases getter and setter - self.testcases = testcases self._any_crash = any_crash self._harness = None self._reports_expected = dict() @@ -47,7 +45,6 @@ def __init__(self, ignore, server, target, testcases, any_crash=False, signature self._runner = Runner(self.server, self.target) # TODO: make signature a property self._signature = signature - self._unpacked = list() if use_harness: with open(self.HARNESS_FILE, "rb") as in_fp: self._harness = in_fp.read() @@ -58,21 +55,6 @@ def __enter__(self): def __exit__(self, *exc): self.cleanup() - def _unpack_tests(self): - """Unpack testcase data to known locations. - - Args: - None - - Returns: - None - """ - assert not self._unpacked - for test in self.testcases: - dst_path = mkdtemp(prefix="tc_", dir=grz_tmp("serve")) - test.dump(dst_path) - self._unpacked.append(dst_path) - def cleanup(self): """Remove temporary files from disk. @@ -88,8 +70,6 @@ def cleanup(self): for report in self._reports_other.values(): report.cleanup() self._reports_other.clear() - for tc_path in self._unpacked: - rmtree(tc_path) if self.status is not None: self.status.cleanup() @@ -146,11 +126,12 @@ def report_to_filesystem(path, reports, other_reports=None, tests=None): for report in other_reports: reporter.submit(tests, report=report) - def run(self, repeat=1, min_results=1): + def run(self, testcases, repeat=1, min_results=1): """Run testcase replay. Args: - repeat (int): Maximum number of times to run the test case. + testcases (list): One or more TestCases to run. + repeat (int): Maximum number of times to run the TestCase. min_results (int): Minimum number of results needed before run can be considered successful. @@ -160,10 +141,10 @@ def run(self, repeat=1, min_results=1): assert repeat > 0 assert min_results > 0 assert min_results <= repeat - self.status = Status.start() - # TODO: should only be called if needed - self._unpack_tests() + self.status = Status.start() + test_count = len(testcases) + assert test_count > 0 server_map = ServerMap() if self._harness is not None: @@ -177,122 +158,139 @@ def _dyn_close(): # pragma: no cover server_map.set_dynamic_response("grz_harness", lambda: self._harness, mime_type="text/html") success = False - test_count = len(self.testcases) - for _ in range(repeat): - self.status.iteration += 1 - if self.target.closed: - LOG.info("Launching target...") - if self._harness is None: - location = self._runner.location( - "/grz_current_test", - self.server.port) - else: - location = self._runner.location( - "/grz_harness", - self.server.port, - close_after=self.target.rl_reset * test_count, - forced_close=self.target.forced_close) - try: - # The environment from the initial testcase is used because - # a sequence of testcases is expected to be run without - # relaunching the Target to match the functionality of - # Grizzly. If this is not the case each TestCase should - # be run individually. - self._runner.launch(location, env_mod=self.testcases[0].env_vars) - except TargetLaunchError: - LOG.error("Target launch error. 
Check browser logs for details.") + unpacked = list() + try: + LOG.debug("unpacking testcases (%d)...", test_count) + for test in testcases: + dst_path = mkdtemp(prefix="tc_", dir=grz_tmp("serve")) + test.dump(dst_path) + unpacked.append(dst_path) + # perform iterations + for _ in range(repeat): + self.status.iteration += 1 + if self.target.closed: + LOG.info("Launching target...") + if self._harness is None: + location = self._runner.location( + "/grz_current_test", + self.server.port) + else: + location = self._runner.location( + "/grz_harness", + self.server.port, + close_after=self.target.rl_reset * test_count, + forced_close=self.target.forced_close) + try: + # The environment from the initial testcase is used because + # a sequence of testcases is expected to be run without + # relaunching the Target to match the functionality of + # Grizzly. If this is not the case each TestCase should + # be run individually. + self._runner.launch(location, env_mod=testcases[0].env_vars) + except TargetLaunchError: + LOG.error("Target launch error. Check browser logs for details.") + log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) + self.target.save_logs(log_path) + self._reports_other["STARTUP"] = Report.from_path(log_path) + raise + self.target.step() + LOG.info("Performing replay (%d/%d)...", self.status.iteration, repeat) + # run tests + for test_idx in range(test_count): + LOG.debug("running test: %d of %d", test_idx + 1, test_count) + # update redirects + if self._harness is not None: + next_idx = (test_idx + 1) % test_count + server_map.set_redirect( + "grz_next_test", + testcases[next_idx].landing_page, + required=True) + server_map.set_redirect( + "grz_current_test", + testcases[test_idx].landing_page, + required=False) + # run testcase + self._runner.run( + self.ignore, + server_map, + testcases[test_idx], + test_path=unpacked[test_idx], + wait_for_callback=self._harness is None) + if self._runner.result != self._runner.COMPLETE: + break + # process results + if self._runner.result == self._runner.FAILED: log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) self.target.save_logs(log_path) - self._reports_other["STARTUP"] = Report.from_path(log_path) - raise - self.target.step() - LOG.info("Performing replay (%d/%d)...", self.status.iteration, repeat) - # run test cases - for test_idx in range(test_count): - LOG.debug("running test: %d of %d", test_idx + 1, test_count) - if self._harness is not None: - next_idx = (test_idx + 1) % test_count - server_map.set_redirect("grz_next_test", self.testcases[next_idx].landing_page, required=True) - server_map.set_redirect("grz_current_test", self.testcases[test_idx].landing_page, required=False) - self._runner.run( - self.ignore, - server_map, - self.testcases[test_idx], - test_path=self._unpacked[test_idx], - wait_for_callback=self._harness is None) - if self._runner.result != self._runner.COMPLETE: - break - # process results - if self._runner.result == self._runner.FAILED: - log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) - self.target.save_logs(log_path) - report = Report.from_path(log_path) - # check signatures - crash_info = report.crash_info(self.target.binary) - short_sig = crash_info.createShortSignature() - if not self._any_crash and self._signature is None and short_sig != "No crash detected": - # signature has not been specified use the first one created - self._signature = report.crash_signature(crash_info) - if short_sig == "No crash detected": - # TODO: verify report.major == "NO_STACK" otherwise FM failed to parse 
the logs - # TODO: change this to support hangs/timeouts, etc - LOG.info("Result: No crash detected") - crash_hash = None - elif self._any_crash or self._signature.matches(crash_info): - self.status.count_result(short_sig) - LOG.info("Result: %s (%s:%s)", - short_sig, report.major[:8], report.minor[:8]) - crash_hash = report.crash_hash(crash_info) - if crash_hash not in self._reports_expected: - LOG.debug("now tracking %s", crash_hash) - self._reports_expected[crash_hash] = report - report = None # don't remove report - assert self._any_crash or len(self._reports_expected) == 1 - else: - LOG.info("Result: Different signature: %s (%s:%s)", - short_sig, report.major[:8], report.minor[:8]) + report = Report.from_path(log_path) + # check signatures + crash_info = report.crash_info(self.target.binary) + short_sig = crash_info.createShortSignature() + if not self._any_crash and self._signature is None and short_sig != "No crash detected": + # signature has not been specified use the first one created + self._signature = report.crash_signature(crash_info) + if short_sig == "No crash detected": + # TODO: verify report.major == "NO_STACK" otherwise FM failed to parse the logs + # TODO: change this to support hangs/timeouts, etc + LOG.info("Result: No crash detected") + crash_hash = None + elif self._any_crash or self._signature.matches(crash_info): + self.status.count_result(short_sig) + LOG.info("Result: %s (%s:%s)", + short_sig, report.major[:8], report.minor[:8]) + crash_hash = report.crash_hash(crash_info) + if crash_hash not in self._reports_expected: + LOG.debug("now tracking %s", crash_hash) + self._reports_expected[crash_hash] = report + report = None # don't remove report + assert self._any_crash or len(self._reports_expected) == 1 + else: + LOG.info("Result: Different signature: %s (%s:%s)", + short_sig, report.major[:8], report.minor[:8]) + self.status.ignored += 1 + crash_hash = report.crash_hash(crash_info) + if crash_hash not in self._reports_other: + LOG.debug("now tracking %s", crash_hash) + self._reports_other[crash_hash] = report + report = None # don't remove report + # purge untracked report + if report is not None: + if crash_hash is not None: + LOG.debug("already tracking %s", crash_hash) + report.cleanup() + report = None + elif self._runner.result == self._runner.IGNORED: self.status.ignored += 1 - crash_hash = report.crash_hash(crash_info) - if crash_hash not in self._reports_other: - LOG.debug("now tracking %s", crash_hash) - self._reports_other[crash_hash] = report - report = None # don't remove report - # purge untracked report - if report is not None: - if crash_hash is not None: - LOG.debug("already tracking %s", crash_hash) - report.cleanup() - report = None - elif self._runner.result == self._runner.IGNORED: - self.status.ignored += 1 - LOG.info("Result: Ignored (%d)", self.status.ignored) - elif self._runner.result == self._runner.ERROR: - LOG.error("ERROR: Replay malfunction, test case was not served") - break - - # check status and exit early if possible - if repeat - self.status.iteration + self.status.results < min_results: - if self.status.iteration < repeat: - LOG.debug("skipping remaining attempts") - # failed to reproduce issue - LOG.debug("results (%d) < expected (%s) after %d attempts", - self.status.results, min_results, self.status.iteration) - break - if self.status.results >= min_results: - assert self.status.results == min_results - success = True - LOG.debug("results == expected (%s) after %d attempts", - min_results, self.status.iteration) - break - 
- # warn about large browser logs - #self.status.log_size = self.target.log_size() - #if self.status.log_size > self.TARGET_LOG_SIZE_WARN: - # LOG.warning("Large browser logs: %dMBs", (self.status.log_size / 0x100000)) - - # trigger relaunch by closing the browser if needed - self.target.check_relaunch() + LOG.info("Result: Ignored (%d)", self.status.ignored) + elif self._runner.result == self._runner.ERROR: + LOG.error("ERROR: Replay malfunction, test case was not served") + break + # check status and exit early if possible + if repeat - self.status.iteration + self.status.results < min_results: + if self.status.iteration < repeat: + LOG.debug("skipping remaining attempts") + # failed to reproduce issue + LOG.debug("results (%d) < expected (%s) after %d attempts", + self.status.results, min_results, self.status.iteration) + break + if self.status.results >= min_results: + assert self.status.results == min_results + success = True + LOG.debug("results == expected (%s) after %d attempts", + min_results, self.status.iteration) + break + + # warn about large browser logs + #self.status.log_size = self.target.log_size() + #if self.status.log_size > self.TARGET_LOG_SIZE_WARN: + # LOG.warning("Large browser logs: %dMBs", (self.status.log_size / 0x100000)) + + # trigger relaunch by closing the browser if needed + self.target.check_relaunch() + finally: + for tc_path in unpacked: + rmtree(tc_path) if success: LOG.info("Result successfully reproduced") else: @@ -380,17 +378,16 @@ def main(cls, args): args.ignore, server, target, - testcases, any_crash=args.any_crash, signature=signature, use_harness=not args.no_harness) - success = replay.run(repeat=repeat, min_results=args.min_crashes) + success = replay.run(testcases, repeat=repeat, min_results=args.min_crashes) if args.logs: replay.report_to_filesystem( args.logs, replay.reports, replay.other_reports, - replay.testcases if args.include_test else None) + testcases if args.include_test else None) # TODO: add fuzzmanager reporting return 0 if success else 1 diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index a99015f4..bd02dbbe 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -33,7 +33,6 @@ def test_replay_01(mocker): replay = ReplayManager([], mocker.Mock(spec=Sapphire), mocker.Mock(spec=Target), [mocker.Mock()]) replay._reports_expected = {"A": mocker.Mock(spec=Report)} replay._reports_other = {"B": mocker.Mock(spec=Report)} - assert not replay._unpacked replay.status = mocker.Mock(spec=Status) ereport = tuple(replay.reports)[0] oreport = tuple(replay.other_reports)[0] @@ -42,8 +41,9 @@ def test_replay_01(mocker): assert oreport.cleanup.call_count == 1 assert replay.status.cleanup.call_count == 1 -def test_replay_02(mocker): +def test_replay_02(mocker, tmp_path): """test ReplayManager.run() - no repro""" + mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target) @@ -52,18 +52,18 @@ def test_replay_02(mocker): target.detect_failure.return_value = Target.RESULT_NONE target.forced_close = True target.rl_reset = 1 - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, testcases, use_harness=True) as replay: - assert not replay._unpacked - assert not replay.run() - assert replay._unpacked - assert replay.status.ignored == 0 - assert 
replay.status.iteration == 1 - assert replay.status.results == 0 - assert not replay.reports + with TestCase("land_page.html", "redirect.html", "test-adapter") as testcase: + with ReplayManager([], server, target, use_harness=True) as replay: + assert not replay.run([testcase]) + assert replay.status.ignored == 0 + assert replay.status.iteration == 1 + assert replay.status.results == 0 + assert not replay.reports + assert not any(tmp_path.glob("*")) -def test_replay_03(mocker): +def test_replay_03(mocker, tmp_path): """test ReplayManager.run() - successful repro""" + mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target) @@ -71,14 +71,15 @@ def test_replay_03(mocker): target.binary = "C:\\fake_bin" target.detect_failure.return_value = Target.RESULT_FAILURE target.save_logs = _fake_save_logs_result - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, testcases, use_harness=False) as replay: - assert replay.run() - assert replay.status.ignored == 0 - assert replay.status.iteration == 1 - assert replay.status.results == 1 - assert len(replay.reports) == 1 - assert not replay.other_reports + with TestCase("land_page.html", "redirect.html", "test-adapter") as testcase: + with ReplayManager([], server, target, use_harness=False) as replay: + assert replay.run([testcase]) + assert replay.status.ignored == 0 + assert replay.status.iteration == 1 + assert replay.status.results == 1 + assert len(replay.reports) == 1 + assert not replay.other_reports + assert not any(tmp_path.glob("*")) def test_replay_04(mocker): """test ReplayManager.run() - Error (landing page not requested/served)""" @@ -88,8 +89,8 @@ def test_replay_04(mocker): target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, testcases, use_harness=False) as replay: - assert not replay.run(repeat=2) + with ReplayManager([], server, target, use_harness=False) as replay: + assert not replay.run(testcases, repeat=2) assert replay.status.ignored == 0 assert replay.status.iteration == 1 assert replay.status.results == 0 @@ -105,8 +106,8 @@ def test_replay_05(mocker): target.RESULT_IGNORED = Target.RESULT_IGNORED target.detect_failure.return_value = Target.RESULT_IGNORED testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, testcases, use_harness=False) as replay: - assert not replay.run() + with ReplayManager([], server, target, use_harness=False) as replay: + assert not replay.run(testcases) assert replay.status.ignored == 1 assert replay.status.iteration == 1 assert replay.status.results == 0 @@ -117,25 +118,24 @@ def test_replay_06(mocker): """test ReplayManager.run() - early exit""" server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target) + target = mocker.Mock(spec=Target, binary="path/fake_bin") target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_IGNORED = Target.RESULT_IGNORED target.RESULT_NONE = Target.RESULT_NONE - target.binary = "path/fake_bin" target.save_logs = _fake_save_logs_result testcases = [mocker.Mock(spec=TestCase, env_vars=[], 
landing_page="index.html", optional=[])] # early failure target.detect_failure.side_effect = [Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_NONE] - with ReplayManager([], server, target, testcases, use_harness=False) as replay: - assert not replay.run(repeat=4, min_results=3) + with ReplayManager([], server, target, use_harness=False) as replay: + assert not replay.run(testcases, repeat=4, min_results=3) assert replay.status.iteration == 3 assert replay.status.results == 1 assert replay.status.ignored == 1 assert len(replay.reports) == 1 # early success target.detect_failure.side_effect = [Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_FAILURE] - with ReplayManager([], server, target, testcases, use_harness=False) as replay: - assert replay.run(repeat=4, min_results=2) + with ReplayManager([], server, target, use_harness=False) as replay: + assert replay.run(testcases, repeat=4, min_results=2) assert replay.status.iteration == 3 assert replay.status.results == 2 assert replay.status.ignored == 1 @@ -149,27 +149,21 @@ def test_replay_07(mocker, tmp_path): mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) report_0 = mocker.Mock(spec=Report) report_0.crash_info.return_value.createShortSignature.return_value = "No crash detected" - report_1 = mocker.Mock(spec=Report) + report_1 = mocker.Mock(spec=Report, major="0123abcd", minor="01239999") report_1.crash_info.return_value.createShortSignature.return_value = "[@ test1]" - report_1.major = "0123abcd" - report_1.minor = "01239999" - report_2 = mocker.Mock(spec=Report) + report_2 = mocker.Mock(spec=Report, major="0123abcd", minor="abcd9876") report_2.crash_info.return_value.createShortSignature.return_value = "[@ test2]" - report_2.major = "0123abcd" - report_2.minor = "abcd9876" report.from_path.side_effect = (report_0, report_1, report_2) - server = mocker.Mock(spec=Sapphire) - server.port = 0x1337 + server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) signature = mocker.Mock() signature.matches.side_effect = (True, False) - target = mocker.Mock(spec=Target) + target = mocker.Mock(spec=Target, binary="fake_bin") target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE - target.binary = "fake_bin" testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, testcases, signature=signature, use_harness=False) as replay: - assert not replay.run(repeat=3, min_results=2) + with ReplayManager([], server, target, signature=signature, use_harness=False) as replay: + assert not replay.run(testcases, repeat=3, min_results=2) assert replay._signature == signature assert report.from_path.call_count == 3 assert replay.status.iteration == 3 @@ -188,26 +182,21 @@ def test_replay_08(mocker, tmp_path): mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) report_0 = mocker.Mock(spec=Report) report_0.crash_info.return_value.createShortSignature.return_value = "No crash detected" - report_1 = mocker.Mock(spec=Report) + report_1 = mocker.Mock(spec=Report, major="0123abcd", minor="01239999") report_1.crash_info.return_value.createShortSignature.return_value = "[@ test1]" report_1.crash_hash.return_value = "hash1" - report_1.major = "0123abcd" - report_1.minor = "01239999" - report_2 = mocker.Mock(spec=Report) + report_2 = mocker.Mock(spec=Report, major="0123abcd", minor="abcd9876") 
report_2.crash_info.return_value.createShortSignature.return_value = "[@ test2]" report_2.crash_hash.return_value = "hash2" - report_2.major = "0123abcd" - report_2.minor = "abcd9876" report.from_path.side_effect = (report_0, report_1, report_2) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target) + target = mocker.Mock(spec=Target, binary="fake_bin") target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE - target.binary = "fake_bin" testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, testcases, any_crash=True, use_harness=False) as replay: - assert replay.run(repeat=3, min_results=2) + with ReplayManager([], server, target, any_crash=True, use_harness=False) as replay: + assert replay.run(testcases, repeat=3, min_results=2) assert replay._signature is None assert report.from_path.call_count == 3 assert replay.status.iteration == 3 @@ -264,9 +253,9 @@ def test_replay_10(mocker, tmp_path): target = mocker.Mock(spec=Target) target.launch.side_effect = TargetLaunchError testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] - with ReplayManager([], server, target, testcases, use_harness=False) as replay: + with ReplayManager([], server, target, use_harness=False) as replay: with raises(TargetLaunchError): - replay.run() + replay.run(testcases) assert not any(replay.reports) assert any(replay.other_reports) assert "STARTUP" in replay._reports_other @@ -285,12 +274,13 @@ def test_replay_11(mocker): mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, testcases, use_harness=True) as replay: - assert not replay.run() + with ReplayManager([], server, target, use_harness=True) as replay: + assert not replay.run(testcases) assert replay.status.ignored == 0 assert replay.status.iteration == 1 assert replay.status.results == 0 assert not replay.reports + assert all(x.dump.call_count == 1 for x in testcases) def test_replay_12(mocker): """test ReplayManager.run() - multiple TestCases - successful repro""" @@ -308,22 +298,11 @@ def test_replay_12(mocker): mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, testcases, use_harness=True) as replay: - assert replay.run() + with ReplayManager([], server, target, use_harness=True) as replay: + assert replay.run(testcases) assert replay.status.ignored == 0 assert replay.status.iteration == 1 assert replay.status.results == 1 assert len(replay.reports) == 1 assert not replay.other_reports - -def test_replay_13(mocker, tmp_path): - """test ReplayManager._unpacked()""" - server = mocker.Mock(spec=Sapphire, port=0x1337) - testcase = mocker.Mock(spec=TestCase, env_vars=[], optional=[]) - mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) - with ReplayManager([], server, mocker.Mock(spec=Target), [testcase]) as replay: - assert not replay._unpacked - replay._unpack_tests() - assert replay._unpacked - assert replay._unpacked[0] 
== str(tmp_path) - assert testcase.dump.call_count == 1 + assert all(x.dump.call_count == 1 for x in testcases) From 7c407f9b157af0d1c0ad1fd4b7a143c37dee6976 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 1 Sep 2020 11:20:07 -0700 Subject: [PATCH 010/531] [replay] Pass empty list to Reporter.submit() instead of None --- grizzly/replay/replay.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index b906e204..8bb50973 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -118,13 +118,13 @@ def report_to_filesystem(path, reports, other_reports=None, tests=None): report_path=pathjoin(path, "reports"), major_bucket=False) for report in reports: - reporter.submit(tests, report=report) + reporter.submit(tests or [], report=report) if other_reports: reporter = FilesystemReporter( report_path=pathjoin(path, "other_reports"), major_bucket=False) for report in other_reports: - reporter.submit(tests, report=report) + reporter.submit(tests or [], report=report) def run(self, testcases, repeat=1, min_results=1): """Run testcase replay. From 7536c632003c7472043736a62ba502b50b4edc3d Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 1 Sep 2020 17:19:18 -0700 Subject: [PATCH 011/531] [tests] Add ReplayManager.report_to_filesystem() use case --- grizzly/replay/test_replay.py | 41 ++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index bd02dbbe..b08d64f1 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -215,24 +215,27 @@ def test_replay_09(mocker, tmp_path): # no reports ReplayManager.report_to_filesystem(str(tmp_path), []) assert not any(tmp_path.glob("*")) - # with reports - reports_expected = list() - reports_expected.append(mocker.Mock(spec=Report)) - reports_expected[-1].prefix = "expected" + # with reports and tests (tmp_path / "report_expected").mkdir() - reports_expected[-1].path = str(tmp_path / "report_expected") - reports_other = list() - reports_other.append(mocker.Mock(spec=Report)) - reports_other[-1].prefix = "other1" + expected = [ + mocker.Mock( + spec=Report, + path=str(tmp_path / "report_expected"), + prefix="expected")] (tmp_path / "report_other1").mkdir() - reports_other[-1].path = str(tmp_path / "report_other1") - reports_other.append(mocker.Mock(spec=Report)) - reports_other[-1].prefix = "other2" (tmp_path / "report_other2").mkdir() - reports_other[-1].path = str(tmp_path / "report_other2") + other = [ + mocker.Mock( + spec=Report, + path=str(tmp_path / "report_other1"), + prefix="other1"), + mocker.Mock( + spec=Report, + path=str(tmp_path / "report_other2"), + prefix="other2")] test = mocker.Mock(spec=TestCase) path = tmp_path / "dest" - ReplayManager.report_to_filesystem(str(path), reports_expected, reports_other, tests=[test]) + ReplayManager.report_to_filesystem(str(path), expected, other, tests=[test]) assert test.dump.call_count == 3 # called once per report assert not (tmp_path / "report_expected").is_dir() assert not (tmp_path / "report_other1").is_dir() @@ -243,6 +246,18 @@ def test_replay_09(mocker, tmp_path): assert (path / "other_reports").is_dir() assert (path / "other_reports" / "other1_logs").is_dir() assert (path / "other_reports" / "other2_logs").is_dir() + # with reports and not tests + (tmp_path / "report_expected").mkdir() + expected = [ + mocker.Mock( + spec=Report, + path=str(tmp_path / "report_expected"), + 
prefix="expected")] + path = tmp_path / "dest2" + ReplayManager.report_to_filesystem(str(path), expected) + assert not (tmp_path / "report_expected").is_dir() + assert path.is_dir() + assert (path / "reports" / "expected_logs").is_dir() def test_replay_10(mocker, tmp_path): """test ReplayManager.run() - TargetLaunchError""" From 850ee353c822aac4ae7be8813c604b116e98e393 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 1 Sep 2020 17:20:59 -0700 Subject: [PATCH 012/531] [replay] Avoid leaving temp directories on disk in failure case --- grizzly/replay/replay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 8bb50973..0b40dcf6 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -163,8 +163,8 @@ def _dyn_close(): # pragma: no cover LOG.debug("unpacking testcases (%d)...", test_count) for test in testcases: dst_path = mkdtemp(prefix="tc_", dir=grz_tmp("serve")) - test.dump(dst_path) unpacked.append(dst_path) + test.dump(dst_path) # perform iterations for _ in range(repeat): self.status.iteration += 1 From ce9034becfac15cd6cebd91c9e79fc116d50a5ad Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 2 Sep 2020 20:09:28 -0700 Subject: [PATCH 013/531] Add RunResult --- grizzly/common/__init__.py | 6 +-- grizzly/common/runner.py | 55 ++++++++++++------------ grizzly/common/test_runner.py | 79 +++++++++++++++++------------------ grizzly/reduce/reduce.py | 16 +++---- grizzly/replay/replay.py | 12 +++--- grizzly/session.py | 22 +++++----- grizzly/test_session.py | 47 ++++++++------------- 7 files changed, 113 insertions(+), 124 deletions(-) diff --git a/grizzly/common/__init__.py b/grizzly/common/__init__.py index 0fe3d642..c95dee8a 100644 --- a/grizzly/common/__init__.py +++ b/grizzly/common/__init__.py @@ -6,7 +6,7 @@ from .adapter import Adapter, AdapterError from .iomanager import IOManager, ServerMap from .reporter import FilesystemReporter, FuzzManagerReporter, Report, Reporter, S3FuzzManagerReporter -from .runner import Runner +from .runner import Runner, RunResult from .status import ReducerStats, Status from .storage import TestCaseLoadFailure, TestCase, TestFile, TestFileExists from .utils import grz_tmp @@ -14,7 +14,7 @@ __all__ = ( "Adapter", "AdapterError", "FilesystemReporter", "FuzzManagerReporter", "grz_tmp", "IOManager", - "ReducerStats", "Report", "Reporter", "Runner", "S3FuzzManagerReporter", "ServerMap", "Status", - "TestCase", "TestCaseLoadFailure", "TestFile", "TestFileExists") + "ReducerStats", "Report", "Reporter", "Runner", "RunResult", "S3FuzzManagerReporter", + "ServerMap", "Status", "TestCase", "TestCaseLoadFailure", "TestFile", "TestFileExists") __author__ = "Jesse Schwartzentruber" __credits__ = ["Jesse Schwartzentruber", "Tyson Smith"] diff --git a/grizzly/common/runner.py b/grizzly/common/runner.py index 46630f6c..2933df2b 100644 --- a/grizzly/common/runner.py +++ b/grizzly/common/runner.py @@ -11,7 +11,7 @@ from ..target import TargetLaunchTimeout from .utils import grz_tmp -__all__ = ("Runner",) +__all__ = ("Runner", "RunResult") __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] @@ -74,12 +74,7 @@ def schedule_poll(self, initial=False, now=None): class Runner(object): - COMPLETE = 1 - ERROR = 2 - FAILED = 3 - IGNORED = 4 - - __slots__ = ("_idle", "_server", "_target", "result", "served", "timeout") + __slots__ = ("_idle", "_server", "_target") def __init__(self, server, target, idle_threshold=0, idle_delay=60): if idle_threshold > 0: @@ -88,9 +83,6 @@ def 
__init__(self, server, target, idle_threshold=0, idle_delay=60): self._idle = None self._server = server # a sapphire instance to serve the test case self._target = target # target to run test case - self.result = None - self.served = None - self.timeout = False def launch(self, location, env_mod=None, max_retries=3, retry_delay=0): """Launch a target and open `location`. @@ -164,12 +156,8 @@ def run(self, ignore, server_map, testcase, test_path=None, coverage=False, wait framework should move on. Returns: - None + RunResult: Files served, status and timeout flag from the run. """ - # set initial state - self.served = None - self.result = None - self.timeout = False if self._idle is not None: self._idle.schedule_poll(initial=True) try: @@ -181,7 +169,7 @@ def run(self, ignore, server_map, testcase, test_path=None, coverage=False, wait wwwdir = test_path # serve the test case serve_start = time() - server_status, self.served = self._server.serve_path( + server_status, served = self._server.serve_path( wwwdir, continue_cb=self._keep_waiting, forever=wait_for_callback, @@ -192,31 +180,32 @@ def run(self, ignore, server_map, testcase, test_path=None, coverage=False, wait # remove temporary files if test_path is None: rmtree(wwwdir) + result = RunResult(served, timeout=server_status == SERVED_TIMEOUT) # TODO: fix calling TestCase.add_batch() for multi-test replay # add all include files that were served for url, resource in server_map.include.items(): - testcase.add_batch(resource.target, self.served, prefix=url) - self.timeout = server_status == SERVED_TIMEOUT - served_lpage = testcase.landing_page in self.served + testcase.add_batch(resource.target, result.served, prefix=url) + served_lpage = testcase.landing_page in result.served if not served_lpage: LOG.debug("%r not served!", testcase.landing_page) - elif coverage and not self.timeout: + elif coverage and not result.timeout: # dump_coverage() should be called before detect_failure() # to help catch any coverage related issues. self._target.dump_coverage() # detect failure - failure_detected = self._target.detect_failure(ignore, self.timeout) + failure_detected = self._target.detect_failure(ignore, result.timeout) if failure_detected == self._target.RESULT_FAILURE: - self.result = self.FAILED + result.status = RunResult.FAILED elif not served_lpage: # something is wrong so close the target # previous iteration put target in a bad state? self._target.close() - self.result = self.ERROR + result.status = RunResult.ERROR elif failure_detected == self._target.RESULT_IGNORED: - self.result = self.IGNORED + result.status = RunResult.IGNORED else: - self.result = self.COMPLETE + result.status = RunResult.COMPLETE + return result def _keep_waiting(self): """Callback used by the server to determine if should continue to wait @@ -226,9 +215,23 @@ def _keep_waiting(self): None Returns: - bool: Continue to serve test test case + bool: Continue to serve the test case. 
""" if self._idle is not None and self._idle.is_idle(): LOG.debug("idle target detected") return False return self._target.monitor.is_healthy() + + +class RunResult(object): + COMPLETE = 1 + ERROR = 2 + FAILED = 3 + IGNORED = 4 + + __slots__ = ("served", "status", "timeout") + + def __init__(self, served, status=None, timeout=False): + self.served = served + self.status = status + self.timeout = timeout diff --git a/grizzly/common/test_runner.py b/grizzly/common/test_runner.py index 1d537054..da9be05f 100644 --- a/grizzly/common/test_runner.py +++ b/grizzly/common/test_runner.py @@ -9,7 +9,7 @@ from sapphire import Sapphire, SERVED_ALL, SERVED_NONE, SERVED_REQUEST, SERVED_TIMEOUT, ServerMap -from .runner import _IdleChecker, Runner +from .runner import _IdleChecker, Runner, RunResult from .storage import TestCase from ..target import Target, TargetLaunchError, TargetLaunchTimeout @@ -20,26 +20,23 @@ def test_runner_01(mocker, tmp_path): target.detect_failure.return_value = target.RESULT_NONE runner = Runner(server, target) assert runner._idle is None - assert runner.result is None - assert runner.served is None - assert not runner.timeout serv_files = ["a.bin", "/another/file.bin"] testcase = mocker.Mock(spec=TestCase, landing_page=serv_files[0], optional=[]) # all files served server.serve_path.return_value = (SERVED_ALL, serv_files) - runner.run([], ServerMap(), testcase) - assert runner.result == runner.COMPLETE - assert runner.served == serv_files - assert not runner.timeout + result = runner.run([], ServerMap(), testcase) + assert result.status == RunResult.COMPLETE + assert result.served == serv_files + assert not result.timeout assert target.close.call_count == 0 assert target.dump_coverage.call_count == 0 assert testcase.dump.call_count == 1 # some files served server.serve_path.return_value = (SERVED_REQUEST, serv_files) - runner.run([], ServerMap(), testcase, coverage=True) - assert runner.result == runner.COMPLETE - assert runner.served == serv_files - assert not runner.timeout + result = runner.run([], ServerMap(), testcase, coverage=True) + assert result.status == RunResult.COMPLETE + assert result.served == serv_files + assert not result.timeout assert target.close.call_count == 0 assert target.dump_coverage.call_count == 1 # existing test path @@ -47,8 +44,8 @@ def test_runner_01(mocker, tmp_path): tc_path = (tmp_path / "tc") tc_path.mkdir() server.serve_path.return_value = (SERVED_ALL, serv_files) - runner.run([], ServerMap(), testcase, test_path=str(tc_path)) - assert runner.result == runner.COMPLETE + result = runner.run([], ServerMap(), testcase, test_path=str(tc_path)) + assert result.status == RunResult.COMPLETE assert target.close.call_count == 0 assert testcase.dump.call_count == 0 tc_path.is_dir() @@ -62,17 +59,17 @@ def test_runner_02(mocker): # no files served server.serve_path.return_value = (SERVED_NONE, []) target.detect_failure.return_value = target.RESULT_NONE - runner.run([], ServerMap(), testcase) - assert runner.result == runner.ERROR - assert not runner.served - assert not runner.timeout + result = runner.run([], ServerMap(), testcase) + assert result.status == RunResult.ERROR + assert not result.served + assert not result.timeout assert target.close.call_count == 1 target.reset_mock() # landing page not served server.serve_path.return_value = (SERVED_REQUEST, ["harness"]) - runner.run([], ServerMap(), testcase) - assert runner.result == runner.ERROR - assert runner.served + result = runner.run([], ServerMap(), testcase) + assert result.status == 
RunResult.ERROR + assert result.served assert target.close.call_count == 1 def test_runner_03(mocker): @@ -83,10 +80,10 @@ def test_runner_03(mocker): server.serve_path.return_value = (SERVED_TIMEOUT, serv_files) runner = Runner(server, target) target.detect_failure.return_value = target.RESULT_FAILURE - runner.run([], ServerMap(), mocker.Mock(spec=TestCase, landing_page="x", optional=[])) - assert runner.result == runner.FAILED - assert runner.served == serv_files - assert runner.timeout + result = runner.run([], ServerMap(), mocker.Mock(spec=TestCase, landing_page="x", optional=[])) + assert result.status == RunResult.FAILED + assert result.served == serv_files + assert result.timeout def test_runner_04(mocker): """test reporting failures""" @@ -98,23 +95,23 @@ def test_runner_04(mocker): runner = Runner(server, target) # test FAILURE target.detect_failure.return_value = target.RESULT_FAILURE - runner.run([], ServerMap(), testcase) - assert runner.result == runner.FAILED - assert runner.served == serv_files - assert not runner.timeout + result = runner.run([], ServerMap(), testcase) + assert result.status == RunResult.FAILED + assert result.served == serv_files + assert not result.timeout # test IGNORED target.detect_failure.return_value = target.RESULT_IGNORED - runner.run([], ServerMap(), testcase) - assert runner.result == runner.IGNORED - assert runner.served == serv_files - assert not runner.timeout + result = runner.run([], ServerMap(), testcase) + assert result.status == RunResult.IGNORED + assert result.served == serv_files + assert not result.timeout # failure before serving landing page server.serve_path.return_value = (SERVED_REQUEST, ["harness"]) target.detect_failure.return_value = target.RESULT_FAILURE - runner.run([], ServerMap(), testcase) - assert runner.result == runner.FAILED - assert runner.served - assert not runner.timeout + result = runner.run([], ServerMap(), testcase) + assert result.status == RunResult.FAILED + assert result.served + assert not result.timeout def test_runner_05(mocker): """test Runner() with idle checking""" @@ -125,8 +122,8 @@ def test_runner_05(mocker): server.serve_path.return_value = (SERVED_REQUEST, serv_files) runner = Runner(server, target, idle_threshold=0.01, idle_delay=0.01) assert runner._idle is not None - runner.run([], ServerMap(), mocker.Mock(spec=TestCase, landing_page=serv_files[0], optional=[])) - assert runner.result == runner.COMPLETE + result = runner.run([], ServerMap(), mocker.Mock(spec=TestCase, landing_page=serv_files[0], optional=[])) + assert result.status == RunResult.COMPLETE assert target.close.call_count == 0 def test_runner_06(mocker): @@ -215,8 +212,8 @@ def test_runner_09(mocker, tmp_path): serv_files = ["a.b", str(inc1), str(inc2), str(inc3)] server.serve_path.return_value = (SERVED_ALL, serv_files) with TestCase("a.b", "x", "x") as tcase: - runner.run([], smap, tcase) - assert runner.result == runner.COMPLETE + result = runner.run([], smap, tcase) + assert result.status == RunResult.COMPLETE assert "inc_file.bin" in tcase._existing_paths assert pathjoin("nested", "nested_inc.bin") in tcase._existing_paths assert pathjoin("test", "inc_file3.txt") in tcase._existing_paths diff --git a/grizzly/reduce/reduce.py b/grizzly/reduce/reduce.py index 9d57de92..9edc21ba 100644 --- a/grizzly/reduce/reduce.py +++ b/grizzly/reduce/reduce.py @@ -26,7 +26,7 @@ from . 
import strategies as strategies_module, testcase_contents from .exceptions import CorruptTestcaseError, NoTestcaseError, ReducerError from ..common.reporter import FilesystemReporter, FuzzManagerReporter, Report -from ..common.runner import Runner +from ..common.runner import Runner, RunResult from ..common.status import ReducerStats, Status from ..common.storage import TestCase, TestFile from ..common.utils import grz_tmp @@ -599,7 +599,7 @@ def _run(self, testcase, temp_prefix): Returns: bool: True if reduced testcase is still interesting. """ - result = False + interesting = False # if target is closed and server is alive, we should restart it or else the first request # against /first_test will 404 @@ -656,12 +656,12 @@ def _dyn_resp_close(): # pragma: no cover self._server_map.set_redirect("grz_next_test", str(self.landing_page), required=True) # run test case - runner.run(self._ignore, self._server_map, testcase, wait_for_callback=self._no_harness) + result = runner.run(self._ignore, self._server_map, testcase, wait_for_callback=self._no_harness) # handle failure if detected - if runner.result == Runner.FAILED: + if result.status == RunResult.FAILED: self._target.close() - testcase.purge_optional(runner.served) + testcase.purge_optional(result.served) # save logs result_logs = temp_prefix + "_logs" @@ -679,7 +679,7 @@ def _dyn_resp_close(): # pragma: no cover # XXX: need to change this to support reducing timeouts? LOG.info("Uninteresting: no crash detected") elif self._orig_sig is None or self._orig_sig.matches(crash): - result = True + interesting = True LOG.info("Interesting: %s", short_sig) if self._orig_sig is None and not self._any_crash: self._orig_sig = Report.crash_signature(crash) @@ -687,7 +687,7 @@ def _dyn_resp_close(): # pragma: no cover LOG.info("Uninteresting: different signature: %s", short_sig) self.on_other_crash_found(testcase, temp_prefix) - elif runner.result == Runner.IGNORED: + elif result.status == RunResult.IGNORED: LOG.info("Uninteresting: ignored") self._target.close() @@ -697,7 +697,7 @@ def _dyn_resp_close(): # pragma: no cover # trigger relaunch by closing the browser if needed self._target.check_relaunch() - return result + return interesting def _stop_log_capture(self): """Stop handling reduce logs. 
diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 0b40dcf6..01c09d44 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -13,7 +13,7 @@ from sapphire import Sapphire, ServerMap from ..common.reporter import FilesystemReporter, FuzzManagerReporter, Report -from ..common.runner import Runner +from ..common.runner import Runner, RunResult from ..common.status import Status from ..common.storage import TestCase, TestCaseLoadFailure, TestFile from ..common.utils import grz_tmp @@ -210,16 +210,16 @@ def _dyn_close(): # pragma: no cover testcases[test_idx].landing_page, required=False) # run testcase - self._runner.run( + result = self._runner.run( self.ignore, server_map, testcases[test_idx], test_path=unpacked[test_idx], wait_for_callback=self._harness is None) - if self._runner.result != self._runner.COMPLETE: + if result.status != RunResult.COMPLETE: break # process results - if self._runner.result == self._runner.FAILED: + if result.status == RunResult.FAILED: log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) self.target.save_logs(log_path) report = Report.from_path(log_path) @@ -259,10 +259,10 @@ def _dyn_close(): # pragma: no cover LOG.debug("already tracking %s", crash_hash) report.cleanup() report = None - elif self._runner.result == self._runner.IGNORED: + elif result.status == RunResult.IGNORED: self.status.ignored += 1 LOG.info("Result: Ignored (%d)", self.status.ignored) - elif self._runner.result == self._runner.ERROR: + elif result.status == RunResult.ERROR: LOG.error("ERROR: Replay malfunction, test case was not served") break diff --git a/grizzly/session.py b/grizzly/session.py index 1087a9b8..362a4f10 100644 --- a/grizzly/session.py +++ b/grizzly/session.py @@ -9,7 +9,7 @@ from tempfile import mkdtemp from time import sleep, time -from .common import grz_tmp, Report, Runner, Status, TestFile +from .common import grz_tmp, Report, Runner, RunResult, Status, TestFile from .target import TargetLaunchError @@ -172,31 +172,31 @@ def _dyn_close(): # pragma: no cover self.display_status(log_limiter=log_limiter) # run test case - runner.run(ignore, self.iomanager.server_map, current_test, coverage=self.coverage) + result = runner.run(ignore, self.iomanager.server_map, current_test, coverage=self.coverage) # adapter callbacks - if runner.timeout: + if result.timeout: log.debug("calling self.adapter.on_timeout()") - self.adapter.on_timeout(current_test, runner.served) + self.adapter.on_timeout(current_test, result.served) else: log.debug("calling self.adapter.on_served()") - self.adapter.on_served(current_test, runner.served) + self.adapter.on_served(current_test, result.served) # update test case - if runner.result != runner.ERROR: - if not runner.served: + if result.status != RunResult.ERROR: + if not result.served: # this can happen if the target crashes between serving test cases log.info("Ignoring test case since nothing was served") self.iomanager.tests.pop().cleanup() elif self.adapter.IGNORE_UNSERVED: log.debug("removing unserved files from the test case") - current_test.purge_optional(runner.served) + current_test.purge_optional(result.served) # process results - if runner.result == runner.FAILED: + if result.status == RunResult.FAILED: log.debug("result detected") self.report_result() - elif runner.result == runner.IGNORED: + elif result.status == RunResult.IGNORED: self.status.ignored += 1 log.info("Ignored (%d)", self.status.ignored) - elif runner.result == runner.ERROR: + elif result.status == RunResult.ERROR: log.error("Test case 
was not served") if not current_test.contains(current_test.landing_page): log.warning("Test case is missing landing page") diff --git a/grizzly/test_session.py b/grizzly/test_session.py index 0ee4b227..8533895c 100644 --- a/grizzly/test_session.py +++ b/grizzly/test_session.py @@ -11,7 +11,7 @@ from pytest import raises from sapphire import Sapphire, ServerMap, SERVED_ALL, SERVED_NONE, SERVED_REQUEST, SERVED_TIMEOUT -from .common import Adapter, IOManager, Reporter, Status, TestCase +from .common import Adapter, IOManager, Reporter, RunResult, Status, TestCase from .session import LogOutputLimiter, Session, SessionError from .target import Target, TargetLaunchError @@ -113,14 +113,12 @@ def generate(self, testcase, server_map): def test_session_05(tmp_path, mocker): """test basic Session functions""" Status.PATH = str(tmp_path) - fake_adapter = mocker.Mock(spec=Adapter) + fake_adapter = mocker.Mock(spec=Adapter, remaining=None) fake_adapter.TEST_DURATION = 10 - fake_adapter.remaining = None fake_testcase = mocker.Mock(spec=TestCase, landing_page="page.htm", optional=[]) fake_iomgr = mocker.Mock(spec=IOManager) - fake_iomgr.server_map = ServerMap() + fake_iomgr = mocker.Mock(spec=IOManager, harness=None, server_map=ServerMap()) fake_iomgr.create_testcase.return_value = fake_testcase - fake_iomgr.harness = None fake_iomgr.tests = mocker.Mock(spec=deque) fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) # return SERVED_TIMEOUT to test IGNORE_UNSERVED code path @@ -148,8 +146,7 @@ def test_session_06(tmp_path, mocker): mocker.patch("grizzly.session.TestFile", autospec=True) fake_adapter = mocker.Mock(spec=Adapter) fake_adapter.NAME = "fake_adapter" - fake_iomgr = mocker.Mock(spec=IOManager) - fake_iomgr.server_map = ServerMap() + fake_iomgr = mocker.Mock(spec=IOManager, server_map=ServerMap()) fake_iomgr.create_testcase.return_value = mocker.Mock(spec=TestCase) fake_target = mocker.Mock(spec=Target, prefs="fake") with Session(fake_adapter, fake_iomgr, None, None, fake_target) as session: @@ -166,25 +163,20 @@ def test_session_07(tmp_path, mocker): Status.PATH = str(tmp_path) mocker.patch("grizzly.session.Report", autospec=True) fake_runner = mocker.patch("grizzly.session.Runner", autospec=True) + fake_runner.return_value.run.return_value = RunResult(["/fake/file"], status=RunResult.FAILED) mocker.patch("grizzly.session.TestFile", autospec=True) - fake_adapter = mocker.Mock(spec=Adapter) + fake_adapter = mocker.Mock(spec=Adapter, remaining=None) fake_adapter.IGNORE_UNSERVED = True fake_adapter.TEST_DURATION = 10 - fake_adapter.remaining = None - fake_iomgr = mocker.Mock(spec=IOManager) + fake_iomgr = mocker.Mock(spec=IOManager, harness=None, server_map=ServerMap(), tests=deque()) fake_iomgr.create_testcase.return_value = mocker.Mock(spec=TestCase) - fake_iomgr.harness = None - fake_iomgr.server_map = ServerMap() - fake_iomgr.tests = deque() fake_reporter = mocker.Mock(spec=Reporter) fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) fake_target = mocker.Mock(spec=Target, prefs="prefs.js") fake_target.monitor.launches = 1 with Session(fake_adapter, fake_iomgr, fake_reporter, fake_serv, fake_target) as session: - fake_runner.return_value.result = fake_runner.return_value.FAILED - fake_runner.return_value.served = ["/fake/file"] - fake_runner.return_value.timeout = False session.run([], iteration_limit=1) + assert fake_runner.return_value.run.call_count == 1 assert fake_adapter.on_served.call_count == 1 assert fake_adapter.on_timeout.call_count == 0 assert fake_iomgr.purge_tests.call_count == 1 
@@ -198,15 +190,13 @@ def test_session_08(tmp_path, mocker):
     """test Session.run() ignoring failures"""
     Status.PATH = str(tmp_path)
     fake_runner = mocker.patch("grizzly.session.Runner", autospec=True)
+    fake_runner.return_value.run.return_value = RunResult([], status=RunResult.IGNORED)
     mocker.patch("grizzly.session.TestFile", autospec=True)
-    fake_adapter = mocker.Mock(spec=Adapter)
+    fake_adapter = mocker.Mock(spec=Adapter, remaining=None)
     fake_adapter.IGNORE_UNSERVED = True
     fake_adapter.TEST_DURATION = 10
-    fake_adapter.remaining = None
-    fake_iomgr = mocker.Mock(spec=IOManager)
+    fake_iomgr = mocker.Mock(spec=IOManager, harness=None, server_map=ServerMap())
     fake_iomgr.create_testcase.return_value = mocker.Mock(spec=TestCase)
-    fake_iomgr.harness = None
-    fake_iomgr.server_map = ServerMap()
     fake_iomgr.tests = mocker.Mock(spec=deque)
     fake_iomgr.tests.pop.return_value = mocker.Mock(spec=TestCase)
     fake_serv = mocker.Mock(spec=Sapphire, port=0x1337)
@@ -215,10 +205,8 @@ def test_session_08(tmp_path, mocker):
     # ignored results should not be reported so raise AssertionError if report_result is called
     mocker.patch.object(Session, 'report_result', side_effect=AssertionError)
     with Session(fake_adapter, fake_iomgr, None, fake_serv, fake_target) as session:
-        fake_runner.return_value.result = fake_runner.return_value.IGNORED
-        fake_runner.return_value.served = []
-        fake_runner.return_value.timeout = False
         session.run([], iteration_limit=1)
+    assert fake_runner.return_value.run.call_count == 1
     assert fake_adapter.on_served.call_count == 1
     assert fake_adapter.on_timeout.call_count == 0
     assert fake_iomgr.purge_tests.call_count == 1
@@ -236,11 +224,12 @@ def test_session_09(tmp_path, mocker):
     fake_runner.return_value.launch.side_effect = TargetLaunchError
     mocker.patch("grizzly.session.TestFile", autospec=True)
     fake_adapter = mocker.Mock(spec=Adapter)
-    fake_iomgr = mocker.Mock(spec=IOManager)
-    fake_iomgr.harness = None
-    fake_iomgr.input_files = []
-    fake_iomgr.server_map = ServerMap()
-    fake_iomgr.tests = deque()
+    fake_iomgr = mocker.Mock(
+        spec=IOManager,
+        harness=None,
+        input_files=[],
+        server_map=ServerMap(),
+        tests=deque())
     fake_reporter = mocker.Mock(spec=Reporter)
     fake_serv = mocker.Mock(spec=Sapphire, port=0x1337)
     fake_target = mocker.Mock(spec=Target)

From 4842b5737e05baea0f1bd86a7c45fde8b049fa05 Mon Sep 17 00:00:00 2001
From: Tyson Smith
Date: Thu, 17 Sep 2020 13:22:10 -0700
Subject: [PATCH 014/531] Cleanup Report and Reporter

---
 grizzly/common/reporter.py      | 344 ++++++++++++++++++--------------
 grizzly/common/test_reporter.py | 307 +++++++++++++++-------------
 grizzly/main.py                 |   4 +-
 grizzly/reduce/reduce.py        |  70 +++----
 grizzly/reduce/test_common.py   |  45 ++++-
 grizzly/replay/replay.py        |  33 ++-
 grizzly/replay/test_replay.py   |  34 ++--
 grizzly/session.py              |   7 +-
 grizzly/test_session.py         |  39 ++--
 9 files changed, 484 insertions(+), 399 deletions(-)

diff --git a/grizzly/common/reporter.py b/grizzly/common/reporter.py
index 14060dc6..48d2892c 100644
--- a/grizzly/common/reporter.py
+++ b/grizzly/common/reporter.py
@@ -4,6 +4,7 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
from abc import ABCMeta, abstractmethod +from collections import namedtuple from hashlib import sha1 from json import dump from logging import getLogger, WARNING @@ -43,7 +44,10 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -log = getLogger("grizzly") # pylint: disable=invalid-name +LOG = getLogger("grizzly") + +# NOTE: order matters, aux -> stderr -> stdout +LogMap = namedtuple("LogMap", "aux stderr stdout") class Report(object): @@ -51,89 +55,95 @@ class Report(object): DEFAULT_MINOR = "0" MAX_LOG_SIZE = 1048576 # 1MB - __slots__ = ("_crash_info", "log_aux", "log_err", "log_out", "path", "prefix", "stack") + __slots__ = ("_crash_info", "_logs", "_signature", "_target_binary", "path", "prefix", "stack") - def __init__(self, log_path, log_map, size_limit=MAX_LOG_SIZE): + def __init__(self, log_path, target_binary, size_limit=MAX_LOG_SIZE): + assert isinstance(log_path, str) and isdir(log_path) + assert isinstance(target_binary, str) self._crash_info = None - self.log_aux = log_map.get("aux") if log_map is not None else None - self.log_err = log_map.get("stderr") if log_map is not None else None - self.log_out = log_map.get("stdout") if log_map is not None else None + self._logs = self.select_logs(log_path) + assert self._logs is not None + self._signature = None + self._target_binary = target_binary self.path = log_path - - # tail logs if needed + # tail files in log_path if needed if size_limit < 1: - log.warning("No limit set on report log size!") - elif isdir(log_path): + LOG.warning("No limit set on report log size!") + else: for fname in listdir(log_path): log_file_path = pathjoin(log_path, fname) if isfile(log_file_path): Report.tail(log_file_path, size_limit) - # look through logs one by one until we find a stack - # NOTE: order matters aux->stderr->stdout - for scan_log in (self.log_aux, self.log_err, self.log_out): - if scan_log is None: - continue - with open(pathjoin(log_path, scan_log), "rb") as log_fp: + for log_file in (x for x in self._logs if x is not None): + with open(log_file, "rb") as log_fp: stack = Stack.from_text(log_fp.read().decode("utf-8", errors="ignore")) if stack.frames: self.prefix = "%s_%s" % (stack.minor[:8], strftime("%Y-%m-%d_%H-%M-%S")) self.stack = stack break else: - self.stack = None self.prefix = "%s_%s" % (self.DEFAULT_MINOR, strftime("%Y-%m-%d_%H-%M-%S")) + self.stack = None def cleanup(self): - if isdir(self.path): + """Remove Report data from filesystem. + + Args: + None + + Returns: + None + """ + if self.path and isdir(self.path): rmtree(self.path) + self.path = None - @staticmethod - def crash_hash(crash_info): - """Create CrashInfo object from logs. + @property + def crash_hash(self): + """Create SHA1 hash from signature. Args: - crash_info (CrashInfo): Binary file being tested. + None Returns: str: Hash of the raw signature of the crash. """ - max_frames = Report.crash_signature_max_frames(crash_info, 5) - sig = Report.crash_signature(crash_info, max_frames) - return sha1(sig.rawSignature.encode("utf-8")).hexdigest()[:16] + return sha1(self.crash_signature.rawSignature.encode("utf-8")).hexdigest()[:16] - def crash_info(self, target_binary): + @property + def crash_info(self): """Create CrashInfo object from logs. Args: - target_binary (str): Binary file being tested. + None Returns: - CrashInfo: CrashInfo based on Result log data. + CrashInfo: CrashInfo based on log data. 
""" if self._crash_info is None: + assert self.path is not None # read in the log files and create a CrashInfo object - aux_data = None - if self.log_aux is not None: - with open(pathjoin(self.path, self.log_aux), "rb") as log_fp: + if self._logs.aux is not None: + with open(self._logs.aux, "rb") as log_fp: aux_data = log_fp.read().decode("utf-8", errors="ignore").splitlines() - stderr_file = pathjoin(self.path, self.log_err) - stdout_file = pathjoin(self.path, self.log_out) + else: + aux_data = None # create ProgramConfiguration that can be reported to a FM server - if isfile("%s.fuzzmanagerconf" % (target_binary,)): + if isfile("%s.fuzzmanagerconf" % (self._target_binary,)): # attempt to use ".fuzzmanagerconf" - fm_cfg = ProgramConfiguration.fromBinary(target_binary) + fm_cfg = ProgramConfiguration.fromBinary(self._target_binary) else: - log.debug("'%s.fuzzmanagerconf' does not exist", target_binary) + LOG.debug("'%s.fuzzmanagerconf' does not exist", self._target_binary) fm_cfg = None if fm_cfg is None: - log.debug("creating ProgramConfiguration") + LOG.debug("creating ProgramConfiguration") cpu = machine().lower() fm_cfg = ProgramConfiguration( - basename(target_binary), + basename(self._target_binary), "x86_64" if cpu == "amd64" else cpu, system()) - with open(stderr_file, "rb") as err_fp, open(stdout_file, "rb") as out_fp: + with open(self._logs.stderr, "rb") as err_fp, open(self._logs.stdout, "rb") as out_fp: self._crash_info = CrashInfo.fromRawCrashData( out_fp.read().decode("utf-8", errors="ignore").splitlines(), err_fp.read().decode("utf-8", errors="ignore").splitlines(), @@ -141,10 +151,20 @@ def crash_info(self, target_binary): auxCrashData=aux_data) return self._crash_info - @staticmethod - def crash_signature(crash_info, max_frames=5): - return crash_info.createCrashSignature( - maxFrames=Report.crash_signature_max_frames(crash_info, max_frames)) + @property + def crash_signature(self): + """Create CrashSignature object from CrashInfo. + + Args: + None + + Returns: + CrashSignature: CrashSignature based on log data. + """ + if self._signature is None: + self._signature = self.crash_info.createCrashSignature( + maxFrames=self.crash_signature_max_frames(self.crash_info)) + return self._signature @staticmethod def crash_signature_max_frames(crash_info, suggested_frames=8): @@ -156,48 +176,74 @@ def crash_signature_max_frames(crash_info, suggested_frames=8): suggested_frames += 6 return suggested_frames - @classmethod - def from_path(cls, path, size_limit=MAX_LOG_SIZE): - """Create Report from a directory containing logs. + @property + def major(self): + """The inclusive bucketing hash based on the stack trace + data found in logs. Args: - path (str): Directory containing log files. - size_limit (int): Maximum size in bytes of a log file. + None Returns: - Report: Result object based on log data. + str: major hash string. """ - return cls(path, Report.select_logs(path), size_limit=size_limit) - - @property - def major(self): if self.stack and self.stack.major is not None: return self.stack.major return self.DEFAULT_MAJOR @property def minor(self): + """The specific bucketing hash based on the stack trace + data found in logs. + + Args: + None + + Returns: + str: minor hash string. + """ if self.stack and self.stack.minor is not None: return self.stack.minor return self.DEFAULT_MINOR @property def preferred(self): - return self.log_aux if self.log_aux is not None else self.log_err + """Log file containing most relevant data. + + Args: + None + + Returns: + str: Name of log. 
+ """ + return self._logs.aux or self._logs.stderr @staticmethod def select_logs(log_path): - if not isdir(log_path): - raise IOError("log_path does not exist %r" % log_path) - log_files = listdir(log_path) - if not log_files: - raise IOError("No logs found in %r" % log_path) - logs = {"aux": None, "stderr": None, "stdout": None} + """Scan log_path for file containing stderr, stdout and other (aux) + data and build a LogMap. + + Args: + log_path (str): Path to scan for log files. + + Returns: + LogMap: A LogMap pointing to files or None if log_path is empty. + """ + # scan path for files + to_scan = list() + for entry in listdir(log_path): + full_path = pathjoin(log_path, entry) + if isfile(full_path): + to_scan.append(full_path) + if not to_scan: + LOG.warning("No files found in %r", log_path) + return None # order by creation date because the oldest log is likely the cause of the issue - log_files.sort(key=lambda x: stat(pathjoin(log_path, x)).st_mtime) + to_scan.sort(key=lambda x: stat(x).st_mtime) # pattern to identify the ASan crash triggered when the parent process goes away + # TODO: this may no longer be required re_e10s_forced = re_compile(r""" ==\d+==ERROR:.+?SEGV\son.+?0x[0]+\s\(.+?T2\).+? #0\s+0x[0-9a-f]+\sin\s+mozilla::ipc::MessageChannel::OnChannelErrorFromLink @@ -210,12 +256,12 @@ def select_logs(log_path): "use-after-", "-buffer-overflow on", ": SEGV on ", "access-violation on ", "negative-size-param", "attempting free on ", "-param-overlap") + log_aux = None # look for sanitizer (ASan, UBSan, etc...) logs - for fname in (log_file for log_file in log_files if "asan" in log_file): + for fname in (x for x in to_scan if "asan" in x): # grab first chunk of log to help triage - with open(pathjoin(log_path, fname), "r") as log_fp: - log_data = log_fp.read(4096) - + with open(fname, "r") as log_fp: + log_data = log_fp.read(65536) # look for interesting crash info in the log if "==ERROR:" in log_data: # check for e10s forced crash @@ -223,58 +269,72 @@ def select_logs(log_path): continue # make sure there is something that looks like a stack frame in the log if "#0 " in log_data: - logs["aux"] = fname + log_aux = fname if any(x in log_data for x in interesting_sanitizer_tokens): break # this is the likely cause of the crash continue # probably the most interesting but lets keep looking - - # UBSan error (non-ASan builds) - if ": runtime error: " in log_data: - logs["aux"] = fname - - # catch all (choose the one with info for now) - if logs["aux"] is None and stat(pathjoin(log_path, fname)).st_size: - logs["aux"] = fname + if log_aux is None: + # UBSan error (non-ASan builds) + if ": runtime error: " in log_data: + log_aux = fname + # catch all (choose the one with info for now) + elif log_data: + log_aux = fname # look for Valgrind logs - if logs["aux"] is None: - for fname in (log_file for log_file in log_files if "valgrind" in log_file): - if stat(pathjoin(log_path, fname)).st_size: - logs["aux"] = fname + if log_aux is None: + for fname in (x for x in to_scan if "valgrind" in x): + if stat(fname).st_size: + log_aux = fname break # prefer ASan logs over minidump logs - if logs["aux"] is None: + if log_aux is None: re_dump_req = re_compile(r"\d+\|0\|.+?\|google_breakpad::ExceptionHandler::WriteMinidump") - for fname in (log_file for log_file in log_files if "minidump" in log_file): - with open(pathjoin(log_path, fname), "r") as log_fp: - log_data = log_fp.read(4096) + for fname in (x for x in to_scan if "minidump" in x): + with open(fname, "r") as log_fp: + log_data = 
log_fp.read(65536) # this will select log that contains "Crash|SIGSEGV|" or # the desired "DUMP_REQUESTED" log # TODO: review this it may be too strict # see https://searchfox.org/mozilla-central/source/accessible/ipc/DocAccessibleParent.cpp#452 if "Crash|DUMP_REQUESTED|" not in log_data or re_dump_req.search(log_data): - logs["aux"] = fname + log_aux = fname break # look for ffpuppet worker logs, worker logs should be used if nothing else is available - if logs["aux"] is None: - for fname in (log_file for log_file in log_files if "ffp_worker" in log_file): - if logs["aux"] is not None: + if log_aux is None: + for fname in (x for x in to_scan if "ffp_worker" in x): + if log_aux is not None: # we only expect one log here... - log.warning("aux log previously selected: %s, overwriting!", logs["aux"]) - logs["aux"] = fname + LOG.warning("aux log previously selected: %s, overwriting!", log_aux) + log_aux = fname - for fname in log_files: + # look for stderr and stdout log files + log_err = None + log_out = None + for fname in to_scan: if "stderr" in fname: - logs["stderr"] = fname + log_err = fname elif "stdout" in fname: - logs["stdout"] = fname + log_out = fname - return logs + result = LogMap(log_aux, log_err, log_out) + if not any(result): + LOG.warning("No logs found in %r", log_path) + return result @staticmethod def tail(in_file, size_limit): + """Tail the given file. This is destructive. + + Args: + in_file (str): Path to file to work with. + size_limit (int): Maximum size of file after tail operation. + + Returns: + None + """ assert size_limit > 0 if stat(in_file).st_size <= size_limit: return @@ -292,46 +352,36 @@ def tail(in_file, size_limit): class Reporter(metaclass=ABCMeta): @abstractmethod - def _process_report(self, report): + def _post_submit(self): pass @abstractmethod - def _reset(self): + def _pre_submit(self, report): pass @abstractmethod def _submit_report(self, report, test_cases): pass - def submit(self, test_cases, log_path=None, report=None): - """Submit report containing results. Either `log_path` or `report` must - be specified. + def submit(self, test_cases, report): + """Submit report containing results. Args: test_cases (iterable): A collection of testcases, ordered newest to oldest, the newest being the mostly likely to trigger the result (crash, assert... etc). - log_path (str): Path to logs from the Target. A Report will - be created from this. report (Report): Report to submit. Returns: None """ - if log_path is not None: - assert report is None, "Only 'log_path' or 'report' can be specified!" 
- if not isdir(log_path): - raise IOError("No such directory %r" % log_path) - report = Report.from_path(log_path) - elif report is not None: - assert isinstance(report, Report) - else: - raise AssertionError("Either 'log_path' or 'report' must be specified!") - self._process_report(report) + assert isinstance(report, Report) + assert report.path is not None + self._pre_submit(report) self._submit_report(report, test_cases) if report is not None: report.cleanup() - self._reset() + self._post_submit() class FilesystemReporter(Reporter): @@ -339,12 +389,12 @@ class FilesystemReporter(Reporter): def __init__(self, report_path=None, major_bucket=True): self.major_bucket = major_bucket - self.report_path = pathjoin(getcwd(), "results") if report_path is None else report_path + self.report_path = report_path or pathjoin(getcwd(), "results") - def _process_report(self, report): + def _pre_submit(self, report): pass - def _reset(self): + def _post_submit(self): pass def _submit_report(self, report, test_cases): @@ -364,7 +414,7 @@ def _submit_report(self, report, test_cases): # move logs into bucket directory log_path = pathjoin(dest_path, "%s_%s" % (report.prefix, "logs")) if isdir(log_path): - log.warning("Report log path exists %r", log_path) + LOG.warning("Report log path exists %r", log_path) move(report.path, log_path) # avoid filling the disk free_space = disk_usage(log_path).free @@ -388,19 +438,13 @@ class FuzzManagerReporter(Reporter): QUAL_REDUCER_ERROR = 9 # reducer error QUAL_NOT_REPRODUCIBLE = 10 # could not reproduce the testcase - def __init__(self, target_binary, tool=None): + def __init__(self, tool=None): self._extra_metadata = {} self.force_report = False self.quality = self.QUAL_UNREDUCED - self.target_binary = target_binary self.tool = tool # optional tool name - @staticmethod - def create_crash_info(report, target_binary): - # TODO: this is here to preserve the old way of operation (used by reducer) - return report.crash_info(target_binary) - - def _reset(self): + def _post_submit(self): self._extra_metadata = {} @staticmethod @@ -414,7 +458,7 @@ def sanity_check(bin_file): None """ if not isfile(FuzzManagerReporter.FM_CONFIG): - raise IOError("Missing: %s" % FuzzManagerReporter.FM_CONFIG) + raise IOError("Missing: %s" % (FuzzManagerReporter.FM_CONFIG,)) if not isfile("".join([bin_file, ".fuzzmanagerconf"])): raise IOError("Missing: %s.fuzzmanagerconf" % (bin_file,)) ProgramConfiguration.fromBinary(bin_file) @@ -426,14 +470,14 @@ def quality_name(cls, value): return name return "unknown quality (%r)" % (value,) - def _process_report(self, report): + def _pre_submit(self, report): self._process_rr_trace(report) def _process_rr_trace(self, report): # don't report large files to FuzzManager trace_path = pathjoin(report.path, "rr-traces") if isdir(trace_path): - log.info("Ignored rr trace") + LOG.info("Ignored rr trace") self._extra_metadata["rr-trace"] = "ignored" # remove traces so they are not uploaded to FM (because they are huge) # use S3FuzzManagerReporter instead @@ -443,38 +487,34 @@ def _process_rr_trace(self, report): def _ignored(report): # This is here to prevent reporting stack-less crashes # that were caused by system OOM or bogus other crashes - log_file = pathjoin(report.path, report.preferred) - with open(log_file, "rb") as log_fp: + with open(report.preferred, "rb") as log_fp: log_data = log_fp.read().decode("utf-8", errors="ignore") mem_errs = ( "ERROR: Failed to mmap", ": AddressSanitizer failed to allocate") + # ignore sanitizer OOMs missing stack for msg in 
mem_errs: if msg in log_data and "#0 " not in log_data: return True + # ignore Valgrind crashes if log_data.startswith("VEX temporary storage exhausted."): - # ignore Valgrind crashes return True return False def _submit_report(self, report, test_cases): - # prepare data for submission as CrashInfo - crash_info = report.crash_info(self.target_binary) - assert crash_info is not None - # search for a cached signature match and if the signature # is already in the cache and marked as frequent, don't bother submitting with InterProcessLock(pathjoin(grz_tmp(), "fm_sigcache.lock")): collector = Collector() - cache_sig_file, cache_metadata = collector.search(crash_info) + cache_sig_file, cache_metadata = collector.search(report.crash_info) if cache_metadata is not None: if cache_metadata["frequent"]: - log.info("Frequent crash matched existing signature: %s", + LOG.info("Frequent crash matched existing signature: %s", cache_metadata["shortDescription"]) if not self.force_report: return elif "bug__id" in cache_metadata: - log.info("Crash matched existing signature (bug %s): %s", + LOG.info("Crash matched existing signature (bug %s): %s", cache_metadata["bug__id"], cache_metadata["shortDescription"]) # we will still report this one, but no more @@ -484,17 +524,17 @@ def _submit_report(self, report, test_cases): else: # there is no signature, create one locally so we can count # the number of times we've seen it - max_frames = Report.crash_signature_max_frames(crash_info) - cache_sig_file = collector.generate(crash_info, numFrames=max_frames) + max_frames = report.crash_signature_max_frames(report.crash_info) + cache_sig_file = collector.generate(report.crash_info, numFrames=max_frames) cache_metadata = { "_grizzly_seen_count": 0, "frequent": False, - "shortDescription": crash_info.createShortSignature()} + "shortDescription": report.crash_info.createShortSignature()} if cache_sig_file is None: if self._ignored(report): - log.info("Report is unsupported and is in ignore list") + LOG.info("Report is unsupported and is in ignore list") return - log.warning("Report is unsupported by FM, saved to %r", report.path) + LOG.warning("Report is unsupported by FM, saved to %r", report.path) # TODO: we should check if stackhasher failed too raise RuntimeError("Failed to create FM signature") # limit the number of times we report per cycle @@ -514,17 +554,17 @@ def _submit_report(self, report, test_cases): if not isdir(dump_path): mkdir(dump_path) test_case.dump(dump_path, include_details=True) - crash_info.configuration.addMetadata({"grizzly_input": repr(test_case_meta)}) + report.crash_info.configuration.addMetadata({"grizzly_input": repr(test_case_meta)}) if test_cases: environ_string = " ".join("=".join(kv) for kv in test_cases[0].env_vars.items()) - crash_info.configuration.addMetadata({"recorded_envvars": environ_string}) + report.crash_info.configuration.addMetadata({"recorded_envvars": environ_string}) else: self.quality = self.QUAL_NO_TESTCASE - crash_info.configuration.addMetadata(self._extra_metadata) + report.crash_info.configuration.addMetadata(self._extra_metadata) - # grab screen log + # grab screen log (used in automation) if getenv("WINDOW") is not None: - screen_log = ".".join(["screenlog", getenv("WINDOW")]) + screen_log = pathjoin(getcwd(), ".".join(["screenlog", getenv("WINDOW")])) if isfile(screen_log): target_log = pathjoin(report.path, "screenlog.txt") copyfile(screen_log, target_log) @@ -547,10 +587,10 @@ def _submit_report(self, report, test_cases): # announce shortDescription if crash 
is not in a bucket if cache_metadata["_grizzly_seen_count"] == 1 and not cache_metadata["frequent"]: - log.info("Submitting new crash %r", cache_metadata["shortDescription"]) + LOG.info("Submitting new crash %r", cache_metadata["shortDescription"]) # submit results to the FuzzManager server - new_entry = collector.submit(crash_info, testCase=zip_name, testCaseQuality=self.quality) - log.info("Logged %d with quality %d", new_entry["id"], self.quality) + new_entry = collector.submit(report.crash_info, testCase=zip_name, testCaseQuality=self.quality) + LOG.info("Logged %d with quality %d", new_entry["id"], self.quality) # remove zipfile if isfile(zip_name): @@ -564,14 +604,14 @@ def compress_rr_trace(src, dest): latest_trace = realpath(pathjoin(src, "latest-trace")) assert isdir(latest_trace), "missing latest-trace directory" rr_arc = pathjoin(dest, "rr.tar.bz2") - log.debug("creating %r from %r", rr_arc, latest_trace) + LOG.debug("creating %r from %r", rr_arc, latest_trace) with tar_open(rr_arc, "w:bz2") as arc_fp: arc_fp.add(latest_trace, arcname=basename(latest_trace)) # remove path containing uncompressed traces rmtree(src) return rr_arc - def _process_report(self, report): + def _pre_submit(self, report): self._process_rr_trace(report) def _process_rr_trace(self, report): @@ -590,12 +630,12 @@ def _process_rr_trace(self, report): if exc.response["Error"]["Code"] == "404": # The object does not exist. pass - else: + else: # pragma: no cover # Something else has gone wrong. raise else: # The object already exists. - log.info("RR trace exists at %s", s3_url) + LOG.info("rr trace exists at %r", s3_url) self._extra_metadata["rr-trace"] = s3_url # remove traces so they are not reported to FM rmtree(trace_path) diff --git a/grizzly/common/test_reporter.py b/grizzly/common/test_reporter.py index ce844702..843a0694 100644 --- a/grizzly/common/test_reporter.py +++ b/grizzly/common/test_reporter.py @@ -18,27 +18,18 @@ from .storage import TestCase -def test_report_01(): - """test creating a simple Report""" - report = Report("no_dir", dict()) - assert report.path == "no_dir" - assert report.log_aux is None - assert report.log_err is None - assert report.log_out is None - assert report.stack is None - assert report.preferred is None - report.cleanup() - -def test_report_02(tmp_path): - """test from_path() with boring logs (no stack)""" +def test_report_01(tmp_path): + """test Report() with boring logs (no stack)""" + (tmp_path / "not_a_log.txt").touch() (tmp_path / "log_stderr.txt").write_bytes(b"STDERR log") (tmp_path / "log_stdout.txt").write_bytes(b"STDOUT log") - report = Report.from_path(str(tmp_path)) + report = Report(str(tmp_path), "a.bin", size_limit=0) + assert report._target_binary == "a.bin" assert report.path == str(tmp_path) - assert report.log_err.endswith("log_stderr.txt") - assert report.log_out.endswith("log_stdout.txt") + assert report._logs.aux is None + assert report._logs.stderr.endswith("log_stderr.txt") + assert report._logs.stdout.endswith("log_stdout.txt") assert report.preferred.endswith("log_stderr.txt") - assert report.log_aux is None assert report.stack is None assert Report.DEFAULT_MAJOR == report.major assert Report.DEFAULT_MINOR == report.minor @@ -46,18 +37,18 @@ def test_report_02(tmp_path): report.cleanup() assert not tmp_path.exists() -def test_report_03(tmp_path): - """test from_path()""" +def test_report_02(tmp_path): + """test Report() with crash logs""" (tmp_path / "log_stderr.txt").write_bytes(b"STDERR log") (tmp_path / "log_stdout.txt").write_bytes(b"STDOUT 
log") with (tmp_path / "log_asan_blah.txt").open("wb") as log_fp: log_fp.write(b" #0 0xbad000 in foo /file1.c:123:234\n") log_fp.write(b" #1 0x1337dd in bar /file2.c:1806:19") - report = Report.from_path(str(tmp_path)) + report = Report(str(tmp_path), "bin") assert report.path == str(tmp_path) - assert report.log_aux.endswith("log_asan_blah.txt") - assert report.log_err.endswith("log_stderr.txt") - assert report.log_out.endswith("log_stdout.txt") + assert report._logs.aux.endswith("log_asan_blah.txt") + assert report._logs.stderr.endswith("log_stderr.txt") + assert report._logs.stdout.endswith("log_stdout.txt") assert report.preferred.endswith("log_asan_blah.txt") assert report.stack is not None assert Report.DEFAULT_MAJOR != report.major @@ -65,7 +56,7 @@ def test_report_03(tmp_path): assert report.prefix is not None report.cleanup() -def test_report_04(tmp_path): +def test_report_03(tmp_path): """test Report.tail()""" tmp_file = tmp_path / "file.txt" tmp_file.write_bytes(b"blah\ntest\n123\xEF\x00FOO") @@ -79,10 +70,15 @@ def test_report_04(tmp_path): assert log_data.startswith(b"[LOG TAILED]\n") assert log_data[13:] == b"FOO" +def test_report_04(tmp_path): + """test Report.select_logs() uninteresting data""" + # test with empty path + assert Report.select_logs(str(tmp_path)) is None + (tmp_path / "not_a_log.txt").touch() + assert not any(Report.select_logs(str(tmp_path))) + def test_report_05(tmp_path): """test Report.select_logs()""" - with pytest.raises(IOError, match="log_path does not exist"): - Report.select_logs("missing_path") # small log with nothing interesting with (tmp_path / "log_asan.txt.1").open("wb") as log_fp: log_fp.write(b"SHORT LOG\n") @@ -109,9 +105,9 @@ def test_report_05(tmp_path): # should be ignored in favor of "GOOD LOG" (tmp_path / "log_ffp_worker_blah.txt").write_bytes(b"worker log") log_map = Report.select_logs(str(tmp_path)) - assert "GOOD LOG" in (tmp_path / log_map["aux"]).read_text() - assert "STDERR" in (tmp_path / log_map["stderr"]).read_text() - assert "STDOUT" in (tmp_path / log_map["stdout"]).read_text() + assert "GOOD LOG" in (tmp_path / log_map.aux).read_text() + assert "STDERR" in (tmp_path / log_map.stderr).read_text() + assert "STDOUT" in (tmp_path / log_map.stdout).read_text() def test_report_06(tmp_path): """test minidump with Report.select_logs()""" @@ -123,9 +119,9 @@ def test_report_06(tmp_path): log_fp.write(b"minidump log\n") (tmp_path / "log_ffp_worker_blah.txt").write_bytes(b"worker log") log_map = Report.select_logs(str(tmp_path)) - assert (tmp_path / log_map["stderr"]).is_file() - assert (tmp_path / log_map["stdout"]).is_file() - assert "minidump log" in (tmp_path / log_map["aux"]).read_text() + assert (tmp_path / log_map.stderr).is_file() + assert (tmp_path / log_map.stdout).is_file() + assert "minidump log" in (tmp_path / log_map.aux).read_text() def test_report_07(tmp_path): """test selecting preferred DUMP_REQUESTED minidump with Report.select_logs()""" @@ -147,19 +143,21 @@ def test_report_07(tmp_path): log_fp.write(b"0|0|bar.so|sadf|a.cc:1234|3066|0x0\n") log_fp.write(b"0|1|gar.so|fdsa|b.cc:4323|1644|0x12\n") log_map = Report.select_logs(str(tmp_path)) - assert (tmp_path / log_map["stderr"]).is_file() - assert (tmp_path / log_map["stdout"]).is_file() - assert "google_breakpad::ExceptionHandler::WriteMinidump" in (tmp_path / log_map["aux"]).read_text() + assert (tmp_path / log_map.stderr).is_file() + assert (tmp_path / log_map.stdout).is_file() + assert "google_breakpad::ExceptionHandler::WriteMinidump" in (tmp_path / 
log_map.aux).read_text() def test_report_08(tmp_path): """test selecting worker logs with Report.select_logs()""" (tmp_path / "log_stderr.txt").write_bytes(b"STDERR log") (tmp_path / "log_stdout.txt").write_bytes(b"STDOUT log") - (tmp_path / "log_ffp_worker_blah.txt").write_bytes(b"worker log") + (tmp_path / "log_ffp_worker_1.txt").write_bytes(b"worker log") + # we should only ever see one but if we see multiple we warn, so test that. + (tmp_path / "log_ffp_worker_2.txt").write_bytes(b"worker log") log_map = Report.select_logs(str(tmp_path)) - assert (tmp_path / log_map["stderr"]).is_file() - assert (tmp_path / log_map["stdout"]).is_file() - assert "worker log" in (tmp_path / log_map["aux"]).read_text() + assert (tmp_path / log_map.stderr).is_file() + assert (tmp_path / log_map.stdout).is_file() + assert "worker log" in (tmp_path / log_map.aux).read_text() def test_report_09(tmp_path): """test prioritizing *San logs with Report.select_logs()""" @@ -192,25 +190,25 @@ def test_report_09(tmp_path): log_fp.write(b"BAD LOG\n") log_fp.write(b"ERROR: Failed to mmap\n") # must be 2nd line log_map = Report.select_logs(str(tmp_path)) - assert "GOOD LOG" in (tmp_path / log_map["aux"]).read_text() + assert "GOOD LOG" in (tmp_path / log_map.aux).read_text() def test_report_10(tmp_path): - """test Report size_limit""" + """test Report() size_limit""" (tmp_path / "log_stderr.txt").write_bytes(b"STDERR log\n" * 200) (tmp_path / "log_stdout.txt").write_bytes(b"STDOUT log\n" * 200) (tmp_path / "unrelated.txt").write_bytes(b"nothing burger\n" * 200) (tmp_path / "rr-trace").mkdir() size_limit = len("STDERR log\n") - report = Report.from_path(str(tmp_path), size_limit=size_limit) + report = Report(str(tmp_path), "bin", size_limit=size_limit) assert report.path == str(tmp_path) - assert report.log_err.endswith("log_stderr.txt") - assert report.log_out.endswith("log_stdout.txt") + assert report._logs.aux is None + assert report._logs.stderr.endswith("log_stderr.txt") + assert report._logs.stdout.endswith("log_stdout.txt") assert report.preferred.endswith("log_stderr.txt") - assert report.log_aux is None assert report.stack is None size_limit += len("[LOG TAILED]\n") - assert os.stat(os.path.join(report.path, report.log_err)).st_size == size_limit - assert os.stat(os.path.join(report.path, report.log_out)).st_size == size_limit + assert os.stat(os.path.join(report.path, report._logs.stderr)).st_size == size_limit + assert os.stat(os.path.join(report.path, report._logs.stdout)).st_size == size_limit assert os.stat(os.path.join(report.path, "unrelated.txt")).st_size == size_limit report.cleanup() assert not tmp_path.is_dir() @@ -221,21 +219,21 @@ def test_report_11(tmp_path): (tmp_path / "log_stdout.txt").write_bytes(b"STDOUT log") (tmp_path / "log_valgrind.txt").write_bytes(b"valgrind log") log_map = Report.select_logs(str(tmp_path)) - assert (tmp_path / log_map["stderr"]).is_file() - assert (tmp_path / log_map["stdout"]).is_file() - assert "valgrind log" in (tmp_path / log_map["aux"]).read_text() + assert (tmp_path / log_map.stderr).is_file() + assert (tmp_path / log_map.stdout).is_file() + assert "valgrind log" in (tmp_path / log_map.aux).read_text() def test_report_12(tmp_path): - """test Report.crash_info()""" + """test Report.crash_info""" (tmp_path / "log_stderr.txt").write_bytes(b"STDERR log") (tmp_path / "log_stdout.txt").write_bytes(b"STDOUT log") with (tmp_path / "log_asan_blah.txt").open("wb") as log_fp: log_fp.write(b" #0 0xbad000 in foo /file1.c:123:234\n") log_fp.write(b" #1 0x1337dd in bar 
/file2.c:1806:19") # no binary.fuzzmanagerconf - report = Report.from_path(str(tmp_path)) + report = Report(str(tmp_path), target_binary="fake_bin") assert report._crash_info is None - assert report.crash_info("fake_bin") is not None + assert report.crash_info is not None assert report._crash_info is not None # with binary.fuzzmanagerconf with (tmp_path / "fake_bin.fuzzmanagerconf").open("wb") as conf: @@ -243,13 +241,13 @@ def test_report_12(tmp_path): conf.write(b"platform = x86-64\n") conf.write(b"product = mozilla-central\n") conf.write(b"os = linux\n") - report = Report.from_path(str(tmp_path)) + report = Report(str(tmp_path), target_binary=str(tmp_path / "fake_bin")) assert report._crash_info is None - assert report.crash_info(str(tmp_path / "fake_bin")) is not None + assert report.crash_info is not None assert report._crash_info is not None def test_report_13(mocker, tmp_path): - """test Report.crash_signature() and Report.crash_hash()""" + """test Report.crash_signature and Report.crash_hash""" mocker.patch("grizzly.common.reporter.ProgramConfiguration", autospec=True) (tmp_path / "log_stderr.txt").write_bytes(b"STDERR log") (tmp_path / "log_stdout.txt").write_bytes(b"STDOUT log") @@ -257,14 +255,11 @@ def test_report_13(mocker, tmp_path): log_fp.write(b"==1==ERROR: AddressSanitizer: SEGV on unknown address 0x0 (pc 0x0 bp 0x0 sp 0x0 T0)\n") log_fp.write(b" #0 0xbad000 in foo /file1.c:123:234\n") log_fp.write(b" #1 0x1337dd in bar /file2.c:1806:19") - report = Report.from_path(str(tmp_path)) - assert report._crash_info is None - info = report.crash_info("fake_bin") - sig = Report.crash_signature(info) - assert sig.symptoms - short_sig = info.createShortSignature() - assert short_sig == "[@ foo]" - assert Report.crash_hash(info) + report = Report(str(tmp_path), "bin") + assert report._signature is None + assert report.crash_signature + assert report.crash_info.createShortSignature() == "[@ foo]" + assert report.crash_hash def test_report_14(mocker): """test Report.crash_signature_max_frames()""" @@ -274,25 +269,16 @@ def test_report_14(mocker): info.backtrace = ("std::panicking::rust_panic", "std::panicking::rust_panic_with_hook") assert Report.crash_signature_max_frames(info) == 14 -def test_reporter_01(mocker, tmp_path): +def test_reporter_01(mocker): """test creating a simple Reporter""" class SimpleReporter(Reporter): - def _process_report(self, report): + def _pre_submit(self, report): pass - def _reset(self): + def _post_submit(self): pass def _submit_report(self, report, test_cases): pass reporter = SimpleReporter() - with pytest.raises(AssertionError, match="Either 'log_path' or 'report' must be specified!"): - reporter.submit([]) - with pytest.raises(IOError, match="No such directory 'fake_dir'"): - reporter.submit([], log_path="fake_dir") - with pytest.raises(IOError, match="No logs found in"): - reporter.submit([], log_path=str(tmp_path)) - with pytest.raises(AssertionError, match="Only 'log_path' or 'report' can be specified!"): - reporter.submit([], log_path=str(tmp_path), report=mocker.Mock()) - # submit a report reporter.submit([], report=mocker.Mock(spec=Report)) def test_filesystem_reporter_01(tmp_path): @@ -307,13 +293,13 @@ def test_filesystem_reporter_01(tmp_path): report_path = tmp_path / "reports" report_path.mkdir() reporter = FilesystemReporter(report_path=str(report_path)) - reporter.submit([], log_path=str(log_path)) - buckets = [x for x in report_path.iterdir()] + reporter.submit([], Report(str(log_path), "fake_bin")) + buckets = 
tuple(report_path.iterdir()) # check major bucket assert len(buckets) == 1 assert buckets[0].is_dir() # check log path exists - log_dirs = [x for x in buckets[0].iterdir()] + log_dirs = tuple(buckets[0].iterdir()) assert len(log_dirs) == 1 assert log_dirs[0].is_dir() assert "_logs" in str(log_dirs[0]) @@ -333,7 +319,7 @@ def test_filesystem_reporter_02(tmp_path, mocker): report_path = tmp_path / "reports" assert not report_path.exists() reporter = FilesystemReporter(report_path=str(report_path)) - reporter.submit(testcases, log_path=str(log_path)) + reporter.submit(testcases, Report(str(log_path), "fake_bin")) assert not log_path.exists() assert report_path.exists() assert len(tuple(report_path.glob("*"))) == 1 @@ -346,7 +332,7 @@ def test_filesystem_reporter_02(tmp_path, mocker): testcases = list() for _ in range(2): testcases.append(mocker.Mock(spec=TestCase)) - reporter.submit(testcases, log_path=str(log_path)) + reporter.submit(testcases, Report(str(log_path), "fake_bin")) for tstc in testcases: assert tstc.dump.call_count == 1 assert len(tuple(report_path.glob("*"))) == 2 @@ -363,22 +349,23 @@ def test_filesystem_reporter_03(tmp_path): reporter = FilesystemReporter(report_path=str(report_path)) reporter.DISK_SPACE_ABORT = 2 ** 50 with pytest.raises(RuntimeError) as exc: - reporter.submit([], log_path=str(log_path)) + reporter.submit([], Report(str(log_path), "fake_bin")) assert "Running low on disk space" in str(exc.value) def test_filesystem_reporter_04(mocker, tmp_path): """test FilesystemReporter w/o major bucket""" - report = mocker.Mock(spec=Report) report_path = (tmp_path / "report") report_path.mkdir() - report.path = str(report_path) - report.prefix = "0000_2020_01_01" + report = mocker.Mock( + spec=Report, + path=str(report_path), + prefix="0000_2020_01_01") reporter = FilesystemReporter(report_path=str(tmp_path), major_bucket=False) - reporter.submit([], report=report) + reporter.submit([], report) assert not report_path.is_dir() assert not report.major.call_count -def test_fuzzmanager_reporter_01(tmp_path, mocker): +def test_fuzzmanager_reporter_01(mocker, tmp_path): """test FuzzManagerReporter.sanity_check()""" fake_reporter = mocker.patch("grizzly.common.reporter.ProgramConfiguration") fake_reporter.fromBinary.return_value = mocker.Mock(spec=ProgramConfiguration) @@ -399,111 +386,161 @@ def test_fuzzmanager_reporter_01(tmp_path, mocker): FuzzManagerReporter.sanity_check(str(fake_bin)) assert fake_reporter.fromBinary.call_count == 1 -def test_fuzzmanager_reporter_02(tmp_path): - """test FuzzManagerReporter.submit() empty path""" - reporter = FuzzManagerReporter("fake_bin") - report_path = tmp_path / "report" - report_path.mkdir() - with pytest.raises(IOError) as exc: - reporter.submit([], log_path=str(report_path)) - assert "No logs found in" in str(exc.value) - -def test_fuzzmanager_reporter_03(tmp_path, mocker): +def test_fuzzmanager_reporter_02(mocker, tmp_path): """test FuzzManagerReporter.submit()""" + mocker.patch("grizzly.common.reporter.getcwd", autospec=True, return_value=str(tmp_path)) + mocker.patch("grizzly.common.reporter.getenv", autospec=True, return_value="0") fake_crashinfo = mocker.patch("grizzly.common.reporter.CrashInfo", autospec=True) fake_crashinfo.fromRawCrashData.return_value.createShortSignature.return_value = "test [@ test]" fake_collector = mocker.patch("grizzly.common.reporter.Collector", autospec=True) fake_collector.return_value.search.return_value = (None, None) - fake_collector.return_value.generate.return_value = str(tmp_path / 
"fake_sig_file") + fake_collector.return_value.generate.return_value = str(tmp_path / "fm_file.signature") log_path = tmp_path / "log_path" log_path.mkdir() (log_path / "log_ffp_worker_blah.txt").touch() (log_path / "log_stderr.txt").touch() (log_path / "log_stdout.txt").touch() - report = Report.from_path(str(log_path)) - fake_test = mocker.Mock(spec=TestCase) - fake_test.adapter_name = "adapter" - fake_test.input_fname = "input" - fake_test.env_vars = {"TEST": "1"} - reporter = FuzzManagerReporter(str("fake_bin")) - reporter.submit([fake_test], report=report) + (log_path / "rr-traces").mkdir() + (tmp_path / "screenlog.0").touch() + fake_test = mocker.Mock( + spec=TestCase, + adapter_name="adapter", + env_vars={"TEST": "1"}, + input_fname="input") + reporter = FuzzManagerReporter("fake_bin") + reporter.submit([fake_test], Report(str(log_path), "fake_bin")) assert not log_path.is_dir() assert fake_test.dump.call_count == 1 assert fake_collector.return_value.submit.call_count == 1 + meta_data = (tmp_path / "fm_file.metadata").read_text() + assert "\"frequent\": false" in meta_data + assert "\"_grizzly_seen_count\": 1" in meta_data + assert "\"shortDescription\": \"test [@ test]\"" in meta_data + +def test_fuzzmanager_reporter_03(mocker, tmp_path): + """test FuzzManagerReporter.submit() - no test / mark as frequent""" + mocker.patch("grizzly.common.reporter.getcwd", autospec=True, return_value=str(tmp_path)) + fake_crashinfo = mocker.patch("grizzly.common.reporter.CrashInfo", autospec=True) + fake_crashinfo.fromRawCrashData.return_value.createShortSignature.return_value = "test [@ test]" + fake_collector = mocker.patch("grizzly.common.reporter.Collector", autospec=True) + fake_collector.return_value.search.return_value = (None, None) + fake_collector.return_value.generate.return_value = str(tmp_path / "fm_file.signature") + log_path = tmp_path / "log_path" + log_path.mkdir() + (log_path / "log_stderr.txt").touch() + (log_path / "log_stdout.txt").touch() + reporter = FuzzManagerReporter("fake_bin") + reporter.MAX_REPORTS = 1 + reporter.submit([], Report(str(log_path), "fake_bin")) + assert fake_collector.return_value.submit.call_count == 1 + meta_data = (tmp_path / "fm_file.metadata").read_text() + assert "\"frequent\": true" in meta_data + assert "\"_grizzly_seen_count\": 1" in meta_data + assert "\"shortDescription\": \"test [@ test]\"" in meta_data -def test_fuzzmanager_reporter_04(tmp_path, mocker): +def test_fuzzmanager_reporter_04(mocker, tmp_path): """test FuzzManagerReporter.submit() hit frequent crash""" mocker.patch("grizzly.common.reporter.CrashInfo", autospec=True) fake_collector = mocker.patch("grizzly.common.reporter.Collector", autospec=True) - fake_collector.return_value.search.return_value = (None, {"frequent": True, "shortDescription": "[@ test]"}) - reporter = FuzzManagerReporter("fake_bin") + fake_collector.return_value.search.return_value = ( + None, + {"frequent": True, "shortDescription": "[@ test]"}) log_path = tmp_path / "log_path" log_path.mkdir() (log_path / "log_stderr.txt").touch() (log_path / "log_stdout.txt").touch() - reporter.submit([], log_path=str(log_path)) + reporter = FuzzManagerReporter("fake_bin") + reporter.submit([], Report(str(log_path), "fake_bin")) fake_collector.return_value.submit.assert_not_called() -def test_fuzzmanager_reporter_05(tmp_path, mocker): +def test_fuzzmanager_reporter_05(mocker, tmp_path): """test FuzzManagerReporter.submit() hit existing crash""" mocker.patch("grizzly.common.reporter.CrashInfo", autospec=True) fake_collector = 
mocker.patch("grizzly.common.reporter.Collector", autospec=True) fake_collector.return_value.search.return_value = ( - None, {"bug__id":1, "frequent": False, "shortDescription": "[@ test]"}) - reporter = FuzzManagerReporter("fake_bin") + None, + {"bug__id":1, "frequent": False, "shortDescription": "[@ test]"}) log_path = tmp_path / "log_path" log_path.mkdir() (log_path / "log_stderr.txt").touch() (log_path / "log_stdout.txt").touch() + reporter = FuzzManagerReporter("fake_bin") reporter._ignored = lambda x: True - reporter.submit([], log_path=str(log_path)) + reporter.submit([], Report(str(log_path), "fake_bin")) fake_collector.return_value.submit.assert_not_called() -def test_fuzzmanager_reporter_06(tmp_path, mocker): +def test_fuzzmanager_reporter_06(mocker, tmp_path): """test FuzzManagerReporter.submit() no signature""" - mocker.patch("grizzly.common.reporter.CrashInfo", autospec=True) fake_collector = mocker.patch("grizzly.common.reporter.Collector", autospec=True) fake_collector.return_value.search.return_value = (None, None) fake_collector.return_value.generate.return_value = None - reporter = FuzzManagerReporter("fake_bin") log_path = tmp_path / "log_path" log_path.mkdir() (log_path / "log_stderr.txt").touch() (log_path / "log_stdout.txt").touch() + reporter = FuzzManagerReporter("fake_bin") with pytest.raises(RuntimeError) as exc: - reporter.submit([], log_path=str(log_path)) + reporter.submit([], Report(str(log_path), "fake_bin")) assert "Failed to create FM signature" in str(exc.value) - fake_collector.return_value.submit.assert_not_called() - # test ignore unsymbolized crash + +def test_fuzzmanager_reporter_07(mocker, tmp_path): + """test FuzzManagerReporter.submit() unsymbolized crash""" + fake_collector = mocker.patch("grizzly.common.reporter.Collector", autospec=True) + fake_collector.return_value.search.return_value = (None, None) + fake_collector.return_value.generate.return_value = None + log_path = tmp_path / "log_path" + log_path.mkdir() + (log_path / "log_stderr.txt").touch() + (log_path / "log_stdout.txt").touch() + reporter = FuzzManagerReporter("fake_bin") reporter._ignored = lambda x: True - reporter.submit([], log_path=str(log_path)) + reporter.submit([], Report(str(log_path), "fake_bin")) fake_collector.return_value.submit.assert_not_called() -def test_s3fuzzmanager_reporter_01(tmp_path, mocker): +def test_fuzzmanager_reporter_08(): + """test FuzzManagerReporter.quality_name()""" + assert FuzzManagerReporter.quality_name(0) == "QUAL_REDUCED_RESULT" + assert FuzzManagerReporter.quality_name(-1) == "unknown quality (-1)" + +def test_fuzzmanager_reporter_09(mocker, tmp_path): + """test FuzzManagerReporter._ignored()""" + log_file = (tmp_path / "test.log") + log_file.touch() + report = mocker.Mock(spec=Report, path=str(tmp_path), preferred=str(log_file)) + # not ignored + assert not FuzzManagerReporter._ignored(report) + # ignored - sanitizer OOM missing stack + log_file.write_bytes(b"ERROR: Failed to mmap") + assert FuzzManagerReporter._ignored(report) + # ignored - Valgrind OOM + log_file.write_bytes(b"VEX temporary storage exhausted.") + assert FuzzManagerReporter._ignored(report) + +def test_s3fuzzmanager_reporter_01(mocker, tmp_path): """test S3FuzzManagerReporter.sanity_check()""" mocker.patch("grizzly.common.reporter.FuzzManagerReporter", autospec=True) fake_bin = tmp_path / "bin" + # test GRZ_S3_BUCKET missing with pytest.raises(EnvironmentError) as exc: S3FuzzManagerReporter.sanity_check(str(fake_bin)) assert "'GRZ_S3_BUCKET' is not set in environment" in 
str(exc.value) + # test GRZ_S3_BUCKET set pytest.importorskip("boto3") - os.environ["GRZ_S3_BUCKET"] = "test" - try: - S3FuzzManagerReporter.sanity_check(str(fake_bin)) - finally: - os.environ.pop("GRZ_S3_BUCKET", None) + mocker.patch("grizzly.common.reporter.getenv", autospec=True, return_value="test") + S3FuzzManagerReporter.sanity_check(str(fake_bin)) -def test_s3fuzzmanager_reporter_02(tmp_path, mocker): - """test S3FuzzManagerReporter._process_report()""" +def test_s3fuzzmanager_reporter_02(mocker, tmp_path): + """test S3FuzzManagerReporter._pre_submit()""" pytest.importorskip("boto3") pytest.importorskip("botocore") + mocker.patch("grizzly.common.reporter.getenv", autospec=True, return_value="test") fake_resource = mocker.patch("grizzly.common.reporter.resource", autospec=True) fake_report = mocker.Mock(spec=Report) fake_report.path = "no-path" reporter = S3FuzzManagerReporter("fake_bin") # test will missing rr-trace - assert reporter._process_report(fake_report) is None + assert reporter._pre_submit(fake_report) is None assert not reporter._extra_metadata # test will exiting rr-trace @@ -511,11 +548,7 @@ def test_s3fuzzmanager_reporter_02(tmp_path, mocker): trace_dir.mkdir(parents=True) fake_report.minor = "1234abcd" fake_report.path = str(tmp_path) - os.environ["GRZ_S3_BUCKET"] = "test" - try: - reporter._process_report(fake_report) - finally: - os.environ.pop("GRZ_S3_BUCKET", None) + reporter._pre_submit(fake_report) assert not tuple(tmp_path.glob("*")) assert "rr-trace" in reporter._extra_metadata assert fake_report.minor in reporter._extra_metadata["rr-trace"] @@ -531,11 +564,7 @@ def __init__(self, message, response): self.response = response mocker.patch("grizzly.common.reporter.ClientError", new=FakeClientError) fake_resource.return_value.Object.side_effect = FakeClientError("test", {"Error": {"Code": "404"}}) - os.environ["GRZ_S3_BUCKET"] = "test" - try: - reporter._process_report(fake_report) - finally: - os.environ.pop("GRZ_S3_BUCKET", None) + reporter._pre_submit(fake_report) assert not tuple(tmp_path.glob("*")) assert "rr-trace" in reporter._extra_metadata assert fake_report.minor in reporter._extra_metadata["rr-trace"] diff --git a/grizzly/main.py b/grizzly/main.py index dc418ec6..08047aa4 100644 --- a/grizzly/main.py +++ b/grizzly/main.py @@ -94,10 +94,10 @@ def main(args): log.debug("initializing the Reporter") if args.fuzzmanager: log.info("Results will be reported via FuzzManager") - reporter = FuzzManagerReporter(args.binary, tool=args.tool) + reporter = FuzzManagerReporter(tool=args.tool) elif args.s3_fuzzmanager: log.info("Results will be reported via FuzzManager w/ large attachments in S3") - reporter = S3FuzzManagerReporter(args.binary, tool=args.tool) + reporter = S3FuzzManagerReporter(tool=args.tool) else: reporter = FilesystemReporter() log.info("Results will be stored in %r", reporter.report_path) diff --git a/grizzly/reduce/reduce.py b/grizzly/reduce/reduce.py index 9edc21ba..d187df41 100644 --- a/grizzly/reduce/reduce.py +++ b/grizzly/reduce/reduce.py @@ -6,7 +6,6 @@ Given a build and testcase, try to reproduce it using a set of strategies. 
""" from __future__ import absolute_import -import glob import hashlib import io import json @@ -249,7 +248,7 @@ class ReductionJob(object): __slots__ = [ '_any_crash', '_best_testcase', '_cache_iter_harness_created', '_env_mod', '_fixed_timeout', '_force_no_harness', '_idle_threshold', '_idle_timeout', '_ignore', - '_input_fname', '_interesting_prefix', '_iter_timeout', '_landing_page', '_log_handler', + '_input_fname', '_interesting_report', '_iter_timeout', '_landing_page', '_log_handler', '_min_crashes', '_no_harness', '_orig_sig', '_original_relaunch', '_other_crashes', '_reduce_file', '_repeat', '_reporter', '_result_cache', '_result_code', '_server', '_server_map', '_signature', '_skip', '_skip_analysis', '_skipped', '_status', '_target', '_tcroot', '_testcase', @@ -273,7 +272,7 @@ def __init__(self, ignore, target, iter_timeout, no_harness, any_crash, skip, mi self._idle_timeout = idle_timeout self._ignore = ignore # things to ignore self._input_fname = None - self._interesting_prefix = None + self._interesting_report = None self._iter_timeout = iter_timeout self._landing_page = None # the file to point the target at self._min_crashes = min_crashes @@ -448,16 +447,6 @@ def lithium_interesting(self, temp_prefix): result = self._result_cache[cache_key]['result'] if result: LOG.info("Interesting (cached)") - cached_prefix = self._result_cache[cache_key]['prefix'] - for filename in glob.glob(r"%s_*" % cached_prefix): - suffix = os.path.basename(filename).split("_", 1) - if os.path.isfile(filename): - shutil.copy(filename, "%s_%s" % (temp_prefix, suffix[1])) - elif os.path.isdir(filename): - shutil.copytree(filename, "%s_%s" % (temp_prefix, suffix[1])) - else: - raise RuntimeError("Cannot copy non-file/non-directory: %s" - % (filename,)) else: LOG.info("Uninteresting (cached)") return result @@ -487,16 +476,17 @@ def lithium_interesting(self, temp_prefix): self._status.report() self._status.iteration += 1 run_prefix = "%s(%d)" % (temp_prefix, try_num) - if self._run(testcase, run_prefix): + interesting_report = self._run(testcase, run_prefix) + if interesting_report: # track the maximum duration of the successful reduction attempts if testcase.duration > max_duration: max_duration = testcase.duration n_crashes += 1 if n_crashes >= self._min_crashes: - self.on_interesting_crash(run_prefix) + self.on_interesting_crash(interesting_report) if self._use_result_cache: self._result_cache[cache_key] = { - 'result': True, + 'result': interesting_report, 'prefix': run_prefix } self._best_testcase = testcase @@ -509,7 +499,7 @@ def lithium_interesting(self, temp_prefix): # No need to save the temp_prefix on uninteresting testcases # But let's do it anyway to stay consistent self._result_cache[cache_key] = { - 'result': False, + 'result': None, 'prefix': run_prefix } return False @@ -597,9 +587,9 @@ def _run(self, testcase, temp_prefix): temp_prefix (str): A unique prefix for any files written during this iteration. Returns: - bool: True if reduced testcase is still interesting. + Report: Report from reduced testcase if still interesting else None. 
""" - interesting = False + interesting = None # if target is closed and server is alive, we should restart it or else the first request # against /first_test will 404 @@ -669,23 +659,21 @@ def _dyn_resp_close(): # pragma: no cover os.mkdir(result_logs) self._target.save_logs(result_logs) - # create a CrashInfo - crash = FuzzManagerReporter.create_crash_info( - Report.from_path(result_logs), - self._target.binary) + # create report + report = Report(result_logs, self._target.binary) - short_sig = crash.createShortSignature() + short_sig = report.crash_info.createShortSignature() if short_sig == "No crash detected": # XXX: need to change this to support reducing timeouts? LOG.info("Uninteresting: no crash detected") - elif self._orig_sig is None or self._orig_sig.matches(crash): - interesting = True + elif self._orig_sig is None or self._orig_sig.matches(report.crash_info): + interesting = report LOG.info("Interesting: %s", short_sig) if self._orig_sig is None and not self._any_crash: - self._orig_sig = Report.crash_signature(crash) + self._orig_sig = report.crash_signature else: LOG.info("Uninteresting: different signature: %s", short_sig) - self.on_other_crash_found(testcase, temp_prefix) + self.on_other_crash_found(testcase, report) elif result.status == RunResult.IGNORED: LOG.info("Uninteresting: ignored") @@ -956,43 +944,39 @@ def close(self, keep_temp=False): self._target.cleanup() self._status.cleanup() - def _report_result(self, testcase, temp_prefix, quality_value, force=False): + def _report_result(self, testcase, report, quality_value, force=False): self._reporter.quality = quality_value self._reporter.force_report = force - self._reporter.submit([testcase], log_path=temp_prefix + "_logs") + self._reporter.submit([testcase], report) - def on_interesting_crash(self, temp_prefix): + def on_interesting_crash(self, report): # called for any interesting crash - self._interesting_prefix = temp_prefix + self._interesting_report = report def on_result(self, result_code): pass - def on_other_crash_found(self, testcase, temp_prefix): + def on_other_crash_found(self, testcase, report): """ If we hit an alternate crash, store the testcase in a tmp folder. If the same crash is encountered again, only keep the newest one. """ - crash_info = FuzzManagerReporter.create_crash_info( - Report.from_path(temp_prefix + "_logs"), - self._target.binary) - crash_hash = Report.crash_hash(crash_info) - short_sig = crash_info.createShortSignature() - if crash_hash in self._other_crashes: + short_sig = report.crash_info.createShortSignature() + if report.crash_hash in self._other_crashes: LOG.info("Found alternate crash (newer): %s", short_sig) # already counted when initially found self._status.ignored += 1 else: LOG.info("Found alternate crash: %s", short_sig) self._status.count_result(short_sig) - self._other_crashes[crash_hash] = {"tc": testcase, "prefix": temp_prefix} + self._other_crashes[report.crash_hash] = {"tc": testcase, "report": report} def _report_other_crashes(self): """ After reduce is finished, report any alternate results (if they don't match the collector cache). """ for entry in self._other_crashes.values(): - self._report_result(entry["tc"], entry["prefix"], FuzzManagerReporter.QUAL_UNREDUCED) + self._report_result(entry["tc"], entry["report"], FuzzManagerReporter.QUAL_UNREDUCED) def run(self, strategies=None): """Run reduction. 
@@ -1090,7 +1074,7 @@ def run(self, strategies=None): raise ReducerError("Reducer succeeded but nothing was reduced!") self._report_result(self._best_testcase, - self._interesting_prefix, + self._interesting_report, FuzzManagerReporter.QUAL_REDUCED_RESULT, force=True) @@ -1147,7 +1131,7 @@ def from_args(cls, args, target): LOG.debug("initializing the Reporter") if args.fuzzmanager: LOG.info("Reporting issues via FuzzManager") - job.set_reporter(FuzzManagerReporter(args.binary, tool=args.tool)) + job.set_reporter(FuzzManagerReporter(tool=args.tool)) else: reporter = FilesystemReporter() job.set_reporter(reporter) diff --git a/grizzly/reduce/test_common.py b/grizzly/reduce/test_common.py index 87459144..7a24a239 100644 --- a/grizzly/reduce/test_common.py +++ b/grizzly/reduce/test_common.py @@ -2,18 +2,19 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. +# pylint: disable=protected-access from __future__ import unicode_literals import os from grizzly.target.target import Target -from grizzly.common import Reporter +from grizzly.common import Report, Reporter from grizzly.reduce import crash from grizzly.reduce.reduce import ReductionJob class BaseFakeReporter(Reporter): - def _process_report(self, _): + def _post_submit(self): pass - def _reset(self): + def _pre_submit(self, _): pass def _submit_report(self, *_args, **_kwds): @@ -98,6 +99,11 @@ def create_target_binary(target, tmp_path): ) target.binary = str(tmp_path / "firefox") +def write_fail_log(path): + with open(os.path.join(path, "log_stderr.txt"), "wb") as log_fp: + log_fp.write(b"==1==ERROR: AddressSanitizer: SEGV on unknown address 0x0 (pc 0x0 bp 0x0 sp 0x0 T0)\n") + log_fp.write(b" #0 0xbad000 in foo /file1.c:123:234\n") + log_fp.write(b" #1 0x1337dd in bar /file2.c:1806:19") class TestReductionJob(ReductionJob): """Stub to fake parts of grizzly.reduce.ReductionJob needed for testing the reduce loop""" @@ -122,7 +128,13 @@ def _run(self, testcase, temp_prefix): self.target.save_logs(result_logs) testcase.duration = 0.1 with open(self.reduce_file) as fp: - return "required" in fp.read() + if "required" in fp.read(): + write_fail_log(result_logs) + report = Report(result_logs, self.target.binary) + else: + report = None + return report + def lithium_cleanup(self): pass @@ -155,11 +167,14 @@ def _run(self, testcase, temp_prefix): testcase.duration = 0.1 with open(self.reduce_file) as fp: if "required" in fp.read(): - self.on_other_crash_found(testcase, temp_prefix) + write_fail_log(result_logs) + report = Report(result_logs, self.target.binary) + self.on_other_crash_found(testcase, report) + else: + report = None if self.__first_run: self.__first_run = False - return True - return False + return report class TestReductionJobKeepHarness(TestReductionJob): @@ -179,10 +194,15 @@ def _run(self, testcase, temp_prefix): testcase.duration = 0.1 if self.__init_data is not None: with open(self.reduce_file) as fp: - return self.__init_data == fp.read() + if self.__init_data == fp.read(): + write_fail_log(result_logs) + return Report(result_logs, self.target.binary) else: with open(self.reduce_file) as fp: - return "required" in fp.read() + if "required" in fp.read(): + write_fail_log(result_logs) + return Report(result_logs, self.target.binary) + return None class TestReductionJobSemiReliable(TestReductionJob): @@ -206,6 +226,9 @@ def _run(self, testcase, temp_prefix): 
self.target.save_logs(result_logs) testcase.duration = 0.1 if self.__require_no_harness and not self._no_harness: - return False + return None self.__interesting_count += 1 - return self.__interesting_count <= self.__interesting_times + if self.__interesting_count <= self.__interesting_times: + write_fail_log(result_logs) + return Report(result_logs, self.target.binary) + return None diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 01c09d44..6d7bc3b9 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -141,6 +141,7 @@ def run(self, testcases, repeat=1, min_results=1): assert repeat > 0 assert min_results > 0 assert min_results <= repeat + assert self.status is None self.status = Status.start() test_count = len(testcases) @@ -191,7 +192,7 @@ def _dyn_close(): # pragma: no cover LOG.error("Target launch error. Check browser logs for details.") log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) self.target.save_logs(log_path) - self._reports_other["STARTUP"] = Report.from_path(log_path) + self._reports_other["STARTUP"] = Report(log_path, self.target.binary) raise self.target.step() LOG.info("Performing replay (%d/%d)...", self.status.iteration, repeat) @@ -222,41 +223,39 @@ def _dyn_close(): # pragma: no cover if result.status == RunResult.FAILED: log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) self.target.save_logs(log_path) - report = Report.from_path(log_path) + report = Report(log_path, self.target.binary) # check signatures - crash_info = report.crash_info(self.target.binary) - short_sig = crash_info.createShortSignature() + short_sig = report.crash_info.createShortSignature() if not self._any_crash and self._signature is None and short_sig != "No crash detected": # signature has not been specified use the first one created - self._signature = report.crash_signature(crash_info) + self._signature = report.crash_signature if short_sig == "No crash detected": # TODO: verify report.major == "NO_STACK" otherwise FM failed to parse the logs # TODO: change this to support hangs/timeouts, etc LOG.info("Result: No crash detected") - crash_hash = None - elif self._any_crash or self._signature.matches(crash_info): + elif self._any_crash or self._signature.matches(report.crash_info): self.status.count_result(short_sig) LOG.info("Result: %s (%s:%s)", short_sig, report.major[:8], report.minor[:8]) - crash_hash = report.crash_hash(crash_info) - if crash_hash not in self._reports_expected: - LOG.debug("now tracking %s", crash_hash) - self._reports_expected[crash_hash] = report + if report.crash_hash not in self._reports_expected: + LOG.debug("now tracking %s", report.crash_hash) + self._reports_expected[report.crash_hash] = report report = None # don't remove report + else: + LOG.debug("already tracking %s", report.crash_hash) assert self._any_crash or len(self._reports_expected) == 1 else: LOG.info("Result: Different signature: %s (%s:%s)", short_sig, report.major[:8], report.minor[:8]) self.status.ignored += 1 - crash_hash = report.crash_hash(crash_info) - if crash_hash not in self._reports_other: - LOG.debug("now tracking %s", crash_hash) - self._reports_other[crash_hash] = report + if report.crash_hash not in self._reports_other: + LOG.debug("now tracking %s", report.crash_hash) + self._reports_other[report.crash_hash] = report report = None # don't remove report + else: + LOG.debug("already tracking %s", report.crash_hash) # purge untracked report if report is not None: - if crash_hash is not None: - LOG.debug("already tracking %s", crash_hash) 
report.cleanup() report = None elif result.status == RunResult.IGNORED: diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index b08d64f1..6b9d90ce 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -145,15 +145,15 @@ def test_replay_06(mocker): def test_replay_07(mocker, tmp_path): """test ReplayManager.run() - test signatures""" - report = mocker.patch("grizzly.replay.replay.Report", autospec=True) mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) report_0 = mocker.Mock(spec=Report) - report_0.crash_info.return_value.createShortSignature.return_value = "No crash detected" + report_0.crash_info.createShortSignature.return_value = "No crash detected" report_1 = mocker.Mock(spec=Report, major="0123abcd", minor="01239999") - report_1.crash_info.return_value.createShortSignature.return_value = "[@ test1]" + report_1.crash_info.createShortSignature.return_value = "[@ test1]" report_2 = mocker.Mock(spec=Report, major="0123abcd", minor="abcd9876") - report_2.crash_info.return_value.createShortSignature.return_value = "[@ test2]" - report.from_path.side_effect = (report_0, report_1, report_2) + report_2.crash_info.createShortSignature.return_value = "[@ test2]" + fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) + fake_report.side_effect = (report_0, report_1, report_2) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) signature = mocker.Mock() @@ -165,7 +165,7 @@ def test_replay_07(mocker, tmp_path): with ReplayManager([], server, target, signature=signature, use_harness=False) as replay: assert not replay.run(testcases, repeat=3, min_results=2) assert replay._signature == signature - assert report.from_path.call_count == 3 + assert fake_report.call_count == 3 assert replay.status.iteration == 3 assert replay.status.results == 1 assert replay.status.ignored == 1 @@ -178,17 +178,17 @@ def test_replay_07(mocker, tmp_path): def test_replay_08(mocker, tmp_path): """test ReplayManager.run() - any crash""" - report = mocker.patch("grizzly.replay.replay.Report", autospec=True) mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) report_0 = mocker.Mock(spec=Report) - report_0.crash_info.return_value.createShortSignature.return_value = "No crash detected" + report_0.crash_info.createShortSignature.return_value = "No crash detected" report_1 = mocker.Mock(spec=Report, major="0123abcd", minor="01239999") - report_1.crash_info.return_value.createShortSignature.return_value = "[@ test1]" - report_1.crash_hash.return_value = "hash1" + report_1.crash_info.createShortSignature.return_value = "[@ test1]" + report_1.crash_hash = "hash1" report_2 = mocker.Mock(spec=Report, major="0123abcd", minor="abcd9876") - report_2.crash_info.return_value.createShortSignature.return_value = "[@ test2]" - report_2.crash_hash.return_value = "hash2" - report.from_path.side_effect = (report_0, report_1, report_2) + report_2.crash_info.createShortSignature.return_value = "[@ test2]" + report_2.crash_hash = "hash2" + fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) + fake_report.side_effect = (report_0, report_1, report_2) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target, binary="fake_bin") @@ -198,12 +198,10 @@ def test_replay_08(mocker, tmp_path): with ReplayManager([], server, target, any_crash=True, 
use_harness=False) as replay: assert replay.run(testcases, repeat=3, min_results=2) assert replay._signature is None - assert report.from_path.call_count == 3 + assert fake_report.call_count == 3 assert replay.status.iteration == 3 assert replay.status.results == 2 assert replay.status.ignored == 0 - assert report_1.crash_hash.call_count == 1 - assert report_2.crash_hash.call_count == 1 assert len(replay.reports) == 2 assert not replay.other_reports assert report_0.cleanup.call_count == 1 @@ -261,9 +259,9 @@ def test_replay_09(mocker, tmp_path): def test_replay_10(mocker, tmp_path): """test ReplayManager.run() - TargetLaunchError""" - report = mocker.patch("grizzly.replay.replay.Report", autospec=True) mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) - report.from_path.side_effect = (mocker.Mock(spec=Report),) + fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) + fake_report.side_effect = (mocker.Mock(spec=Report),) server = mocker.Mock(spec=Sapphire, port=0x1337) target = mocker.Mock(spec=Target) target.launch.side_effect = TargetLaunchError diff --git a/grizzly/session.py b/grizzly/session.py index 362a4f10..aac7027d 100644 --- a/grizzly/session.py +++ b/grizzly/session.py @@ -109,13 +109,12 @@ def report_result(self): # create working directory for target logs result_logs = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) self.target.save_logs(result_logs) - report = Report.from_path(result_logs) - crash_info = report.crash_info(self.target.binary) - short_sig = crash_info.createShortSignature() + report = Report(result_logs, self.target.binary) + short_sig = report.crash_info.createShortSignature() log.info("Result: %s (%s:%s)", short_sig, report.major[:8], report.minor[:8]) # order test cases newest to oldest self.iomanager.tests.reverse() - self.reporter.submit(self.iomanager.tests, report=report) + self.reporter.submit(self.iomanager.tests, report) if isdir(result_logs): rmtree(result_logs) self.status.count_result(short_sig) diff --git a/grizzly/test_session.py b/grizzly/test_session.py index 8533895c..fb07082e 100644 --- a/grizzly/test_session.py +++ b/grizzly/test_session.py @@ -11,11 +11,27 @@ from pytest import raises from sapphire import Sapphire, ServerMap, SERVED_ALL, SERVED_NONE, SERVED_REQUEST, SERVED_TIMEOUT -from .common import Adapter, IOManager, Reporter, RunResult, Status, TestCase +from .common import Adapter, IOManager, Report, Reporter, RunResult, Status, TestCase from .session import LogOutputLimiter, Session, SessionError from .target import Target, TargetLaunchError +class NullReporter(Reporter): + def __init__(self): + self.submit_calls = 0 + + def _post_submit(self): + pass + + def _pre_submit(self, report): + pass + + def _submit_report(self, report, test_cases): + assert isinstance(report, Report) + for test in test_cases: + assert isinstance(test, TestCase) + self.submit_calls += 1 + def test_session_01(tmp_path, mocker): """test Session with playback Adapter""" class PlaybackAdapter(Adapter): @@ -170,11 +186,11 @@ def test_session_07(tmp_path, mocker): fake_adapter.TEST_DURATION = 10 fake_iomgr = mocker.Mock(spec=IOManager, harness=None, server_map=ServerMap(), tests=deque()) fake_iomgr.create_testcase.return_value = mocker.Mock(spec=TestCase) - fake_reporter = mocker.Mock(spec=Reporter) fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) fake_target = mocker.Mock(spec=Target, prefs="prefs.js") fake_target.monitor.launches = 1 - with Session(fake_adapter, fake_iomgr, fake_reporter, fake_serv, 
fake_target) as session:
+ reporter = NullReporter()
+ with Session(fake_adapter, fake_iomgr, reporter, fake_serv, fake_target) as session:
session.run([], iteration_limit=1)
assert fake_runner.return_value.run.call_count == 1
assert fake_adapter.on_served.call_count == 1
@@ -184,7 +200,7 @@ def test_session_07(tmp_path, mocker):
assert session.status.iteration == 1
assert session.status.results == 1
assert session.status.ignored == 0
- assert fake_reporter.submit.call_count == 1
+ assert reporter.submit_calls == 1
def test_session_08(tmp_path, mocker):
"""test Session.run() ignoring failures"""
@@ -230,17 +246,17 @@ def test_session_09(tmp_path, mocker):
input_files=[],
server_map=ServerMap(),
tests=deque())
- fake_reporter = mocker.Mock(spec=Reporter)
fake_serv = mocker.Mock(spec=Sapphire, port=0x1337)
fake_target = mocker.Mock(spec=Target)
fake_target.monitor.launches = 1
- with Session(fake_adapter, fake_iomgr, fake_reporter, fake_serv, fake_target) as session:
+ reporter = NullReporter()
+ with Session(fake_adapter, fake_iomgr, reporter, fake_serv, fake_target) as session:
with raises(TargetLaunchError, match=""):
session.run([], iteration_limit=1)
assert session.status.iteration == 1
assert session.status.results == 1
assert session.status.ignored == 0
- assert fake_reporter.submit.call_count == 1
+ assert reporter.submit_calls == 1
def test_session_10(tmp_path, mocker):
"""test Session.report_result()"""
@@ -253,19 +269,16 @@ def test_session_10(tmp_path, mocker):
log_fp.write(b"SEGV on unknown address 0x0 (pc 0x0 bp 0x0 sp 0x0 T0)\n")
log_fp.write(b" #0 0xbad000 in foo /file1.c:123:234\n")
log_fp.write(b" #1 0x1337dd in bar /file2.c:1806:19")
- fake_report = mocker.patch("grizzly.session.Report", autospec=True)
mocker.patch("grizzly.session.mkdtemp", autospec=True, return_value=str(tmpd))
Status.PATH = str(tmp_path)
fake_iomgr = mocker.Mock(spec=IOManager, tests=deque())
- fake_reporter = mocker.Mock(spec=Reporter)
fake_target = mocker.Mock(spec=Target, binary="bin")
- with Session(None, fake_iomgr, fake_reporter, None, fake_target) as session:
+ reporter = NullReporter()
+ with Session(None, fake_iomgr, reporter, None, fake_target) as session:
session.report_result()
assert fake_target.save_logs.call_count == 1
fake_target.save_logs.assert_called_with(str(tmpd))
- assert fake_report.from_path.return_value.crash_info.call_count == 1
- fake_report.from_path.return_value.crash_info.assert_called_with("bin")
- assert fake_reporter.submit.call_count == 1
+ assert reporter.submit_calls == 1
assert not tmpd.is_dir()
def test_log_output_limiter_01(mocker):
From e630f60044c23ec30b223a2358457b916f8138f8 Mon Sep 17 00:00:00 2001
From: Tyson Smith
Date: Tue, 22 Sep 2020 11:34:41 -0700
Subject: [PATCH 015/531] Add ReplayResult

* Update Replay report handling
* TargetLaunchError now includes a Report
* TargetLaunchError handling updates
* Removed GRZ_BROWSER_LOGS
* FilesystemReporter now requires report path
* Added tests, fixes and nits
---
grizzly/common/reporter.py | 23 +-
grizzly/common/storage.py | 1 +
grizzly/common/test_reporter.py | 80 +++--
grizzly/common/test_runner.py | 5 +-
grizzly/common/test_storage.py | 2 +-
grizzly/main.py | 16 +-
grizzly/reduce/reduce.py | 2 +-
grizzly/reduce/test_interesting.py | 7 +-
grizzly/reduce/test_main.py | 4 +-
grizzly/replay/replay.py | 219 +++++++-------
grizzly/replay/test_main.py | 65 ++--
grizzly/replay/test_replay.py | 429 ++++++++++++++++++---------
grizzly/session.py | 9 +-
grizzly/target/puppet_target.py | 32 +-
grizzly/target/target.py | 5 +
grizzly/target/test_puppet_target.py | 39 +--
grizzly/test_main.py | 15 +-
grizzly/test_session.py | 10 +-
18 files changed, 578 insertions(+), 385 deletions(-)
diff --git a/grizzly/common/reporter.py b/grizzly/common/reporter.py
index 48d2892c..aafce0f9 100644
--- a/grizzly/common/reporter.py
+++ b/grizzly/common/reporter.py
@@ -86,6 +86,18 @@ def __init__(self, log_path, target_binary, size_limit=MAX_LOG_SIZE):
self.prefix = "%s_%s" % (self.DEFAULT_MINOR, strftime("%Y-%m-%d_%H-%M-%S"))
self.stack = None
+ @staticmethod
+ def calc_hash(signature):
+ """Create unique hash from a signature.
+
+ Args:
+ signature: Signature to hash.
+
+ Returns:
+ str: Hash of the raw signature.
+ """
+ return sha1(signature.rawSignature.encode("utf-8")).hexdigest()[:16]
+
def cleanup(self):
"""Remove Report data from filesystem.
@@ -101,7 +113,7 @@ def cleanup(self):
@property
def crash_hash(self):
- """Create SHA1 hash from signature.
+ """Create unique hash from a signature.
Args:
None
@@ -109,7 +121,7 @@ def crash_hash(self):
Returns:
str: Hash of the raw signature of the crash.
"""
- return sha1(self.crash_signature.rawSignature.encode("utf-8")).hexdigest()[:16]
+ return self.calc_hash(self.crash_signature)
@property
def crash_info(self):
@@ -387,9 +399,12 @@ def submit(self, test_cases, report):
class FilesystemReporter(Reporter):
DISK_SPACE_ABORT = 512 * 1024 * 1024 # 512 MB
- def __init__(self, report_path=None, major_bucket=True):
+ __slots__ = ("major_bucket", "report_path")
+
+ def __init__(self, report_path, major_bucket=True):
self.major_bucket = major_bucket
- self.report_path = report_path or pathjoin(getcwd(), "results")
+ assert report_path
+ self.report_path = report_path
def _pre_submit(self, report):
pass
diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py
index 239dec32..352c3650 100644
--- a/grizzly/common/storage.py
+++ b/grizzly/common/storage.py
@@ -404,6 +404,7 @@ def purge_optional(self, keep):
Returns:
None
"""
+ # TODO: should we limit or warn on multiple calls to prevent issues?
keep = set(keep)
to_remove = []
for idx, tfile in enumerate(self._files.optional):
diff --git a/grizzly/common/test_reporter.py b/grizzly/common/test_reporter.py
index 843a0694..5804dafd 100644
--- a/grizzly/common/test_reporter.py
+++ b/grizzly/common/test_reporter.py
@@ -17,6 +17,12 @@
from .reporter import FilesystemReporter, FuzzManagerReporter, Report, Reporter, S3FuzzManagerReporter
from .storage import TestCase
+def _create_crash_log(log_path):
+ with log_path.open("w") as log_fp:
+ log_fp.write("==1==ERROR: AddressSanitizer: SEGV on unknown address 0x0")
+ log_fp.write(" (pc 0x0 bp 0x0 sp 0x0 T0)\n")
+ log_fp.write(" #0 0xbad000 in foo /file1.c:123:234\n")
+ log_fp.write(" #1 0x1337dd in bar /file2.c:1806:19")
def test_report_01(tmp_path):
"""test Report() with boring logs (no stack)"""
(tmp_path / "log_stderr.txt").write_bytes(b"STDERR log")
(tmp_path / "log_stdout.txt").write_bytes(b"STDOUT log")
@@ -41,9 +47,7 @@ def test_report_02(tmp_path):
"""test Report() with crash logs"""
(tmp_path / "log_stderr.txt").write_bytes(b"STDERR log")
(tmp_path / "log_stdout.txt").write_bytes(b"STDOUT log")
- with (tmp_path / "log_asan_blah.txt").open("wb") as log_fp:
- log_fp.write(b" #0 0xbad000 in foo /file1.c:123:234\n")
- log_fp.write(b" #1 0x1337dd in bar /file2.c:1806:19")
+ _create_crash_log(tmp_path / "log_asan_blah.txt")
report = Report(str(tmp_path), "bin")
assert report.path == str(tmp_path)
assert report._logs.aux.endswith("log_asan_blah.txt")
@@ -74,6 +78,7 @@ def test_report_04(tmp_path):
"""test Report.select_logs() uninteresting data"""
# test with empty path
assert Report.select_logs(str(tmp_path)) is None
+ # empty file
(tmp_path / "not_a_log.txt").touch()
assert not any(Report.select_logs(str(tmp_path)))
@@ -227,9 +232,7 @@ def test_report_12(tmp_path):
"""test Report.crash_info"""
(tmp_path / "log_stderr.txt").write_bytes(b"STDERR log")
(tmp_path / "log_stdout.txt").write_bytes(b"STDOUT log")
- with (tmp_path / "log_asan_blah.txt").open("wb") as log_fp:
- log_fp.write(b" #0 0xbad000 in foo /file1.c:123:234\n")
- log_fp.write(b" #1 0x1337dd in bar /file2.c:1806:19")
+ _create_crash_log(tmp_path / "log_asan_blah.txt")
# no binary.fuzzmanagerconf
report = Report(str(tmp_path), target_binary="fake_bin")
assert report._crash_info is None
@@ -251,10 +254,7 @@ def test_report_13(mocker, tmp_path):
mocker.patch("grizzly.common.reporter.ProgramConfiguration", autospec=True)
(tmp_path / "log_stderr.txt").write_bytes(b"STDERR log")
(tmp_path / "log_stdout.txt").write_bytes(b"STDOUT log")
- with (tmp_path / "log_asan_blah.txt").open("wb") as log_fp:
- log_fp.write(b"==1==ERROR: AddressSanitizer: SEGV on unknown address 0x0 (pc 0x0 bp 0x0 sp 0x0 T0)\n")
- log_fp.write(b" #0 0xbad000 in foo /file1.c:123:234\n")
- log_fp.write(b" #1 0x1337dd in bar /file2.c:1806:19")
+ _create_crash_log(tmp_path / "log_asan_blah.txt")
report = Report(str(tmp_path), "bin")
assert report._signature is None
assert report.crash_signature
@@ -287,12 +287,10 @@ def test_filesystem_reporter_01(tmp_path):
log_path.mkdir()
(log_path / "log_stderr.txt").write_bytes(b"STDERR log")
(log_path / "log_stdout.txt").write_bytes(b"STDOUT log")
- with (log_path / "log_asan_blah.txt").open("wb") as log_fp:
- log_fp.write(b" #0 0xbad000 in foo /file1.c:123:234\n")
- log_fp.write(b" #1 0x1337dd in bar /file2.c:1806:19")
+ _create_crash_log(log_path / "log_asan_blah.txt")
report_path = tmp_path / "reports"
report_path.mkdir()
- reporter = FilesystemReporter(report_path=str(report_path))
+ reporter = FilesystemReporter(str(report_path))
reporter.submit([], Report(str(log_path), "fake_bin"))
buckets = tuple(report_path.iterdir())
# check major
bucket @@ -310,31 +308,23 @@ def test_filesystem_reporter_02(tmp_path, mocker): log_path.mkdir() (log_path / "log_stderr.txt").write_bytes(b"STDERR log") (log_path / "log_stdout.txt").write_bytes(b"STDOUT log") - with (log_path / "log_asan_blah.txt").open("wb") as log_fp: - log_fp.write(b" #0 0xbad000 in foo /file1.c:123:234\n") - log_fp.write(b" #1 0x1337dd in bar /file2.c:1806:19") - testcases = list() - for _ in range(10): - testcases.append(mocker.Mock(spec=TestCase)) + _create_crash_log(log_path / "log_asan_blah.txt") + tests = list(mocker.Mock(spec=TestCase) for _ in range(10)) report_path = tmp_path / "reports" assert not report_path.exists() - reporter = FilesystemReporter(report_path=str(report_path)) - reporter.submit(testcases, Report(str(log_path), "fake_bin")) + reporter = FilesystemReporter(str(report_path)) + reporter.submit(tests, Report(str(log_path), "fake_bin")) assert not log_path.exists() assert report_path.exists() assert len(tuple(report_path.glob("*"))) == 1 - for tstc in testcases: - assert tstc.dump.call_count == 1 + assert all(x.dump.call_count == 1 for x in tests) # call report a 2nd time log_path.mkdir() (log_path / "log_stderr.txt").write_bytes(b"STDERR log") (log_path / "log_stdout.txt").write_bytes(b"STDOUT log") - testcases = list() - for _ in range(2): - testcases.append(mocker.Mock(spec=TestCase)) - reporter.submit(testcases, Report(str(log_path), "fake_bin")) - for tstc in testcases: - assert tstc.dump.call_count == 1 + tests = list(mocker.Mock(spec=TestCase) for _ in range(2)) + reporter.submit(tests, Report(str(log_path), "fake_bin")) + assert all(x.dump.call_count == 1 for x in tests) assert len(tuple(report_path.glob("*"))) == 2 assert len(tuple(report_path.glob("NO_STACK"))) == 1 @@ -344,26 +334,24 @@ def test_filesystem_reporter_03(tmp_path): log_path.mkdir() (log_path / "log_stderr.txt").write_bytes(b"STDERR log") (log_path / "log_stdout.txt").write_bytes(b"STDOUT log") - report_path = tmp_path / "reports" - report_path.mkdir() - reporter = FilesystemReporter(report_path=str(report_path)) + reporter = FilesystemReporter(str(tmp_path / "reports")) reporter.DISK_SPACE_ABORT = 2 ** 50 - with pytest.raises(RuntimeError) as exc: + with pytest.raises(RuntimeError, match="Running low on disk space"): reporter.submit([], Report(str(log_path), "fake_bin")) - assert "Running low on disk space" in str(exc.value) def test_filesystem_reporter_04(mocker, tmp_path): """test FilesystemReporter w/o major bucket""" - report_path = (tmp_path / "report") - report_path.mkdir() + fake_report = (tmp_path / "fake_report") + fake_report.mkdir() report = mocker.Mock( spec=Report, - path=str(report_path), - prefix="0000_2020_01_01") - reporter = FilesystemReporter(report_path=str(tmp_path), major_bucket=False) + path=str(fake_report), + prefix="test_prefix") + reporter = FilesystemReporter(str(tmp_path / "dst"), major_bucket=False) reporter.submit([], report) - assert not report_path.is_dir() + assert not fake_report.is_dir() assert not report.major.call_count + assert any((tmp_path / "dst").glob("test_prefix_logs")) def test_fuzzmanager_reporter_01(mocker, tmp_path): """test FuzzManagerReporter.sanity_check()""" @@ -479,9 +467,8 @@ def test_fuzzmanager_reporter_06(mocker, tmp_path): (log_path / "log_stderr.txt").touch() (log_path / "log_stdout.txt").touch() reporter = FuzzManagerReporter("fake_bin") - with pytest.raises(RuntimeError) as exc: + with pytest.raises(RuntimeError, match="Failed to create FM signature"): reporter.submit([], Report(str(log_path), "fake_bin")) - 
assert "Failed to create FM signature" in str(exc.value) def test_fuzzmanager_reporter_07(mocker, tmp_path): """test FuzzManagerReporter.submit() unsymbolized crash""" @@ -521,9 +508,8 @@ def test_s3fuzzmanager_reporter_01(mocker, tmp_path): mocker.patch("grizzly.common.reporter.FuzzManagerReporter", autospec=True) fake_bin = tmp_path / "bin" # test GRZ_S3_BUCKET missing - with pytest.raises(EnvironmentError) as exc: + with pytest.raises(EnvironmentError, match="'GRZ_S3_BUCKET' is not set in environment"): S3FuzzManagerReporter.sanity_check(str(fake_bin)) - assert "'GRZ_S3_BUCKET' is not set in environment" in str(exc.value) # test GRZ_S3_BUCKET set pytest.importorskip("boto3") mocker.patch("grizzly.common.reporter.getenv", autospec=True, return_value="test") @@ -549,7 +535,7 @@ def test_s3fuzzmanager_reporter_02(mocker, tmp_path): fake_report.minor = "1234abcd" fake_report.path = str(tmp_path) reporter._pre_submit(fake_report) - assert not tuple(tmp_path.glob("*")) + assert not any(tmp_path.glob("*")) assert "rr-trace" in reporter._extra_metadata assert fake_report.minor in reporter._extra_metadata["rr-trace"] fake_resource.return_value.meta.client.upload_file.assert_not_called() @@ -565,7 +551,7 @@ def __init__(self, message, response): mocker.patch("grizzly.common.reporter.ClientError", new=FakeClientError) fake_resource.return_value.Object.side_effect = FakeClientError("test", {"Error": {"Code": "404"}}) reporter._pre_submit(fake_report) - assert not tuple(tmp_path.glob("*")) + assert not any(tmp_path.glob("*")) assert "rr-trace" in reporter._extra_metadata assert fake_report.minor in reporter._extra_metadata["rr-trace"] assert fake_resource.return_value.meta.client.upload_file.call_count == 1 diff --git a/grizzly/common/test_runner.py b/grizzly/common/test_runner.py index da9be05f..b8d89935 100644 --- a/grizzly/common/test_runner.py +++ b/grizzly/common/test_runner.py @@ -9,6 +9,7 @@ from sapphire import Sapphire, SERVED_ALL, SERVED_NONE, SERVED_REQUEST, SERVED_TIMEOUT, ServerMap +from .reporter import Report from .runner import _IdleChecker, Runner, RunResult from .storage import TestCase from ..target import Target, TargetLaunchError, TargetLaunchTimeout @@ -176,8 +177,8 @@ def test_runner_08(mocker): assert target.launch.call_count == 1 target.reset_mock() - target.launch.side_effect = TargetLaunchError - with raises(TargetLaunchError): + target.launch.side_effect = TargetLaunchError("test", mocker.Mock(spec=Report)) + with raises(TargetLaunchError, match="test"): runner.launch("http://a/") assert target.launch.call_count == 1 target.reset_mock() diff --git a/grizzly/common/test_storage.py b/grizzly/common/test_storage.py index 983673b0..c090e48c 100644 --- a/grizzly/common/test_storage.py +++ b/grizzly/common/test_storage.py @@ -89,7 +89,7 @@ def test_testcase_04(tmp_path): """test TestCase.add_environ_var() and TestCase.env_vars""" with TestCase("land_page.html", "redirect.html", "test-adapter") as tcase: tcase.add_environ_var("TEST_ENV_VAR", "1") - assert len(list(tcase.env_vars)) == 1 + assert len(tcase.env_vars) == 1 tcase.add_environ_var("TEST_NONE", None) assert len(tcase.env_vars) == 2 dmp_path = tmp_path / "dmp_test" diff --git a/grizzly/main.py b/grizzly/main.py index 08047aa4..b52c9f9a 100644 --- a/grizzly/main.py +++ b/grizzly/main.py @@ -3,11 +3,15 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
from logging import basicConfig, DEBUG, getLogger +from os.path import join as pathjoin +from os import getcwd from sapphire import Sapphire from .adapters import get as get_adapter -from .common import FilesystemReporter, FuzzManagerReporter, IOManager, S3FuzzManagerReporter +from .common.iomanager import IOManager +from .common.reporter import FilesystemReporter, FuzzManagerReporter, S3FuzzManagerReporter +from .common.utils import grz_tmp from .session import Session from .target import load as load_target, TargetLaunchError, TargetLaunchTimeout @@ -99,7 +103,7 @@ def main(args): log.info("Results will be reported via FuzzManager w/ large attachments in S3") reporter = S3FuzzManagerReporter(tool=args.tool) else: - reporter = FilesystemReporter() + reporter = FilesystemReporter(pathjoin(getcwd(), "results")) log.info("Results will be stored in %r", reporter.report_path) # set 'auto_close=1' so the client error pages (code 4XX) will @@ -126,7 +130,13 @@ def main(args): log.info("Ctrl+C detected.") return Session.EXIT_ABORT - except (TargetLaunchError, TargetLaunchTimeout): + except (TargetLaunchError, TargetLaunchTimeout) as exc: + log.error(str(exc)) + if isinstance(exc, TargetLaunchError) and exc.report: + path = grz_tmp("launch_failures") + log.error("Logs can be found here %r", path) + reporter = FilesystemReporter(path, major_bucket=False) + reporter.submit([], exc.report) return Session.EXIT_LAUNCH_FAILURE finally: diff --git a/grizzly/reduce/reduce.py b/grizzly/reduce/reduce.py index d187df41..008f8f5a 100644 --- a/grizzly/reduce/reduce.py +++ b/grizzly/reduce/reduce.py @@ -1133,7 +1133,7 @@ def from_args(cls, args, target): LOG.info("Reporting issues via FuzzManager") job.set_reporter(FuzzManagerReporter(tool=args.tool)) else: - reporter = FilesystemReporter() + reporter = FilesystemReporter(os.path.join(os.getcwd(), "results")) job.set_reporter(reporter) LOG.info("Results will be stored in %r", reporter.report_path) diff --git a/grizzly/reduce/test_interesting.py b/grizzly/reduce/test_interesting.py index 859469aa..8622eada 100644 --- a/grizzly/reduce/test_interesting.py +++ b/grizzly/reduce/test_interesting.py @@ -8,6 +8,7 @@ import time import pytest import sapphire +from ..common import Report from ..reduce.reduce import ReductionJob from ..target.target import Target, TargetLaunchError, TargetLaunchTimeout from .test_common import FakeTarget, create_target_binary @@ -120,14 +121,14 @@ def detect_failure(self, ignored, *args, **kwds): assert obj.target._calls["detect_failure"] == 1 -def test_target_relaunch_error(tmp_path): +def test_target_relaunch_error(mocker, tmp_path): "target should be launched only once on TargetLaunchError" class MyTarget(FakeTarget): def launch(self, *args, **kwds): FakeTarget.launch(self, *args, **kwds) - raise TargetLaunchError() + raise TargetLaunchError("test", mocker.Mock(spec=Report)) with ReductionJob([], MyTarget(), 30, False, False, 0, 1, 1, 0, 0) as obj: create_target_binary(obj.target, tmp_path) @@ -136,7 +137,7 @@ def launch(self, *args, **kwds): (tmp_path / "test.html").touch() obj.reduce_file = str(tmp_path / "test.html") obj.lithium_init() - with pytest.raises(TargetLaunchError): + with pytest.raises(TargetLaunchError, match="test"): obj.lithium_interesting(str(prefix)) assert obj.server is not None assert obj.target._calls["launch"] == 1 diff --git a/grizzly/reduce/test_main.py b/grizzly/reduce/test_main.py index d4928128..bfd4c5b1 100644 --- a/grizzly/reduce/test_main.py +++ b/grizzly/reduce/test_main.py @@ -152,8 +152,8 @@ def 
test_main_strategies(mocker, monkeypatch, tmp_path): # noqa pylint: disable class FakeReporter(BaseFakeReporter): - def __init__(self, *args, **kwds): - super(FakeReporter, self).__init__(*args, **kwds) + def __init__(self, _): + super(FakeReporter, self).__init__() self.report_path = "foo" def _submit_report(self, _report, test_cases): diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 6d7bc3b9..fd0e5ffa 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -26,12 +26,21 @@ LOG = getLogger("replay") +class ReplayResult(object): + __slots__ = ("count", "expected", "report", "served") + + def __init__(self, report, served, expected): + self.count = 1 + self.expected = expected + self.report = report + self.served = served + + class ReplayManager(object): HARNESS_FILE = pathjoin(dirname(__file__), "..", "common", "harness.html") __slots__ = ("ignore", "server", "status", "target", "_any_crash", - "_harness", "_reports_expected", "_reports_other", "_runner", - "_signature", "_unpacked") + "_harness", "_runner", "_signature", "_unpacked") def __init__(self, ignore, server, target, any_crash=False, signature=None, use_harness=True): self.ignore = ignore @@ -40,8 +49,6 @@ def __init__(self, ignore, server, target, any_crash=False, signature=None, use_ self.target = target self._any_crash = any_crash self._harness = None - self._reports_expected = dict() - self._reports_other = dict() self._runner = Runner(self.server, self.target) # TODO: make signature a property self._signature = signature @@ -64,67 +71,38 @@ def cleanup(self): Returns: None """ - for report in self._reports_expected.values(): - report.cleanup() - self._reports_expected.clear() - for report in self._reports_other.values(): - report.cleanup() - self._reports_other.clear() if self.status is not None: self.status.cleanup() - @property - def other_reports(self): - """Reports from results that do not match: - - the given signature - - the initial result (if any-crash is not specified) - - Args: - None - - Returns: - iterable: Reports. - """ - return self._reports_other.values() - - @property - def reports(self): - """Reports from results. - - Args: - None - - Returns: - iterable: Reports. - """ - return self._reports_expected.values() - @staticmethod - def report_to_filesystem(path, reports, other_reports=None, tests=None): + def report_to_filesystem(path, results, tests=None): """Use FilesystemReporter to write reports and testcase to disk in a known location. Args: path (str): Location to write data. - reports (iterable): Reports to output. - other_reports (iterable): Reports to output. + results (iterable): ReplayResult to output. tests (iterable): Testcases to output. 
Returns:
None
"""
- if reports:
- reporter = FilesystemReporter(
- report_path=pathjoin(path, "reports"),
- major_bucket=False)
- for report in reports:
- reporter.submit(tests or [], report=report)
- if other_reports:
- reporter = FilesystemReporter(
- report_path=pathjoin(path, "other_reports"),
- major_bucket=False)
- for report in other_reports:
+ others = list(x.report for x in results if not x.expected)
+ if others:
+ reporter = FilesystemReporter(pathjoin(path, "other_reports"), major_bucket=False)
+ for report in others:
reporter.submit(tests or [], report=report)
+ expected = list(x for x in results if x.expected)
+ if expected:
+ if tests and len(expected) == 1:
+ # only purge optional if reporting a single testcase
+ assert len(tests) >= len(expected[0].served)
+ for test, served in zip(tests, expected[0].served):
+ LOG.debug("calling test.purge_optional() with %r", served)
+ test.purge_optional(served)
+ reporter = FilesystemReporter(pathjoin(path, "reports"), major_bucket=False)
+ for result in expected:
+ reporter.submit(tests or [], report=result.report)
def run(self, testcases, repeat=1, min_results=1):
"""Run testcase replay.
@@ -136,16 +114,15 @@ def run(self, testcases, repeat=1, min_results=1):
be considered successful.
Returns:
- bool: True if results were reproduced otherwise False.
+ list: List of ReplayResults that were found running testcases.
"""
assert repeat > 0
assert min_results > 0
assert min_results <= repeat
+ assert testcases
assert self.status is None
self.status = Status.start()
- test_count = len(testcases)
- assert test_count > 0
server_map = ServerMap()
if self._harness is not None:
@@ -158,9 +135,13 @@ def _dyn_close(): # pragma: no cover
server_map.set_dynamic_response("grz_close_browser", _dyn_close, mime_type="text/html")
server_map.set_dynamic_response("grz_harness", lambda: self._harness, mime_type="text/html")
- success = False
+ # track unprocessed results
+ reports = dict()
+ # track unpacked testcases
unpacked = list()
try:
+ sig_hash = Report.calc_hash(self._signature) if self._signature else None
+ test_count = len(testcases)
LOG.debug("unpacking testcases (%d)...", test_count)
for test in testcases:
dst_path = mkdtemp(prefix="tc_", dir=grz_tmp("serve"))
@@ -181,22 +162,16 @@ def _dyn_close(): # pragma: no cover
self.server.port,
close_after=self.target.rl_reset * test_count,
forced_close=self.target.forced_close)
- try:
- # The environment from the initial testcase is used because
- # a sequence of testcases is expected to be run without
- # relaunching the Target to match the functionality of
- # Grizzly. If this is not the case each TestCase should
- # be run individually.
- self._runner.launch(location, env_mod=testcases[0].env_vars)
- except TargetLaunchError:
- LOG.error("Target launch error. Check browser logs for details.")
- log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs"))
- self.target.save_logs(log_path)
- self._reports_other["STARTUP"] = Report(log_path, self.target.binary)
- raise
+ # The environment from the initial testcase is used because
+ # a sequence of testcases is expected to be run without
+ # relaunching the Target to match the functionality of
+ # Grizzly. If this is not the case each TestCase should
+ # be run individually.
+ self._runner.launch(location, env_mod=testcases[0].env_vars) self.target.step() LOG.info("Performing replay (%d/%d)...", self.status.iteration, repeat) # run tests + served = list() for test_idx in range(test_count): LOG.debug("running test: %d of %d", test_idx + 1, test_count) # update redirects @@ -211,23 +186,24 @@ def _dyn_close(): # pragma: no cover testcases[test_idx].landing_page, required=False) # run testcase - result = self._runner.run( + run_result = self._runner.run( self.ignore, server_map, testcases[test_idx], test_path=unpacked[test_idx], wait_for_callback=self._harness is None) - if result.status != RunResult.COMPLETE: + served.append(run_result.served) + if run_result.status != RunResult.COMPLETE: break - # process results - if result.status == RunResult.FAILED: + # process run results + if run_result.status == RunResult.FAILED: log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) self.target.save_logs(log_path) report = Report(log_path, self.target.binary) # check signatures short_sig = report.crash_info.createShortSignature() if not self._any_crash and self._signature is None and short_sig != "No crash detected": - # signature has not been specified use the first one created + LOG.debug("no signature given, using %r", short_sig) self._signature = report.crash_signature if short_sig == "No crash detected": # TODO: verify report.major == "NO_STACK" otherwise FM failed to parse the logs @@ -237,31 +213,37 @@ def _dyn_close(): # pragma: no cover self.status.count_result(short_sig) LOG.info("Result: %s (%s:%s)", short_sig, report.major[:8], report.minor[:8]) - if report.crash_hash not in self._reports_expected: - LOG.debug("now tracking %s", report.crash_hash) - self._reports_expected[report.crash_hash] = report + if sig_hash: + LOG.debug("using provided signature (hash) to bucket") + bucket_hash = sig_hash + else: + bucket_hash = report.crash_hash + if bucket_hash not in reports: + reports[bucket_hash] = ReplayResult(report, served, True) + LOG.debug("now tracking %s", bucket_hash) report = None # don't remove report else: - LOG.debug("already tracking %s", report.crash_hash) - assert self._any_crash or len(self._reports_expected) == 1 + reports[bucket_hash].count += 1 + LOG.debug("already tracking %s", bucket_hash) else: LOG.info("Result: Different signature: %s (%s:%s)", short_sig, report.major[:8], report.minor[:8]) self.status.ignored += 1 - if report.crash_hash not in self._reports_other: + if report.crash_hash not in reports: + reports[report.crash_hash] = ReplayResult(report, served, False) LOG.debug("now tracking %s", report.crash_hash) - self._reports_other[report.crash_hash] = report report = None # don't remove report else: + reports[report.crash_hash].count += 1 LOG.debug("already tracking %s", report.crash_hash) # purge untracked report if report is not None: report.cleanup() report = None - elif result.status == RunResult.IGNORED: + elif run_result.status == RunResult.IGNORED: self.status.ignored += 1 LOG.info("Result: Ignored (%d)", self.status.ignored) - elif result.status == RunResult.ERROR: + elif run_result.status == RunResult.ERROR: LOG.error("ERROR: Replay malfunction, test case was not served") break @@ -270,13 +252,14 @@ def _dyn_close(): # pragma: no cover if self.status.iteration < repeat: LOG.debug("skipping remaining attempts") # failed to reproduce issue - LOG.debug("results (%d) < expected (%s) after %d attempts", + LOG.debug("results (%d) < expected, %s after %d attempts", self.status.results, min_results, self.status.iteration) break + # 
check if complete (results found) if self.status.results >= min_results: assert self.status.results == min_results - success = True - LOG.debug("results == expected (%s) after %d attempts", + assert sum(x.count for x in reports.values() if x.expected) >= min_results + LOG.debug("results == expected, %s after %d attempts", min_results, self.status.iteration) break @@ -287,15 +270,40 @@ def _dyn_close(): # pragma: no cover # trigger relaunch by closing the browser if needed self.target.check_relaunch() + + # process results + results = list() + if self._any_crash: + assert all(x.expected for x in reports.values()) + if sum(x.count for x in reports.values()) >= min_results: + results = list(reports.values()) + else: + LOG.debug("%d (any_crash) less than minimum %d", self.status.results, min_results) + for report in reports.values(): + report.report.cleanup() + else: + assert sum(x.expected for x in reports.values()) <= 1 + # filter out unreliable expected results + for crash_hash, report in reports.items(): + if report.expected and report.count < min_results: + LOG.debug("%r less than minimum (%d/%d)", crash_hash, report.count, min_results) + report.report.cleanup() + continue + results.append(report) + # active reports have been moved to results + # clear reports to avoid cleanup of active reports + reports.clear() + return results + finally: + # remove unpacked testcase data for tc_path in unpacked: rmtree(tc_path) - if success: - LOG.info("Result successfully reproduced") - else: - LOG.info("Failed to reproduce results") - self.target.close() - return success + self.target.close() + # remove unprocessed reports + for report in reports.values(): + report.report.cleanup() + @classmethod def main(cls, args): @@ -329,6 +337,7 @@ def main(cls, args): return 1 replay = None + results = None target = None tmp_prefs = None try: @@ -380,12 +389,17 @@ def main(cls, args): any_crash=args.any_crash, signature=signature, use_harness=not args.no_harness) - success = replay.run(testcases, repeat=repeat, min_results=args.min_crashes) - if args.logs: + results = replay.run(testcases, repeat=repeat, min_results=args.min_crashes) + # handle results + success = any(x.expected for x in results) + if success: + LOG.info("Result successfully reproduced") + else: + LOG.info("Failed to reproduce results") + if args.logs and results: replay.report_to_filesystem( args.logs, - replay.reports, - replay.other_reports, + results, testcases if args.include_test else None) # TODO: add fuzzmanager reporting return 0 if success else 1 @@ -393,18 +407,23 @@ def main(cls, args): except KeyboardInterrupt: return 1 - except (TargetLaunchError, TargetLaunchTimeout): - if args.logs and replay is not None: - replay.report_to_filesystem( - args.logs, - replay.reports, - replay.other_reports) + except (TargetLaunchError, TargetLaunchTimeout) as exc: + LOG.error(str(exc)) + if isinstance(exc, TargetLaunchError) and exc.report: + path = grz_tmp("launch_failures") + LOG.error("Logs can be found here %r", path) + reporter = FilesystemReporter(path, major_bucket=False) + reporter.submit([], exc.report) return 1 finally: LOG.warning("Shutting down...") if replay is not None: replay.cleanup() + if results: + # cleanup unreported results + for result in results: + result.report.cleanup() if target is not None: target.cleanup() for testcase in testcases: diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index b6b404c4..6324dfc1 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -11,7 +11,7 @@ 
from pytest import raises
-from ..common import TestCase, TestCaseLoadFailure
+from ..common import Report, TestCase, TestCaseLoadFailure
from ..target import Target, TargetLaunchError, TargetLaunchTimeout
from ..replay import ReplayManager
from ..replay.args import ReplayArgs
@@ -87,21 +87,21 @@ def _fake_save_logs(result_logs):
target.save_logs = _fake_save_logs
load_target.return_value.return_value = target
# setup args
- args = mocker.Mock()
- args.fuzzmanager = False
- args.ignore = ["fake", "timeout"]
log_path = (tmp_path / "logs")
- args.logs = str(log_path)
(tmp_path / "test.html").touch()
- args.input = str(tmp_path / "test.html")
- args.min_crashes = 2
(tmp_path / "prefs.js").touch()
- args.prefs = str(tmp_path / "prefs.js")
- args.relaunch = 1
- args.repeat = 4
(tmp_path / "sig.json").write_bytes(b"{\"symptoms\": [{\"type\": \"crashAddress\", \"address\": \"0\"}]}")
- args.sig = str(tmp_path / "sig.json")
- args.timeout = 10
+ args = mocker.Mock(
+ fuzzmanager=False,
+ ignore=["fake", "timeout"],
+ input=str(tmp_path / "test.html"),
+ logs=str(log_path),
+ min_crashes=2,
+ prefs=str(tmp_path / "prefs.js"),
+ relaunch=1,
+ repeat=4,
+ sig=str(tmp_path / "sig.json"),
+ timeout=10)
assert ReplayManager.main(args) == 0
assert target.forced_close
assert target.reverse.call_count == 1
@@ -134,12 +134,6 @@ def test_main_02(mocker):
relaunch=1,
repeat=1,
sig=None)
- # target launch error
- mocker.patch("grizzly.replay.replay.ReplayManager.run", side_effect=TargetLaunchError)
- assert ReplayManager.main(args) == 1
- # target launch timeout
- mocker.patch("grizzly.replay.replay.ReplayManager.run", side_effect=TargetLaunchTimeout)
- assert ReplayManager.main(args) == 1
# user abort
fake_load_target.side_effect = KeyboardInterrupt
assert ReplayManager.main(args) == 1
@@ -159,7 +153,38 @@ def test_main_02(mocker):
assert ReplayManager.main(args) == 1
assert fake_load_target.call_count == 0
-def test_main_03(mocker):
+def test_main_03(mocker, tmp_path):
+ """test ReplayManager.main() target exceptions"""
+ mocker.patch("grizzly.replay.replay.FuzzManagerReporter", autospec=True)
+ mocker.patch("grizzly.replay.replay.load_target", autospec=True)
+ mocker.patch("grizzly.replay.replay.Sapphire", autospec=True)
+ mocker.patch("grizzly.replay.replay.TestCase", autospec=True)
+ fake_tmp = (tmp_path / "grz_tmp")
+ fake_tmp.mkdir()
+ mocker.patch("grizzly.replay.replay.grz_tmp", autospec=True, return_value=fake_tmp)
+ # setup args
+ args = mocker.Mock(
+ ignore=None,
+ input="test",
+ min_crashes=1,
+ no_harness=True,
+ prefs=None,
+ relaunch=1,
+ repeat=1,
+ sig=None)
+ # target launch error
+ fake_logs = (tmp_path / "fake_report")
+ fake_logs.mkdir()
+ report = mocker.Mock(spec=Report, prefix="fake_report", path=str(fake_logs))
+ mocker.patch("grizzly.replay.replay.ReplayManager.run", side_effect=TargetLaunchError("", report))
+ assert ReplayManager.main(args) == 1
+ assert not fake_logs.is_dir()
+ assert any(fake_tmp.glob("fake_report_logs"))
+ # target launch timeout
+ mocker.patch("grizzly.replay.replay.ReplayManager.run", side_effect=TargetLaunchTimeout)
+ assert ReplayManager.main(args) == 1
+
+def test_main_04(mocker):
"""test ReplayManager.main() loading GRZ_FORCED_CLOSE from test case"""
mocker.patch("grizzly.replay.replay.Sapphire.serve_path", return_value=(None, ["x.html"]))
target = mocker.Mock(spec=Target, forced_close=True)
@@ -187,7 +212,7 @@ def test_main_03(mocker):
assert target.cleanup.call_count == 1
assert not target.forced_close
-def test_main_04(mocker, tmp_path):
+def
test_main_05(mocker, tmp_path): """test ReplayManager.main() loading/generating prefs.js""" serve_path = mocker.patch("grizzly.replay.replay.Sapphire.serve_path", autospec=True) serve_path.return_value = (None, ["test.html"]) # passed to mocked Target.detect_failure diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 6b9d90ce..f16fef61 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -11,9 +11,9 @@ from pytest import raises from sapphire import Sapphire, SERVED_ALL, SERVED_REQUEST -from .replay import ReplayManager +from .replay import ReplayManager, ReplayResult from ..common import Report, Status, TestCase -from ..target import Target, TargetLaunchError +from ..target import Target def _fake_save_logs_result(result_logs, meta=False): # pylint: disable=unused-argument @@ -31,14 +31,8 @@ def _fake_save_logs_result(result_logs, meta=False): # pylint: disable=unused-a def test_replay_01(mocker): """test ReplayManager.cleanup()""" replay = ReplayManager([], mocker.Mock(spec=Sapphire), mocker.Mock(spec=Target), [mocker.Mock()]) - replay._reports_expected = {"A": mocker.Mock(spec=Report)} - replay._reports_other = {"B": mocker.Mock(spec=Report)} replay.status = mocker.Mock(spec=Status) - ereport = tuple(replay.reports)[0] - oreport = tuple(replay.other_reports)[0] replay.cleanup() - assert ereport.cleanup.call_count == 1 - assert oreport.cleanup.call_count == 1 assert replay.status.cleanup.call_count == 1 def test_replay_02(mocker, tmp_path): @@ -46,42 +40,66 @@ def test_replay_02(mocker, tmp_path): mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target) + target = mocker.Mock(spec=Target, closed=True, forced_close=True, rl_reset=1) target.RESULT_NONE = Target.RESULT_NONE - target.closed = True target.detect_failure.return_value = Target.RESULT_NONE - target.forced_close = True - target.rl_reset = 1 - with TestCase("land_page.html", "redirect.html", "test-adapter") as testcase: + with TestCase("index.html", "redirect.html", "test-adapter") as testcase: with ReplayManager([], server, target, use_harness=True) as replay: assert not replay.run([testcase]) + assert replay._signature is None assert replay.status.ignored == 0 assert replay.status.iteration == 1 assert replay.status.results == 0 - assert not replay.reports + assert target.close.call_count == 1 + assert target.check_relaunch.call_count == 0 assert not any(tmp_path.glob("*")) -def test_replay_03(mocker, tmp_path): +def test_replay_03(mocker): + """test ReplayManager.run() - no repro - with repeats""" + server = mocker.Mock(spec=Sapphire, port=0x1337) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) + target = mocker.Mock(spec=Target, closed=True, rl_reset=100) + target.RESULT_NONE = Target.RESULT_NONE + target.detect_failure.return_value = Target.RESULT_NONE + with TestCase("index.html", "redirect.html", "test-adapter") as testcase: + with ReplayManager([], server, target, use_harness=True) as replay: + assert not replay.run([testcase], repeat=10, min_results=1) + assert replay._signature is None + assert replay.status.ignored == 0 + assert replay.status.iteration == 10 + assert replay.status.results == 0 + assert target.close.call_count == 1 + assert target.check_relaunch.call_count == 9 + +def test_replay_04(mocker, tmp_path): """test ReplayManager.run() - successful repro""" 
mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) + served = ["index.html"] server = mocker.Mock(spec=Sapphire, port=0x1337) - server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target) + server.serve_path.return_value = (SERVED_ALL, served) + target = mocker.Mock(spec=Target, binary="C:\\fake_bin") target.RESULT_FAILURE = Target.RESULT_FAILURE - target.binary = "C:\\fake_bin" target.detect_failure.return_value = Target.RESULT_FAILURE target.save_logs = _fake_save_logs_result - with TestCase("land_page.html", "redirect.html", "test-adapter") as testcase: + with TestCase("index.html", "redirect.html", "test-adapter") as testcase: with ReplayManager([], server, target, use_harness=False) as replay: - assert replay.run([testcase]) + results = replay.run([testcase]) + assert replay._signature is not None assert replay.status.ignored == 0 assert replay.status.iteration == 1 assert replay.status.results == 1 - assert len(replay.reports) == 1 - assert not replay.other_reports + assert target.close.call_count == 1 + assert target.check_relaunch.call_count == 0 + assert len(results) == 1 + assert results[0].count == 1 + assert results[0].expected + assert results[0].report + assert len(results[0].served) == 1 + assert results[0].served[0] == served + results[0].report.cleanup() assert not any(tmp_path.glob("*")) -def test_replay_04(mocker): +def test_replay_05(mocker): """test ReplayManager.run() - Error (landing page not requested/served)""" server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_REQUEST, ["x"]) @@ -94,12 +112,13 @@ def test_replay_04(mocker): assert replay.status.ignored == 0 assert replay.status.iteration == 1 assert replay.status.results == 0 - assert not replay.reports - assert not replay.other_reports + # target.close() called once in runner and once by ReplayManager.run() + assert target.close.call_count == 2 assert target.check_relaunch.call_count == 0 -def test_replay_05(mocker): +def test_replay_06(mocker, tmp_path): """test ReplayManager.run() - ignored""" + mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target) @@ -108,14 +127,15 @@ def test_replay_05(mocker): testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, use_harness=False) as replay: assert not replay.run(testcases) + assert target.close.call_count == 1 assert replay.status.ignored == 1 assert replay.status.iteration == 1 assert replay.status.results == 0 - assert not replay.reports - assert not replay.other_reports + assert not any(tmp_path.glob("*")) -def test_replay_06(mocker): +def test_replay_07(mocker, tmp_path): """test ReplayManager.run() - early exit""" + mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target, binary="path/fake_bin") @@ -125,68 +145,105 @@ def test_replay_06(mocker): target.save_logs = _fake_save_logs_result testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] # early failure - target.detect_failure.side_effect = [Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_NONE] + target.detect_failure.side_effect = (Target.RESULT_FAILURE, 
Target.RESULT_IGNORED, Target.RESULT_NONE) with ReplayManager([], server, target, use_harness=False) as replay: assert not replay.run(testcases, repeat=4, min_results=3) + assert target.close.call_count == 1 assert replay.status.iteration == 3 assert replay.status.results == 1 assert replay.status.ignored == 1 - assert len(replay.reports) == 1 # early success - target.detect_failure.side_effect = [Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_FAILURE] + target.reset_mock() + target.detect_failure.side_effect = (Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_FAILURE) with ReplayManager([], server, target, use_harness=False) as replay: - assert replay.run(testcases, repeat=4, min_results=2) + results = replay.run(testcases, repeat=4, min_results=2) + assert target.close.call_count == 1 assert replay.status.iteration == 3 assert replay.status.results == 2 assert replay.status.ignored == 1 - assert len(replay._reports_expected) == 1 - assert not replay._reports_other - assert len(replay.reports) == 1 + assert len(results) == 1 -def test_replay_07(mocker, tmp_path): - """test ReplayManager.run() - test signatures""" +def test_replay_08(mocker, tmp_path): + """test ReplayManager.run() - test signatures - fail to meet minimum""" mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) report_0 = mocker.Mock(spec=Report) report_0.crash_info.createShortSignature.return_value = "No crash detected" - report_1 = mocker.Mock(spec=Report, major="0123abcd", minor="01239999") + report_1 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999") report_1.crash_info.createShortSignature.return_value = "[@ test1]" - report_2 = mocker.Mock(spec=Report, major="0123abcd", minor="abcd9876") + report_2 = mocker.Mock(spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876") report_2.crash_info.createShortSignature.return_value = "[@ test2]" + report_3 = mocker.Mock(spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876") + report_3.crash_info.createShortSignature.return_value = "[@ test2]" fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) - fake_report.side_effect = (report_0, report_1, report_2) + fake_report.side_effect = (report_0, report_1, report_2, report_3) + fake_report.calc_hash.return_value = "bucketHASH" server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) signature = mocker.Mock() - signature.matches.side_effect = (True, False) + signature.matches.side_effect = (True, False, False) target = mocker.Mock(spec=Target, binary="fake_bin") target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, signature=signature, use_harness=False) as replay: - assert not replay.run(testcases, repeat=3, min_results=2) + results = replay.run(testcases, repeat=4, min_results=2) + assert target.close.call_count == 1 assert replay._signature == signature - assert fake_report.call_count == 3 - assert replay.status.iteration == 3 + assert replay.status.iteration == 4 assert replay.status.results == 1 - assert replay.status.ignored == 1 - assert len(replay.reports) == 1 - assert len(replay.other_reports) == 1 - assert report_0.cleanup.call_count == 1 - assert report_1.cleanup.call_count == 0 - assert report_2.cleanup.call_count == 0 + assert replay.status.ignored == 
2 + assert fake_report.call_count == 4 + assert len(results) == 1 + assert not results[0].expected + assert results[0].count == 2 + assert report_0.cleanup.call_count == 1 + assert report_1.cleanup.call_count == 1 + assert report_2.cleanup.call_count == 0 + assert report_3.cleanup.call_count == 1 + assert signature.matches.call_count == 3 + +def test_replay_09(mocker, tmp_path): + """test ReplayManager.run() - test signatures - multiple matches""" + mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) + report_0 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999") + report_0.crash_info.createShortSignature.return_value = "[@ test1]" + report_1 = mocker.Mock(spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876") + report_1.crash_info.createShortSignature.return_value = "[@ test2]" + fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) + fake_report.side_effect = (report_0, report_1) + fake_report.calc_hash.return_value = "bucketHASH" + server = mocker.Mock(spec=Sapphire, port=0x1337) + server.serve_path.return_value = (SERVED_ALL, ["a.html"]) + signature = mocker.Mock() + signature.matches.side_effect = (True, True) + target = mocker.Mock(spec=Target, binary="fake_bin") + target.RESULT_FAILURE = Target.RESULT_FAILURE + target.detect_failure.return_value = Target.RESULT_FAILURE + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="a.html", optional=[])] + with ReplayManager([], server, target, signature=signature, use_harness=False) as replay: + results = replay.run(testcases, repeat=2, min_results=2) + assert target.close.call_count == 1 + assert replay._signature == signature + assert replay.status.iteration == 2 + assert replay.status.results == 2 + assert replay.status.ignored == 0 + assert fake_report.call_count == 2 + assert len(results) == 1 + assert results[0].expected + assert results[0].count == 2 + assert report_0.cleanup.call_count == 0 + assert report_1.cleanup.call_count == 1 assert signature.matches.call_count == 2 -def test_replay_08(mocker, tmp_path): - """test ReplayManager.run() - any crash""" +def test_replay_10(mocker, tmp_path): + """test ReplayManager.run() - any crash - success""" mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) report_0 = mocker.Mock(spec=Report) report_0.crash_info.createShortSignature.return_value = "No crash detected" - report_1 = mocker.Mock(spec=Report, major="0123abcd", minor="01239999") + report_1 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999") report_1.crash_info.createShortSignature.return_value = "[@ test1]" - report_1.crash_hash = "hash1" - report_2 = mocker.Mock(spec=Report, major="0123abcd", minor="abcd9876") + report_2 = mocker.Mock(spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876") report_2.crash_info.createShortSignature.return_value = "[@ test2]" - report_2.crash_hash = "hash2" fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) fake_report.side_effect = (report_0, report_1, report_2) server = mocker.Mock(spec=Sapphire, port=0x1337) @@ -196,109 +253,157 @@ def test_replay_08(mocker, tmp_path): target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, any_crash=True, use_harness=False) as replay: - assert replay.run(testcases, repeat=3, min_results=2) + results = 
replay.run(testcases, repeat=3, min_results=2) + assert target.close.call_count == 1 assert replay._signature is None - assert fake_report.call_count == 3 assert replay.status.iteration == 3 assert replay.status.results == 2 assert replay.status.ignored == 0 - assert len(replay.reports) == 2 - assert not replay.other_reports - assert report_0.cleanup.call_count == 1 - assert report_1.cleanup.call_count == 0 - assert report_2.cleanup.call_count == 0 + assert fake_report.call_count == 3 + assert len(results) == 2 + assert all(x.expected for x in results) + assert sum(x.count for x in results if x.expected) == 2 + assert report_0.cleanup.call_count == 1 + assert report_1.cleanup.call_count == 0 + assert report_2.cleanup.call_count == 0 -def test_replay_09(mocker, tmp_path): - """test ReplayManager.report_to_filesystem()""" - # no reports - ReplayManager.report_to_filesystem(str(tmp_path), []) - assert not any(tmp_path.glob("*")) - # with reports and tests - (tmp_path / "report_expected").mkdir() - expected = [ - mocker.Mock( - spec=Report, - path=str(tmp_path / "report_expected"), - prefix="expected")] - (tmp_path / "report_other1").mkdir() - (tmp_path / "report_other2").mkdir() - other = [ - mocker.Mock( - spec=Report, - path=str(tmp_path / "report_other1"), - prefix="other1"), - mocker.Mock( - spec=Report, - path=str(tmp_path / "report_other2"), - prefix="other2")] - test = mocker.Mock(spec=TestCase) - path = tmp_path / "dest" - ReplayManager.report_to_filesystem(str(path), expected, other, tests=[test]) - assert test.dump.call_count == 3 # called once per report - assert not (tmp_path / "report_expected").is_dir() - assert not (tmp_path / "report_other1").is_dir() - assert not (tmp_path / "report_other2").is_dir() - assert path.is_dir() - assert (path / "reports").is_dir() - assert (path / "reports" / "expected_logs").is_dir() - assert (path / "other_reports").is_dir() - assert (path / "other_reports" / "other1_logs").is_dir() - assert (path / "other_reports" / "other2_logs").is_dir() - # with reports and not tests - (tmp_path / "report_expected").mkdir() - expected = [ - mocker.Mock( - spec=Report, - path=str(tmp_path / "report_expected"), - prefix="expected")] - path = tmp_path / "dest2" - ReplayManager.report_to_filesystem(str(path), expected) - assert not (tmp_path / "report_expected").is_dir() - assert path.is_dir() - assert (path / "reports" / "expected_logs").is_dir() +def test_replay_11(mocker, tmp_path): + """test ReplayManager.run() - any crash - fail to meet minimum""" + mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) + report_0 = mocker.Mock(spec=Report) + report_0.crash_info.createShortSignature.return_value = "No crash detected" + report_1 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999") + report_1.crash_info.createShortSignature.return_value = "[@ test1]" + report_2 = mocker.Mock(spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876") + report_2.crash_info.createShortSignature.return_value = "[@ test2]" + report_3 = mocker.Mock(spec=Report) + report_3.crash_info.createShortSignature.return_value = "No crash detected" + fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) + fake_report.side_effect = (report_0, report_1, report_2, report_3) + server = mocker.Mock(spec=Sapphire, port=0x1337) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) + target = mocker.Mock(spec=Target, binary="fake_bin") + target.RESULT_FAILURE = Target.RESULT_FAILURE + 
target.detect_failure.return_value = Target.RESULT_FAILURE + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + with ReplayManager([], server, target, any_crash=True, use_harness=False) as replay: + assert not replay.run(testcases, repeat=4, min_results=3) + assert replay._signature is None + assert replay.status.iteration == 4 + assert replay.status.results == 2 + assert replay.status.ignored == 0 + assert fake_report.call_count == 4 + assert report_0.cleanup.call_count == 1 + assert report_1.cleanup.call_count == 1 + assert report_2.cleanup.call_count == 1 + assert report_3.cleanup.call_count == 1 -def test_replay_10(mocker, tmp_path): - """test ReplayManager.run() - TargetLaunchError""" +def test_replay_12(mocker, tmp_path): + """test ReplayManager.run() - no signature - use first crash""" mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) + report_1 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123", minor="9999") + report_1.crash_info.createShortSignature.return_value = "[@ test1]" + auto_sig = mocker.Mock() + auto_sig.matches.side_effect = (True, False, True) + report_1.crash_signature = auto_sig + report_2 = mocker.Mock(spec=Report, crash_hash="hash2", major="abcd", minor="9876") + report_2.crash_info.createShortSignature.return_value = "[@ test2]" + report_3 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123", minor="9999") + report_3.crash_info.createShortSignature.return_value = "[@ test1]" fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) - fake_report.side_effect = (mocker.Mock(spec=Report),) + fake_report.side_effect = (report_1, report_2, report_3) server = mocker.Mock(spec=Sapphire, port=0x1337) - target = mocker.Mock(spec=Target) - target.launch.side_effect = TargetLaunchError - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html")] + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) + target = mocker.Mock(spec=Target, binary="fake_bin") + target.RESULT_FAILURE = Target.RESULT_FAILURE + target.detect_failure.return_value = Target.RESULT_FAILURE + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, use_harness=False) as replay: - with raises(TargetLaunchError): - replay.run(testcases) - assert not any(replay.reports) - assert any(replay.other_reports) - assert "STARTUP" in replay._reports_other + results = replay.run(testcases, repeat=3, min_results=2) + assert target.close.call_count == 1 + assert replay._signature == auto_sig + assert replay.status.iteration == 3 + assert replay.status.results == 2 + assert replay.status.ignored == 1 + assert fake_report.call_count == 3 + assert len(results) == 2 + assert sum(x.expected for x in results) == 1 + assert sum(x.count for x in results if x.expected) == 2 + assert report_1.cleanup.call_count == 0 + assert report_2.cleanup.call_count == 0 + assert report_3.cleanup.call_count == 1 -def test_replay_11(mocker): +def test_replay_13(mocker, tmp_path): + """test ReplayManager.run() - unexpected exception""" + mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) + report_0 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999") + report_0.crash_info.createShortSignature.return_value = "[@ test1]" + fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) + fake_report.side_effect = (report_0,) + server = 
mocker.Mock(spec=Sapphire, port=0x1337) + server.serve_path.side_effect = ((SERVED_ALL, ["index.html"]), KeyboardInterrupt) + target = mocker.Mock(spec=Target, binary="fake_bin") + target.RESULT_FAILURE = Target.RESULT_FAILURE + target.detect_failure.return_value = Target.RESULT_FAILURE + testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + with ReplayManager([], server, target, any_crash=True, use_harness=False) as replay: + with raises(KeyboardInterrupt): + replay.run(testcases, repeat=3, min_results=2) + assert target.close.call_count == 1 + assert replay._signature is None + assert replay.status.iteration == 2 + assert replay.status.results == 1 + assert replay.status.ignored == 0 + assert fake_report.call_count == 1 + assert report_0.cleanup.call_count == 1 + +def test_replay_14(mocker): """test ReplayManager.run() - multiple TestCases - no repro""" server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target) + target = mocker.Mock(spec=Target, closed=True, forced_close=True, rl_reset=1) target.RESULT_NONE = Target.RESULT_NONE - target.closed = True target.detect_failure.return_value = Target.RESULT_NONE - target.forced_close = True - target.rl_reset = 1 testcases = [ mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] with ReplayManager([], server, target, use_harness=True) as replay: assert not replay.run(testcases) + assert target.close.call_count == 1 assert replay.status.ignored == 0 assert replay.status.iteration == 1 assert replay.status.results == 0 - assert not replay.reports assert all(x.dump.call_count == 1 for x in testcases) -def test_replay_12(mocker): - """test ReplayManager.run() - multiple TestCases - successful repro""" +def test_replay_15(mocker): + """test ReplayManager.run() - multiple TestCases - no repro - with repeats""" server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) + target = mocker.Mock(spec=Target, closed=True, forced_close=True, rl_reset=100) + target.RESULT_NONE = Target.RESULT_NONE + target.detect_failure.return_value = Target.RESULT_NONE + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + with ReplayManager([], server, target, use_harness=True) as replay: + assert not replay.run(testcases, repeat=10) + assert server.serve_path.call_count == 30 + assert target.close.call_count == 1 + assert replay.status.ignored == 0 + assert replay.status.iteration == 10 + assert replay.status.results == 0 + assert all(x.dump.call_count == 1 for x in testcases) + +def test_replay_16(mocker, tmp_path): + """test ReplayManager.run() - multiple TestCases - successful repro""" + mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) + server = mocker.Mock(spec=Sapphire, port=0x1337) + server.serve_path.side_effect = ( + (SERVED_ALL, ["a.html"]), + (SERVED_ALL, ["b.html"]), + (SERVED_ALL, ["c.html"])) target = mocker.Mock(spec=Target, binary="fake_bin", rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_NONE = Target.RESULT_NONE @@ -308,14 +413,64 
@@ def test_replay_12(mocker): Target.RESULT_FAILURE) target.save_logs = _fake_save_logs_result testcases = [ - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + mocker.Mock(spec=TestCase, env_vars=[], landing_page="a.html", optional=[]), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="b.html", optional=[]), + mocker.Mock(spec=TestCase, env_vars=[], landing_page="c.html", optional=[])] with ReplayManager([], server, target, use_harness=True) as replay: - assert replay.run(testcases) + results = replay.run(testcases) + assert target.close.call_count == 1 assert replay.status.ignored == 0 assert replay.status.iteration == 1 assert replay.status.results == 1 - assert len(replay.reports) == 1 - assert not replay.other_reports + assert len(results) == 1 + assert len(results[0].served) == len(testcases) + assert results[0].served[0][0] == "a.html" + assert results[0].served[1][0] == "b.html" + assert results[0].served[2][0] == "c.html" assert all(x.dump.call_count == 1 for x in testcases) + +def test_replay_17(mocker, tmp_path): + """test ReplayManager.report_to_filesystem()""" + # no reports + ReplayManager.report_to_filesystem(str(tmp_path), []) + assert not any(tmp_path.glob("*")) + # with reports and tests + (tmp_path / "report_expected").mkdir() + result0 = mocker.Mock(ReplayResult, count=1, expected=True, served=[]) + result0.report = mocker.Mock( + spec=Report, + path=str(tmp_path / "report_expected"), + prefix="expected") + (tmp_path / "report_other1").mkdir() + result1 = mocker.Mock(ReplayResult, count=1, expected=False, served=None) + result1.report = mocker.Mock( + spec=Report, + path=str(tmp_path / "report_other1"), + prefix="other1") + (tmp_path / "report_other2").mkdir() + result2 = mocker.Mock(ReplayResult, count=1, expected=False, served=None) + result2.report = mocker.Mock( + spec=Report, + path=str(tmp_path / "report_other2"), + prefix="other2") + test = mocker.Mock(spec=TestCase) + path = tmp_path / "dest" + ReplayManager.report_to_filesystem(str(path), [result0, result1, result2], tests=[test]) + assert test.dump.call_count == 3 # called once per report + assert not (tmp_path / "report_expected").is_dir() + assert not (tmp_path / "report_other1").is_dir() + assert not (tmp_path / "report_other2").is_dir() + assert path.is_dir() + assert (path / "reports").is_dir() + assert (path / "reports" / "expected_logs").is_dir() + assert (path / "other_reports").is_dir() + assert (path / "other_reports" / "other1_logs").is_dir() + assert (path / "other_reports" / "other2_logs").is_dir() + # with reports and no tests + (tmp_path / "report_expected").mkdir() + result0.reset_mock() + path = tmp_path / "dest2" + ReplayManager.report_to_filesystem(str(path), [result0]) + assert not (tmp_path / "report_expected").is_dir() + assert path.is_dir() + assert (path / "reports" / "expected_logs").is_dir() diff --git a/grizzly/session.py b/grizzly/session.py index aac7027d..6dc4e24c 100644 --- a/grizzly/session.py +++ b/grizzly/session.py @@ -10,7 +10,6 @@ from time import sleep, time from .common import grz_tmp, Report, Runner, RunResult, Status, TestFile -from .target import TargetLaunchError __all__ = ("SessionError", "LogOutputLimiter", "Session") @@ -156,13 +155,7 @@ def _dyn_close(): # pragma: no cover forced_close=self.target.forced_close, timeout=self.adapter.TEST_DURATION) 
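The replay tests above exercise the returned ReplayResult objects end to end. As a rough usage sketch, not part of the patch (`server`, `target`, `testcases` and `report_path` are stand-ins), a caller drives run() and hands the results to report_to_filesystem():

    # Sketch only: run a replay and write any expected results to disk.
    with ReplayManager([], server, target, use_harness=True) as replay:
        results = replay.run(testcases, repeat=10, min_results=2)
    if any(result.expected for result in results):
        ReplayManager.report_to_filesystem(report_path, results, tests=testcases)
    for result in results:
        # remove report data that is no longer needed
        result.report.cleanup()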
log.info("Launching target") - try: - runner.launch(location, max_retries=3, retry_delay=0) - except TargetLaunchError: - # this result likely has nothing to do with Grizzly - log.error("Target launch error. Check browser logs for details.") - self.report_result() - raise + runner.launch(location, max_retries=3, retry_delay=0) self.target.step() # create and populate a test case diff --git a/grizzly/target/puppet_target.py b/grizzly/target/puppet_target.py index 3aef4f56..366e048a 100644 --- a/grizzly/target/puppet_target.py +++ b/grizzly/target/puppet_target.py @@ -3,11 +3,11 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from logging import getLogger -from os import close, getenv, kill, makedirs, unlink -from os.path import abspath, isdir, isfile +from os import close, kill, unlink +from os.path import abspath, isfile from platform import system import signal -from time import localtime, sleep, strftime, time +from time import sleep, time from tempfile import mkdtemp, mkstemp from psutil import AccessDenied, NoSuchProcess, Process, process_iter @@ -17,6 +17,7 @@ from .target_monitor import TargetMonitor from .target import Target, TargetLaunchError, TargetLaunchTimeout, TargetError +from ..common.reporter import Report from ..common.utils import grz_tmp @@ -28,14 +29,13 @@ class PuppetTarget(Target): - __slots__ = ("use_rr", "use_valgrind", "_browser_logs", "_puppet", "_remove_prefs") + __slots__ = ("use_rr", "use_valgrind", "_puppet", "_remove_prefs") def __init__(self, binary, extension, launch_timeout, log_limit, memory_limit, relaunch, **kwds): super(PuppetTarget, self).__init__(binary, extension, launch_timeout, log_limit, memory_limit, relaunch) self.use_rr = kwds.pop("rr", False) self.use_valgrind = kwds.pop("valgrind", False) - self._browser_logs = None self._remove_prefs = False # create Puppet object self._puppet = FFPuppet( @@ -59,8 +59,6 @@ def add_abort_token(self, token): def cleanup(self): # prevent parallel calls to FFPuppet.close() and/or FFPuppet.clean_up() - if self._browser_logs: - self.close() with self._lock: self._puppet.clean_up() if self._remove_prefs and self._prefs and isfile(self._prefs): @@ -70,16 +68,6 @@ def close(self): # prevent parallel calls to FFPuppet.close() and/or FFPuppet.clean_up() with self._lock: self._puppet.close() - # save logs in lock to avoid a parallel clean_up() removing them - if self._browser_logs: - log_path = mkdtemp( - prefix=strftime("%Y%m%d-%H%M%S_", localtime()), - suffix="_browser_logs", - dir=self._browser_logs) - LOG.debug("saving browser logs to %r", log_path) - self._puppet.save_logs(log_path) - # only save logs once per launch - self._browser_logs = None @property def closed(self): @@ -208,12 +196,6 @@ def dump_coverage(self, timeout=15): sleep(delay) def launch(self, location, env_mod=None): - # GRZ_BROWSER_LOGS is intended to be used to aid in debugging. 
- # when close() is called a copy of the browser logs will be saved - # to the directory specified by GRZ_BROWSER_LOGS - self._browser_logs = getenv("GRZ_BROWSER_LOGS") - if self._browser_logs and not isdir(self._browser_logs): - makedirs(self._browser_logs) self.rl_countdown = self.rl_reset # setup environment env_mod = dict(env_mod or []) @@ -235,7 +217,9 @@ def launch(self, location, env_mod=None): self.close() if isinstance(exc, BrowserTimeoutError): raise TargetLaunchTimeout(str(exc)) - raise TargetLaunchError(str(exc)) + log_path = mkdtemp(prefix="launch_fail_", dir=grz_tmp("logs")) + self.save_logs(log_path) + raise TargetLaunchError(str(exc), Report(log_path, self.binary)) def log_size(self): return self._puppet.log_length("stderr") + self._puppet.log_length("stdout") diff --git a/grizzly/target/target.py b/grizzly/target/target.py index 08bb733d..7bd0ac16 100644 --- a/grizzly/target/target.py +++ b/grizzly/target/target.py @@ -44,6 +44,9 @@ class TargetError(Exception): class TargetLaunchError(TargetError): """Raised if a failure during launch occurs""" + def __init__(self, message, report): + super().__init__(message) + self.report = report class TargetLaunchTimeout(TargetError): @@ -111,6 +114,8 @@ def close(self): def closed(self): pass + # TODO: add collect_report()? + @abstractmethod def detect_failure(self, ignored, was_timeout): pass diff --git a/grizzly/target/test_puppet_target.py b/grizzly/target/test_puppet_target.py index 951af45a..0ecf0b51 100644 --- a/grizzly/target/test_puppet_target.py +++ b/grizzly/target/test_puppet_target.py @@ -21,7 +21,6 @@ def test_puppet_target_01(mocker, tmp_path): fake_file.touch() with PuppetTarget(str(fake_file), None, 300, 25, 5000, 25) as target: assert target.closed - assert target._browser_logs is None assert not target._remove_prefs prefs_file = target.prefs assert isfile(prefs_file) @@ -50,18 +49,26 @@ def test_puppet_target_02(mocker, tmp_path): with PuppetTarget(str(fake_file), None, 300, 25, 5000, 35) as target: target.prefs = str(fake_file) assert not target._remove_prefs + # launch success target.launch("launch_target_page") - assert target._browser_logs is None assert fake_ffp.return_value.launch.call_count == 1 assert fake_ffp.return_value.close.call_count == 0 - fake_ffp.return_value.launch.side_effect = BrowserTimeoutError - with raises(TargetLaunchTimeout): + target.close() + # launch timeout + fake_ffp.reset_mock() + fake_ffp.return_value.launch.side_effect = BrowserTimeoutError("timeout") + with raises(TargetLaunchTimeout, match="timeout"): target.launch("launch_target_page") - assert fake_ffp.return_value.launch.call_count == 2 - assert fake_ffp.return_value.close.call_count == 1 - fake_ffp.return_value.launch.side_effect = BrowserTerminatedError - with raises(TargetLaunchError): + assert fake_ffp.return_value.save_logs.call_count == 0 + # launch failure + fake_ffp.reset_mock() + (tmp_path / "log_stderr.txt").write_text("fake log") + (tmp_path / "log_stdout.txt").write_text("fake log") + mocker.patch("grizzly.target.puppet_target.mkdtemp", autospec=True, return_value=str(tmp_path)) + fake_ffp.return_value.launch.side_effect = BrowserTerminatedError("fail") + with raises(TargetLaunchError, match="fail"): target.launch("launch_target_page") + assert fake_ffp.return_value.save_logs.call_count == 1 def test_puppet_target_03(mocker, tmp_path): """test PuppetTarget.detect_failure()""" @@ -278,22 +285,6 @@ def test_puppet_target_06(mocker, tmp_path): assert fake_ffp.return_value.clone_log.call_count == 1 def 
test_puppet_target_07(mocker, tmp_path): - """test PuppetTarget with GRZ_BROWSER_LOGS set""" - browser_logs = (tmp_path / "browser_logs") - fake_getenv = mocker.patch("grizzly.target.puppet_target.getenv", autospec=True) - fake_getenv.return_value = str(browser_logs) - fake_ffp = mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) - fake_file = tmp_path / "fake" - fake_file.touch() - with PuppetTarget(str(fake_file), None, 300, 25, 5000, 35) as target: - target.launch("launch_target_page") - assert target._browser_logs == str(browser_logs) - assert browser_logs.is_dir() - target.cleanup() - assert target._browser_logs is None - assert fake_ffp.return_value.save_logs.call_count == 1 - -def test_puppet_target_08(mocker, tmp_path): """test PuppetTarget.prefs""" mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) fake_file = tmp_path / "fake" diff --git a/grizzly/test_main.py b/grizzly/test_main.py index 19952119..1ee28d42 100644 --- a/grizzly/test_main.py +++ b/grizzly/test_main.py @@ -7,7 +7,7 @@ from pytest import raises from sapphire import Sapphire -from .common import Adapter +from .common import Adapter, Report from .main import main from .session import Session from .target import TargetLaunchError @@ -89,7 +89,7 @@ def test_main_02(mocker): with raises(RuntimeError, match=r"Test duration \([0-9]+s\) should be less than browser timeout \([0-9]+s\)"): main(args) -def test_main_03(mocker): +def test_main_03(mocker, tmp_path): """test main() exit codes""" fake_adapter = mocker.Mock(spec=Adapter) fake_adapter.TEST_DURATION = 10 @@ -107,5 +107,14 @@ def test_main_03(mocker): args.input = "fake" fake_session.return_value.run.side_effect = KeyboardInterrupt assert main(args) == Session.EXIT_ABORT - fake_session.return_value.run.side_effect = TargetLaunchError("test") + # test TargetLaunchError + fake_tmp = (tmp_path / "grz_tmp") + fake_tmp.mkdir() + mocker.patch("grizzly.main.grz_tmp", return_value=str(fake_tmp)) + fake_logs = (tmp_path / "report") + report = mocker.Mock(spec=Report, prefix="fake_report", path=str(fake_logs)) + fake_logs.mkdir() + fake_session.return_value.run.side_effect = TargetLaunchError("test", report) assert main(args) == Session.EXIT_LAUNCH_FAILURE + assert any(fake_tmp.glob("fake_report_logs")) + assert not fake_logs.is_dir() diff --git a/grizzly/test_session.py b/grizzly/test_session.py index fb07082e..0bbb2353 100644 --- a/grizzly/test_session.py +++ b/grizzly/test_session.py @@ -237,7 +237,7 @@ def test_session_09(tmp_path, mocker): Status.PATH = str(tmp_path) mocker.patch("grizzly.session.Report", autospec=True) fake_runner = mocker.patch("grizzly.session.Runner", autospec=True) - fake_runner.return_value.launch.side_effect = TargetLaunchError + fake_runner.return_value.launch.side_effect = TargetLaunchError("test", mocker.Mock(spec=Report)) mocker.patch("grizzly.session.TestFile", autospec=True) fake_adapter = mocker.Mock(spec=Adapter) fake_iomgr = mocker.Mock( @@ -249,14 +249,12 @@ def test_session_09(tmp_path, mocker): fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) fake_target = mocker.Mock(spec=Target) fake_target.monitor.launches = 1 - reporter = NullReporter() - with Session(fake_adapter, fake_iomgr, reporter, fake_serv, fake_target) as session: - with raises(TargetLaunchError, match=""): + with Session(fake_adapter, fake_iomgr, mocker.Mock(), fake_serv, fake_target) as session: + with raises(TargetLaunchError, match="test"): session.run([], iteration_limit=1) assert session.status.iteration == 1 - assert 
session.status.results == 1 + assert session.status.results == 0 assert session.status.ignored == 0 - assert reporter.submit_calls == 1 def test_session_10(tmp_path, mocker): """test Session.report_result()""" From b60f3d9942481a7a8ccb9ac2505e3e5bf616f297 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 23 Sep 2020 15:07:40 -0700 Subject: [PATCH 016/531] Add TestCase.purge_optional() sanity checking --- grizzly/common/storage.py | 28 ++++++++++++++++++++-------- grizzly/common/test_storage.py | 17 +++++++++++++---- grizzly/replay/test_main.py | 28 ++++------------------------ grizzly/replay/test_replay.py | 8 ++++---- grizzly/test_session.py | 12 +++++++++++- 5 files changed, 52 insertions(+), 41 deletions(-) diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index 352c3650..58de4a08 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -384,13 +384,13 @@ def load_single(cls, path, load_prefs, adjacent=False): @property def optional(self): - """Get file names of optional TestFiles + """Get file names of optional TestFiles. Args: None - Returns: - generator: file names (str) of optional files + Yields: + str: File names of optional files. """ for test in self._files.optional: yield test.file_name @@ -404,14 +404,26 @@ def purge_optional(self, keep): Returns: None """ - # TODO: should we limit or warn on multiple calls to prevent issues? - keep = set(keep) - to_remove = [] - for idx, tfile in enumerate(self._files.optional): - if tfile.file_name not in keep: + # filter required files from opt_files files to keep + keep_opt = list() + for fname in set(keep): + if fname not in (x.file_name for x in self._files.required): + keep_opt.append(fname) + opt_files = tuple(x.file_name for x in self._files.optional) + if not opt_files: + assert not keep_opt + # nothing to purge + return None + # sanity check keep (cannot remove file that does not exist) + assert all(fname in opt_files for fname in keep_opt) + # purge + to_remove = list() + for idx, fname in enumerate(opt_files): + if fname not in keep_opt: to_remove.append(idx) for idx in reversed(to_remove): self._files.optional.pop(idx).close() + return None @staticmethod def scan_path(path): diff --git a/grizzly/common/test_storage.py b/grizzly/common/test_storage.py index c090e48c..aef61d1e 100644 --- a/grizzly/common/test_storage.py +++ b/grizzly/common/test_storage.py @@ -4,6 +4,7 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
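For reference, a small sketch of the contract the new checks in purge_optional() enforce: required file names passed in `keep` are ignored, optional names must actually exist, and any optional file not listed is dropped. The file names below are made up for illustration.

    # Sketch only: "index.html" is required, the two .js files are optional.
    with TestCase("index.html", None, "test-adapter") as tcase:
        tcase.add_from_data("<html/>", "index.html")
        tcase.add_from_data("//a", "extra_a.js", required=False)
        tcase.add_from_data("//b", "extra_b.js", required=False)
        # naming a required file is allowed, "extra_b.js" is purged
        tcase.purge_optional(["index.html", "extra_a.js"])
        assert list(tcase.optional) == ["extra_a.js"]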
# pylint: disable=protected-access +from itertools import chain import json import re import os @@ -100,7 +101,7 @@ def test_testcase_04(tmp_path): assert data["env"]["TEST_ENV_VAR"] == "1" assert data["env"]["TEST_NONE"] is None -def test_testcase_05(tmp_path): +def test_testcase_05(): """test TestCase.purge_optional()""" with TestCase("land_page.html", "redirect.html", "test-adapter") as tcase: tcase.add_from_data("foo", "testfile1.bin") @@ -108,13 +109,21 @@ def test_testcase_05(tmp_path): tcase.add_from_data("foo", "testfile3.bin", required=False) tcase.add_from_data("foo", "not_served.bin", required=False) assert len(tuple(tcase.optional)) == 3 + # nothing to remove - with required + tcase.purge_optional(chain(["testfile1.bin"], tcase.optional)) + assert len(tuple(tcase.optional)) == 3 + # nothing to remove - without required tcase.purge_optional(tcase.optional) assert len(tuple(tcase.optional)) == 3 + # remove not_served.bin tcase.purge_optional(["testfile2.bin", "testfile3.bin"]) assert len(tuple(tcase.optional)) == 2 - tcase.dump(str(tmp_path)) - assert tmp_path.glob("testfile1.bin") - assert not any(tmp_path.glob("not_served.bin")) + assert "testfile2.bin" in tcase.optional + assert "testfile3.bin" in tcase.optional + assert "not_served.bin" not in tcase.optional + # remove remaining optional + tcase.purge_optional(["testfile1.bin"]) + assert not any(tcase.optional) def test_testcase_06(): """test TestCase.data_size""" diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 6324dfc1..5f70a878 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -6,7 +6,6 @@ """ unit tests for grizzly.replay.main """ -from os.path import join as pathjoin from shutil import rmtree from pytest import raises @@ -16,6 +15,8 @@ from ..replay import ReplayManager from ..replay.args import ReplayArgs +from .test_replay import _fake_save_logs + def test_args_01(capsys, tmp_path): """test parsing args""" @@ -73,17 +74,6 @@ def test_main_01(mocker, tmp_path): target.RESULT_IGNORED = Target.RESULT_IGNORED target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.side_effect = (Target.RESULT_FAILURE, Target.RESULT_NONE, Target.RESULT_FAILURE) - def _fake_save_logs(result_logs): - """write fake log data to disk""" - with open(pathjoin(result_logs, "log_stderr.txt"), "w") as log_fp: - log_fp.write("STDERR log\n") - with open(pathjoin(result_logs, "log_stdout.txt"), "w") as log_fp: - log_fp.write("STDOUT log\n") - with open(pathjoin(result_logs, "log_asan_blah.txt"), "w") as log_fp: - log_fp.write("==1==ERROR: AddressSanitizer: ") - log_fp.write("SEGV on unknown address 0x0 (pc 0x0 bp 0x0 sp 0x0 T0)\n") - log_fp.write(" #0 0xbad000 in foo /file1.c:123:234\n") - log_fp.write(" #1 0x1337dd in bar /file2.c:1806:19\n") target.save_logs = _fake_save_logs load_target.return_value.return_value = target # setup args @@ -220,16 +210,6 @@ def test_main_05(mocker, tmp_path): target = mocker.Mock(spec=Target, binary="bin", forced_close=True) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE - def _fake_save_logs(result_logs): - """write fake log data to disk""" - with open(pathjoin(result_logs, "log_stderr.txt"), "w") as log_fp: - pass - with open(pathjoin(result_logs, "log_stdout.txt"), "w") as log_fp: - pass - with open(pathjoin(result_logs, "log_asan_blah.txt"), "w") as log_fp: - log_fp.write("==1==ERROR: AddressSanitizer: ") - log_fp.write("SEGV on unknown address 0x0 (pc 0x0 bp 0x0 sp 0x0 T0)\n") - log_fp.write(" #0 
0xbad000 in foo /file1.c:123:234\n") target.save_logs = _fake_save_logs load_target = mocker.patch("grizzly.replay.replay.load_target") load_target.return_value.return_value = target @@ -247,9 +227,9 @@ def _fake_save_logs(result_logs): input_path = (tmp_path / "input") input_path.mkdir() # build a test case - entry_point = (input_path / "target.bin") + entry_point = (input_path / "test.html") entry_point.touch() - with TestCase("target.bin", None, "test-adapter") as src: + with TestCase("test.html", None, "test-adapter") as src: src.add_from_file(str(entry_point)) src.dump(str(input_path), include_details=True) args.input = str(input_path) diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index f16fef61..50af40f9 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -16,7 +16,7 @@ from ..target import Target -def _fake_save_logs_result(result_logs, meta=False): # pylint: disable=unused-argument +def _fake_save_logs(result_logs, meta=False): # pylint: disable=unused-argument """write fake log data to disk""" with open(pathjoin(result_logs, "log_stderr.txt"), "w") as log_fp: log_fp.write("STDERR log\n") @@ -80,7 +80,7 @@ def test_replay_04(mocker, tmp_path): target = mocker.Mock(spec=Target, binary="C:\\fake_bin") target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE - target.save_logs = _fake_save_logs_result + target.save_logs = _fake_save_logs with TestCase("index.html", "redirect.html", "test-adapter") as testcase: with ReplayManager([], server, target, use_harness=False) as replay: results = replay.run([testcase]) @@ -142,7 +142,7 @@ def test_replay_07(mocker, tmp_path): target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_IGNORED = Target.RESULT_IGNORED target.RESULT_NONE = Target.RESULT_NONE - target.save_logs = _fake_save_logs_result + target.save_logs = _fake_save_logs testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] # early failure target.detect_failure.side_effect = (Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_NONE) @@ -411,7 +411,7 @@ def test_replay_16(mocker, tmp_path): Target.RESULT_NONE, Target.RESULT_NONE, Target.RESULT_FAILURE) - target.save_logs = _fake_save_logs_result + target.save_logs = _fake_save_logs testcases = [ mocker.Mock(spec=TestCase, env_vars=[], landing_page="a.html", optional=[]), mocker.Mock(spec=TestCase, env_vars=[], landing_page="b.html", optional=[]), diff --git a/grizzly/test_session.py b/grizzly/test_session.py index 0bbb2353..82b5720e 100644 --- a/grizzly/test_session.py +++ b/grizzly/test_session.py @@ -41,6 +41,7 @@ def setup(self, input_path, server_map): def generate(self, testcase, server_map): assert testcase.adapter_name == self.NAME testcase.input_fname = "file.bin" + testcase.add_from_data("test", testcase.landing_page) self.remaining -= 1 Status.PATH = str(tmp_path) adapter = PlaybackAdapter() @@ -66,6 +67,7 @@ def setup(self, input_path, server_map): self.enable_harness() def generate(self, testcase, server_map): assert testcase.adapter_name == self.NAME + testcase.add_from_data("test", testcase.landing_page) Status.PATH = str(tmp_path) adapter = FuzzAdapter() adapter.setup(None, None) @@ -85,8 +87,16 @@ def generate(self, testcase, server_map): def test_session_03(tmp_path, mocker): """test Session.dump_coverage()""" + class FuzzAdapter(Adapter): + NAME = "fuzz" + def setup(self, input_path, server_map): + self.enable_harness() + def generate(self, testcase, 
server_map): + assert testcase.adapter_name == self.NAME + testcase.add_from_data("test", testcase.landing_page) Status.PATH = str(tmp_path) - adapter = mocker.Mock(spec=Adapter, remaining=None) + adapter = FuzzAdapter() + adapter.setup(None, None) fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) fake_target = mocker.Mock(spec=Target, prefs=None, rl_reset=2) fake_target.log_size.return_value = 1000 From 00d4c7ae18602163bf553218fe28c850ae2102bf Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 23 Sep 2020 15:56:43 -0700 Subject: [PATCH 017/531] [replay] Allow multiple calls to ReplayManager.run() --- grizzly/replay/replay.py | 4 +++- grizzly/replay/test_replay.py | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index fd0e5ffa..18596dea 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -120,8 +120,10 @@ def run(self, testcases, repeat=1, min_results=1): assert min_results > 0 assert min_results <= repeat assert testcases - assert self.status is None + if self.status is not None: + LOG.debug("clearing previous status data") + self.status.cleanup() self.status = Status.start() server_map = ServerMap() diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 50af40f9..1d85e274 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -430,6 +430,24 @@ def test_replay_16(mocker, tmp_path): assert all(x.dump.call_count == 1 for x in testcases) def test_replay_17(mocker, tmp_path): + """test ReplayManager.run() - multiple calls""" + mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) + server = mocker.Mock(spec=Sapphire, port=0x1337) + server.serve_path.return_value = (SERVED_ALL, ["index.html"]) + target = mocker.Mock(spec=Target, closed=True, forced_close=True, rl_reset=1) + target.RESULT_NONE = Target.RESULT_NONE + target.detect_failure.return_value = Target.RESULT_NONE + with TestCase("index.html", "redirect.html", "test-adapter") as testcase: + with ReplayManager([], server, target, use_harness=True) as replay: + assert not replay.run([testcase]) + assert replay.status.iteration == 1 + assert not replay.run([testcase]) + assert replay.status.iteration == 1 + assert not replay.run([testcase]) + assert replay.status.iteration == 1 + assert server.serve_path.call_count == 3 + +def test_replay_18(mocker, tmp_path): """test ReplayManager.report_to_filesystem()""" # no reports ReplayManager.report_to_filesystem(str(tmp_path), []) From 7c2f5a5222636eb49bacb82d70527e68ea79cd8e Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 23 Sep 2020 16:07:02 -0700 Subject: [PATCH 018/531] [tests] Fix replay test_main_03 --- grizzly/common/reporter.py | 2 +- grizzly/replay/test_main.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/grizzly/common/reporter.py b/grizzly/common/reporter.py index aafce0f9..1a81bd16 100644 --- a/grizzly/common/reporter.py +++ b/grizzly/common/reporter.py @@ -403,7 +403,7 @@ class FilesystemReporter(Reporter): def __init__(self, report_path, major_bucket=True): self.major_bucket = major_bucket - assert report_path + assert isinstance(report_path, str) and report_path self.report_path = report_path def _pre_submit(self, report): diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 5f70a878..0167a52a 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -151,7 +151,7 @@ def test_main_03(mocker, tmp_path): 
mocker.patch("grizzly.replay.replay.TestCase", autospec=True) fake_tmp = (tmp_path / "grz_tmp") fake_tmp.mkdir() - mocker.patch("grizzly.replay.replay.grz_tmp", autospec=True, return_value=fake_tmp) + mocker.patch("grizzly.replay.replay.grz_tmp", autospec=True, return_value=str(fake_tmp)) # setup args args = mocker.Mock( ignore=None, From e67c6a65f87092120880fe5296f4fb78e5451a24 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 29 Sep 2020 10:50:41 -0700 Subject: [PATCH 019/531] Add debugger flag checks --- grizzly/args.py | 3 +++ grizzly/main.py | 4 ++-- grizzly/replay/args.py | 3 +++ grizzly/replay/replay.py | 5 +++-- grizzly/replay/test_main.py | 15 ++++++++++++++- 5 files changed, 25 insertions(+), 5 deletions(-) diff --git a/grizzly/args.py b/grizzly/args.py index 62c0d274..cee2b57a 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -212,3 +212,6 @@ def sanity_check(self, args): if args.tool is not None and not (args.fuzzmanager or args.s3_fuzzmanager): self.parser.error("--tool can only be given with --fuzzmanager/--s3-fuzzmanager") + + if args.rr and args.valgrind: + self.parser.error("'--rr' and '--valgrind' cannot be used together") diff --git a/grizzly/main.py b/grizzly/main.py index b52c9f9a..081d0f8a 100644 --- a/grizzly/main.py +++ b/grizzly/main.py @@ -41,10 +41,10 @@ def main(args): log.info("Ignoring: %s", ", ".join(args.ignore)) if args.xvfb: log.info("Running with Xvfb") - if args.valgrind: - log.info("Running with Valgrind. This will be SLOW!") if args.rr: log.info("Running with RR") + elif args.valgrind: + log.info("Running with Valgrind. This will be SLOW!") adapter = None iomanager = None diff --git a/grizzly/replay/args.py b/grizzly/replay/args.py index 78696a14..c66567c4 100644 --- a/grizzly/replay/args.py +++ b/grizzly/replay/args.py @@ -72,5 +72,8 @@ def sanity_check(self, args): if args.repeat < 1: self.parser.error("'--repeat' value must be positive") + if args.rr and args.valgrind: + self.parser.error("'--rr' and '--valgrind' cannot be used together") + if args.sig is not None and not isfile(args.sig): self.parser.error("signature file not found: %r" % (args.sig,)) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 18596dea..72357f0d 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -312,6 +312,7 @@ def main(cls, args): configure_logging(args.log_level) if args.fuzzmanager: FuzzManagerReporter.sanity_check(args.binary) + # TODO: add fuzzmanager support LOG.info("Starting Grizzly Replay") @@ -319,10 +320,10 @@ def main(cls, args): LOG.info("Ignoring: %s", ", ".join(args.ignore)) if args.xvfb: LOG.info("Running with Xvfb") - if args.valgrind: - LOG.info("Running with Valgrind. This will be SLOW!") if args.rr: LOG.info("Running with RR") + elif args.valgrind: + LOG.info("Running with Valgrind. 
This will be SLOW!") if args.sig: signature = CrashSignature.fromFile(args.sig) diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 0167a52a..2f91334b 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -54,6 +54,10 @@ def test_args_01(capsys, tmp_path): with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--any-crash", "--sig", "x"]) assert "error: signature is ignored when running with '--any-crash'" in capsys.readouterr()[-1] + # multiple debuggers + with raises(SystemExit): + ReplayArgs().parse_args([str(exe), str(inp), "--rr", "--valgrind"]) + assert "'--rr' and '--valgrind' cannot be used together" in capsys.readouterr()[-1] # force relaunch == 1 with --no-harness args = ReplayArgs().parse_args([str(exe), str(inp), "--no-harness"]) assert args.relaunch == 1 @@ -90,8 +94,10 @@ def test_main_01(mocker, tmp_path): prefs=str(tmp_path / "prefs.js"), relaunch=1, repeat=4, + rr=False, sig=str(tmp_path / "sig.json"), - timeout=10) + timeout=10, + valgrind=False) assert ReplayManager.main(args) == 0 assert target.forced_close assert target.reverse.call_count == 1 @@ -126,13 +132,20 @@ def test_main_02(mocker): sig=None) # user abort fake_load_target.side_effect = KeyboardInterrupt + # coverage + args.rr = True + args.valgrind = False assert ReplayManager.main(args) == 1 # invalid test case fake_load_target.reset_mock() fake_tc.load.side_effect = TestCaseLoadFailure + # coverage + args.rr = False + args.valgrind = True assert ReplayManager.main(args) == 1 assert fake_load_target.call_count == 0 # no test cases + args.valgrind = False fake_tc.load.side_effect = None fake_tc.load.return_value = list() assert ReplayManager.main(args) == 1 From 5b2bb99caec91439c18cf1b7a65390cc117f9287 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 29 Sep 2020 11:07:26 -0700 Subject: [PATCH 020/531] [pylint] Address 'raise-missing-from' reports --- grizzly/common/storage.py | 6 +++--- grizzly/target/puppet_target.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index 58de4a08..38dfc373 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -282,7 +282,7 @@ def load(cls, path, load_prefs, adjacent=False): zip_fp.extractall(path=unpacked) except (BadZipfile, zlib_error): shutil.rmtree(unpacked, ignore_errors=True) - raise TestCaseLoadFailure("Testcase archive is corrupted") + raise TestCaseLoadFailure("Testcase archive is corrupted") from None path = unpacked else: unpacked = None @@ -339,9 +339,9 @@ def load_single(cls, path, load_prefs, adjacent=False): with open(os.path.join(path, "test_info.json"), "r") as in_fp: info = json.load(in_fp) except IOError: - raise TestCaseLoadFailure("Missing 'test_info.json'") + raise TestCaseLoadFailure("Missing 'test_info.json'") from None except ValueError: - raise TestCaseLoadFailure("Invalid 'test_info.json'") + raise TestCaseLoadFailure("Invalid 'test_info.json'") from None if not isinstance(info.get("target"), str): raise TestCaseLoadFailure("'test_info.json' has invalid 'target' entry") entry_point = os.path.basename(info["target"]) diff --git a/grizzly/target/puppet_target.py b/grizzly/target/puppet_target.py index 366e048a..1382d839 100644 --- a/grizzly/target/puppet_target.py +++ b/grizzly/target/puppet_target.py @@ -216,10 +216,10 @@ def launch(self, location, env_mod=None): LOG.error("FFPuppet LaunchError: %s", str(exc)) self.close() if isinstance(exc, BrowserTimeoutError): - raise 
TargetLaunchTimeout(str(exc)) + raise TargetLaunchTimeout(str(exc)) from None log_path = mkdtemp(prefix="launch_fail_", dir=grz_tmp("logs")) self.save_logs(log_path) - raise TargetLaunchError(str(exc), Report(log_path, self.binary)) + raise TargetLaunchError(str(exc), Report(log_path, self.binary)) from None def log_size(self): return self._puppet.log_length("stderr") + self._puppet.log_length("stdout") From ede2a058e8d6c0b4d73aa65d59be23be48b02bbd Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 29 Sep 2020 12:19:53 -0700 Subject: [PATCH 021/531] Remove result duration tracking from testcase --- grizzly/common/runner.py | 13 +++++++------ grizzly/common/test_runner.py | 5 +++++ grizzly/reduce/reduce.py | 1 + grizzly/replay/replay.py | 11 +++++++---- grizzly/replay/test_replay.py | 8 +++++--- grizzly/session.py | 1 + grizzly/test_session.py | 4 ++-- 7 files changed, 28 insertions(+), 15 deletions(-) diff --git a/grizzly/common/runner.py b/grizzly/common/runner.py index 2933df2b..5142b43e 100644 --- a/grizzly/common/runner.py +++ b/grizzly/common/runner.py @@ -175,12 +175,12 @@ def run(self, ignore, server_map, testcase, test_path=None, coverage=False, wait forever=wait_for_callback, optional_files=tuple(testcase.optional), server_map=server_map) - testcase.duration = time() - serve_start + duration = time() - serve_start finally: # remove temporary files if test_path is None: rmtree(wwwdir) - result = RunResult(served, timeout=server_status == SERVED_TIMEOUT) + result = RunResult(served, duration, timeout=server_status == SERVED_TIMEOUT) # TODO: fix calling TestCase.add_batch() for multi-test replay # add all include files that were served for url, resource in server_map.include.items(): @@ -208,8 +208,8 @@ def run(self, ignore, server_map, testcase, test_path=None, coverage=False, wait return result def _keep_waiting(self): - """Callback used by the server to determine if should continue to wait - for the requests from the target. + """Callback used by the server to determine if it should continue to + wait for the requests from the target. 
Args: None @@ -229,9 +229,10 @@ class RunResult(object): FAILED = 3 IGNORED = 4 - __slots__ = ("served", "status", "timeout") + __slots__ = ("duration", "served", "status", "timeout") - def __init__(self, served, status=None, timeout=False): + def __init__(self, served, duration, status=None, timeout=False): + self.duration = duration self.served = served self.status = status self.timeout = timeout diff --git a/grizzly/common/test_runner.py b/grizzly/common/test_runner.py index b8d89935..9233a283 100644 --- a/grizzly/common/test_runner.py +++ b/grizzly/common/test_runner.py @@ -16,6 +16,7 @@ def test_runner_01(mocker, tmp_path): """test Runner()""" + fake_time = mocker.patch("grizzly.common.runner.time", autospec=True) server = mocker.Mock(spec=Sapphire) target = mocker.Mock(spec=Target) target.detect_failure.return_value = target.RESULT_NONE @@ -24,8 +25,10 @@ def test_runner_01(mocker, tmp_path): serv_files = ["a.bin", "/another/file.bin"] testcase = mocker.Mock(spec=TestCase, landing_page=serv_files[0], optional=[]) # all files served + fake_time.side_effect = (1, 2) server.serve_path.return_value = (SERVED_ALL, serv_files) result = runner.run([], ServerMap(), testcase) + assert result.duration == 1 assert result.status == RunResult.COMPLETE assert result.served == serv_files assert not result.timeout @@ -33,6 +36,7 @@ def test_runner_01(mocker, tmp_path): assert target.dump_coverage.call_count == 0 assert testcase.dump.call_count == 1 # some files served + fake_time.side_effect = (1, 2) server.serve_path.return_value = (SERVED_REQUEST, serv_files) result = runner.run([], ServerMap(), testcase, coverage=True) assert result.status == RunResult.COMPLETE @@ -41,6 +45,7 @@ def test_runner_01(mocker, tmp_path): assert target.close.call_count == 0 assert target.dump_coverage.call_count == 1 # existing test path + fake_time.side_effect = (1, 2) testcase.reset_mock() tc_path = (tmp_path / "tc") tc_path.mkdir() diff --git a/grizzly/reduce/reduce.py b/grizzly/reduce/reduce.py index 008f8f5a..bced7f70 100644 --- a/grizzly/reduce/reduce.py +++ b/grizzly/reduce/reduce.py @@ -647,6 +647,7 @@ def _dyn_resp_close(): # pragma: no cover # run test case result = runner.run(self._ignore, self._server_map, testcase, wait_for_callback=self._no_harness) + testcase.duration = result.duration # handle failure if detected if result.status == RunResult.FAILED: diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 72357f0d..d87a49df 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -27,10 +27,11 @@ class ReplayResult(object): - __slots__ = ("count", "expected", "report", "served") + __slots__ = ("count", "durations", "expected", "report", "served") - def __init__(self, report, served, expected): + def __init__(self, report, served, durations, expected): self.count = 1 + self.durations = durations self.expected = expected self.report = report self.served = served @@ -173,6 +174,7 @@ def _dyn_close(): # pragma: no cover self.target.step() LOG.info("Performing replay (%d/%d)...", self.status.iteration, repeat) # run tests + durations = list() served = list() for test_idx in range(test_count): LOG.debug("running test: %d of %d", test_idx + 1, test_count) @@ -194,6 +196,7 @@ def _dyn_close(): # pragma: no cover testcases[test_idx], test_path=unpacked[test_idx], wait_for_callback=self._harness is None) + durations.append(run_result.duration) served.append(run_result.served) if run_result.status != RunResult.COMPLETE: break @@ -221,7 +224,7 @@ def _dyn_close(): # pragma: no cover 
else: bucket_hash = report.crash_hash if bucket_hash not in reports: - reports[bucket_hash] = ReplayResult(report, served, True) + reports[bucket_hash] = ReplayResult(report, served, durations, True) LOG.debug("now tracking %s", bucket_hash) report = None # don't remove report else: @@ -232,7 +235,7 @@ def _dyn_close(): # pragma: no cover short_sig, report.major[:8], report.minor[:8]) self.status.ignored += 1 if report.crash_hash not in reports: - reports[report.crash_hash] = ReplayResult(report, served, False) + reports[report.crash_hash] = ReplayResult(report, served, durations, False) LOG.debug("now tracking %s", report.crash_hash) report = None # don't remove report else: diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 1d85e274..6707ecda 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -96,6 +96,7 @@ def test_replay_04(mocker, tmp_path): assert results[0].report assert len(results[0].served) == 1 assert results[0].served[0] == served + assert len(results[0].durations) == 1 results[0].report.cleanup() assert not any(tmp_path.glob("*")) @@ -427,6 +428,7 @@ def test_replay_16(mocker, tmp_path): assert results[0].served[0][0] == "a.html" assert results[0].served[1][0] == "b.html" assert results[0].served[2][0] == "c.html" + assert len(results[0].durations) == len(testcases) assert all(x.dump.call_count == 1 for x in testcases) def test_replay_17(mocker, tmp_path): @@ -454,19 +456,19 @@ def test_replay_18(mocker, tmp_path): assert not any(tmp_path.glob("*")) # with reports and tests (tmp_path / "report_expected").mkdir() - result0 = mocker.Mock(ReplayResult, count=1, expected=True, served=[]) + result0 = mocker.Mock(ReplayResult, count=1, durations=[1], expected=True, served=[]) result0.report = mocker.Mock( spec=Report, path=str(tmp_path / "report_expected"), prefix="expected") (tmp_path / "report_other1").mkdir() - result1 = mocker.Mock(ReplayResult, count=1, expected=False, served=None) + result1 = mocker.Mock(ReplayResult, count=1, durations=[1], expected=False, served=None) result1.report = mocker.Mock( spec=Report, path=str(tmp_path / "report_other1"), prefix="other1") (tmp_path / "report_other2").mkdir() - result2 = mocker.Mock(ReplayResult, count=1, expected=False, served=None) + result2 = mocker.Mock(ReplayResult, count=1, durations=[1], expected=False, served=None) result2.report = mocker.Mock( spec=Report, path=str(tmp_path / "report_other2"), diff --git a/grizzly/session.py b/grizzly/session.py index 6dc4e24c..b4b4fa7f 100644 --- a/grizzly/session.py +++ b/grizzly/session.py @@ -165,6 +165,7 @@ def _dyn_close(): # pragma: no cover # run test case result = runner.run(ignore, self.iomanager.server_map, current_test, coverage=self.coverage) + current_test.duration = result.duration # adapter callbacks if result.timeout: log.debug("calling self.adapter.on_timeout()") diff --git a/grizzly/test_session.py b/grizzly/test_session.py index 82b5720e..531cb6e9 100644 --- a/grizzly/test_session.py +++ b/grizzly/test_session.py @@ -189,7 +189,7 @@ def test_session_07(tmp_path, mocker): Status.PATH = str(tmp_path) mocker.patch("grizzly.session.Report", autospec=True) fake_runner = mocker.patch("grizzly.session.Runner", autospec=True) - fake_runner.return_value.run.return_value = RunResult(["/fake/file"], status=RunResult.FAILED) + fake_runner.return_value.run.return_value = RunResult(["/fake/file"], 1, status=RunResult.FAILED) mocker.patch("grizzly.session.TestFile", autospec=True) fake_adapter = mocker.Mock(spec=Adapter, 
remaining=None) fake_adapter.IGNORE_UNSERVED = True @@ -216,7 +216,7 @@ def test_session_08(tmp_path, mocker): """test Session.run() ignoring failures""" Status.PATH = str(tmp_path) fake_runner = mocker.patch("grizzly.session.Runner", autospec=True) - fake_runner.return_value.run.return_value = RunResult([], status=RunResult.IGNORED) + fake_runner.return_value.run.return_value = RunResult([], 0.1, status=RunResult.IGNORED) mocker.patch("grizzly.session.TestFile", autospec=True) fake_adapter = mocker.Mock(spec=Adapter, remaining=None) fake_adapter.IGNORE_UNSERVED = True From 9cbefba8a646797906fc9ac6f563e18c4593453a Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 29 Sep 2020 13:54:18 -0700 Subject: [PATCH 022/531] [replay] Add idle detection support --- grizzly/common/runner.py | 3 ++- grizzly/replay/args.py | 11 ++++++---- grizzly/replay/replay.py | 41 ++++++++++++++++++++++--------------- grizzly/replay/test_main.py | 12 ++++++++++- 4 files changed, 44 insertions(+), 23 deletions(-) diff --git a/grizzly/common/runner.py b/grizzly/common/runner.py index 5142b43e..5ffddd9c 100644 --- a/grizzly/common/runner.py +++ b/grizzly/common/runner.py @@ -76,8 +76,9 @@ def schedule_poll(self, initial=False, now=None): class Runner(object): __slots__ = ("_idle", "_server", "_target") - def __init__(self, server, target, idle_threshold=0, idle_delay=60): + def __init__(self, server, target, idle_threshold=0, idle_delay=0): if idle_threshold > 0: + assert idle_delay > 0 self._idle = _IdleChecker(target.is_idle, idle_threshold, idle_delay) else: self._idle = None diff --git a/grizzly/replay/args.py b/grizzly/replay/args.py index c66567c4..d7e764a8 100644 --- a/grizzly/replay/args.py +++ b/grizzly/replay/args.py @@ -25,11 +25,11 @@ def __init__(self): "--any-crash", action="store_true", help="Any crash is interesting, not only crashes which match the original signature.") replay_args.add_argument( - "--idle-threshold", type=int, default=25, - help="CPU usage threshold to mark the process as idle (default: %(default)s)") + "--idle-delay", type=int, default=30, + help="Number of seconds to wait before polling for idle (default: %(default)s)") replay_args.add_argument( - "--idle-timeout", type=int, default=60, - help="Number of seconds to wait before polling testcase for idle (default: %(default)s)") + "--idle-threshold", type=int, default=0, + help="CPU usage threshold to mark the process as idle (default: disabled)") replay_args.add_argument( "-l", "--logs", help="Location to save logs. 
If the path exists it must be empty, if it " \ @@ -63,6 +63,9 @@ def sanity_check(self, args): if args.any_crash and args.sig is not None: self.parser.error("signature is ignored when running with '--any-crash'") + if args.idle_threshold and args.idle_delay <= 0: + self.parser.error("'--idle-delay' value must be positive") + if args.min_crashes < 1: self.parser.error("'--min-crashes' value must be positive") diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index d87a49df..58517cda 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -41,7 +41,7 @@ class ReplayManager(object): HARNESS_FILE = pathjoin(dirname(__file__), "..", "common", "harness.html") __slots__ = ("ignore", "server", "status", "target", "_any_crash", - "_harness", "_runner", "_signature", "_unpacked") + "_harness", "_signature", "_unpacked") def __init__(self, ignore, server, target, any_crash=False, signature=None, use_harness=True): self.ignore = ignore @@ -50,7 +50,6 @@ def __init__(self, ignore, server, target, any_crash=False, signature=None, use_ self.target = target self._any_crash = any_crash self._harness = None - self._runner = Runner(self.server, self.target) # TODO: make signature a property self._signature = signature if use_harness: @@ -105,7 +104,7 @@ def report_to_filesystem(path, results, tests=None): for result in expected: reporter.submit(tests or [], report=result.report) - def run(self, testcases, repeat=1, min_results=1): + def run(self, testcases, repeat=1, min_results=1, idle_delay=0, idle_threshold=0): """Run testcase replay. Args: @@ -113,13 +112,17 @@ def run(self, testcases, repeat=1, min_results=1): repeat (int): Maximum number of times to run the TestCase. min_results (int): Minimum number of results needed before run can be considered successful. + idle_delay (int): Number of seconds to wait before polling for idle. + idle_threshold (int): CPU usage threshold to mark the process as idle. Returns: - list: List of ReplayResults that were found running testcases. + list: ReplayResults that were found running testcases. """ - assert repeat > 0 + assert idle_delay >= 0 + assert idle_threshold >= 0 assert min_results > 0 - assert min_results <= repeat + assert repeat > 0 + assert repeat >= min_results assert testcases if self.status is not None: @@ -138,6 +141,7 @@ def _dyn_close(): # pragma: no cover server_map.set_dynamic_response("grz_close_browser", _dyn_close, mime_type="text/html") server_map.set_dynamic_response("grz_harness", lambda: self._harness, mime_type="text/html") + runner = Runner(self.server, self.target, idle_threshold=idle_threshold, idle_delay=idle_delay) # track unprocessed results reports = dict() # track unpacked testcases @@ -156,11 +160,11 @@ def _dyn_close(): # pragma: no cover if self.target.closed: LOG.info("Launching target...") if self._harness is None: - location = self._runner.location( + location = runner.location( "/grz_current_test", self.server.port) else: - location = self._runner.location( + location = runner.location( "/grz_harness", self.server.port, close_after=self.target.rl_reset * test_count, @@ -170,7 +174,7 @@ def _dyn_close(): # pragma: no cover # relaunching the Target to match the functionality of # Grizzly. If this is not the case each TestCase should # be run individually. 
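As a rough sketch of how the new idle options are consumed (`server`, `target`, `ignore`, `server_map` and `testcase` are placeholders, not part of the patch): a non-zero threshold makes the Runner poll Target.is_idle() once idle_delay seconds have passed.

    # Sketch only: enable idle polling at 25% CPU after a 30 second delay.
    runner = Runner(server, target, idle_threshold=25, idle_delay=30)
    result = runner.run(ignore, server_map, testcase)
    assert result.status == RunResult.COMPLETE
    elapsed = result.duration  # seconds spent serving the test case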
- self._runner.launch(location, env_mod=testcases[0].env_vars) + runner.launch(location, env_mod=testcases[0].env_vars) self.target.step() LOG.info("Performing replay (%d/%d)...", self.status.iteration, repeat) # run tests @@ -190,7 +194,7 @@ def _dyn_close(): # pragma: no cover testcases[test_idx].landing_page, required=False) # run testcase - run_result = self._runner.run( + run_result = runner.run( self.ignore, server_map, testcases[test_idx], @@ -342,7 +346,6 @@ def main(cls, args): LOG.error("Error: %s", str(exc)) return 1 - replay = None results = None target = None tmp_prefs = None @@ -388,14 +391,20 @@ def main(cls, args): # launch HTTP server used to serve test cases with Sapphire(auto_close=1, timeout=args.timeout) as server: target.reverse(server.port, server.port) - replay = ReplayManager( + with cls( args.ignore, server, target, any_crash=args.any_crash, signature=signature, - use_harness=not args.no_harness) - results = replay.run(testcases, repeat=repeat, min_results=args.min_crashes) + use_harness=not args.no_harness + ) as replay: + results = replay.run( + testcases, + idle_delay=args.idle_delay, + idle_threshold=args.idle_threshold, + min_results=args.min_crashes, + repeat=repeat) # handle results success = any(x.expected for x in results) if success: @@ -403,7 +412,7 @@ def main(cls, args): else: LOG.info("Failed to reproduce results") if args.logs and results: - replay.report_to_filesystem( + cls.report_to_filesystem( args.logs, results, testcases if args.include_test else None) @@ -424,8 +433,6 @@ def main(cls, args): finally: LOG.warning("Shutting down...") - if replay is not None: - replay.cleanup() if results: # cleanup unreported results for result in results: diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 2f91334b..965df971 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -54,10 +54,14 @@ def test_args_01(capsys, tmp_path): with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--any-crash", "--sig", "x"]) assert "error: signature is ignored when running with '--any-crash'" in capsys.readouterr()[-1] - # multiple debuggers + # test multiple debuggers with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--rr", "--valgrind"]) assert "'--rr' and '--valgrind' cannot be used together" in capsys.readouterr()[-1] + # test idle args + with raises(SystemExit): + ReplayArgs().parse_args([str(exe), str(inp), "--idle-threshold", "1", "--idle-delay", "0"]) + assert "'--idle-delay' value must be positive" in capsys.readouterr()[-1] # force relaunch == 1 with --no-harness args = ReplayArgs().parse_args([str(exe), str(inp), "--no-harness"]) assert args.relaunch == 1 @@ -87,6 +91,8 @@ def test_main_01(mocker, tmp_path): (tmp_path / "sig.json").write_bytes(b"{\"symptoms\": [{\"type\": \"crashAddress\", \"address\": \"0\"}]}") args = mocker.Mock( fuzzmanager=False, + idle_delay=0, + idle_threshold=0, ignore=["fake", "timeout"], input=str(tmp_path / "test.html"), logs=str(log_path), @@ -202,6 +208,8 @@ def test_main_04(mocker): # setup args args = mocker.Mock( fuzzmanager=False, + idle_delay=0, + idle_threshold=0, ignore=None, input="test", min_crashes=1, @@ -229,6 +237,8 @@ def test_main_05(mocker, tmp_path): # setup args args = mocker.Mock( fuzzmanager=False, + idle_delay=0, + idle_threshold=0, ignore=None, min_crashes=1, relaunch=1, From 1e7e240043135c9951c8256fac76b47c477174f9 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 30 Sep 2020 16:33:09 -0700 Subject: [PATCH 023/531] Add 
additional idle config sanity checks --- grizzly/common/runner.py | 3 ++- grizzly/common/test_runner.py | 7 +++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/grizzly/common/runner.py b/grizzly/common/runner.py index 5ffddd9c..729e3246 100644 --- a/grizzly/common/runner.py +++ b/grizzly/common/runner.py @@ -30,7 +30,7 @@ def __init__(self, check_cb, threshold, initial_delay, poll_delay=1): assert callable(check_cb) assert initial_delay >= 0 assert poll_delay >= 0 - assert threshold >= 0 + assert 100 > threshold >= 0 self._check_cb = check_cb # callback used to check if target is idle self._init_delay = initial_delay # time to wait before the initial idle poll self._poll_delay = poll_delay # time to wait between subsequent polls @@ -79,6 +79,7 @@ class Runner(object): def __init__(self, server, target, idle_threshold=0, idle_delay=0): if idle_threshold > 0: assert idle_delay > 0 + LOG.debug("using idle check, th %d, delay %ds", idle_threshold, idle_delay) self._idle = _IdleChecker(target.is_idle, idle_threshold, idle_delay) else: self._idle = None diff --git a/grizzly/common/test_runner.py b/grizzly/common/test_runner.py index 9233a283..d36f470a 100644 --- a/grizzly/common/test_runner.py +++ b/grizzly/common/test_runner.py @@ -227,8 +227,8 @@ def test_runner_09(mocker, tmp_path): def test_idle_check_01(mocker): """test simple _IdleChecker""" fake_time = mocker.patch("grizzly.common.runner.time", autospec=True) - ichk = _IdleChecker(mocker.Mock(), 100, 10, poll_delay=1) - assert ichk._threshold == 100 + ichk = _IdleChecker(mocker.Mock(), 95, 10, poll_delay=1) + assert ichk._threshold == 95 assert ichk._init_delay == 10 assert ichk._poll_delay == 1 assert ichk._next_poll is None @@ -243,8 +243,7 @@ def test_idle_check_02(mocker): fake_time = mocker.patch("grizzly.common.runner.time", autospec=True) callbk = mocker.Mock() callbk.return_value = False - #check_cb, delay, duration, threshold - ichk = _IdleChecker(callbk, 100, 10, poll_delay=1) + ichk = _IdleChecker(callbk, 99, 10, poll_delay=1) fake_time.return_value = 0 ichk.schedule_poll() # early check From 0f095a0570f80cd4bafe0f82977da42e7b41ccb6 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 30 Sep 2020 16:34:37 -0700 Subject: [PATCH 024/531] Remove harness default time_limit --- grizzly/common/harness.html | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/grizzly/common/harness.html b/grizzly/common/harness.html index 7a9792ca..04f86c36 100644 --- a/grizzly/common/harness.html +++ b/grizzly/common/harness.html @@ -25,10 +25,6 @@ grzDump('Test case time limit already set') return } - if (time_limit === 0) { - // test case time limit disabled - return - } limit_tmr = setTimeout(() => { grzDump('Test case time limit exceeded') if (!sub.closed){ @@ -71,9 +67,12 @@ } // set the test case timeout once the test loading ends - sub.addEventListener('abort', setTestTimeout) - sub.addEventListener('error', setTestTimeout) - sub.addEventListener('load', setTestTimeout) + if (time_limit > 0) { + grzDump(`Using test case time limit of ${time_limit}`) + sub.addEventListener('abort', setTestTimeout) + sub.addEventListener('error', setTestTimeout) + sub.addEventListener('load', setTestTimeout) + } setTimeout(main, 50) } @@ -97,15 +96,6 @@ } } - if ((time_limit === undefined) || (time_limit < 0)) { - time_limit = 15000 - grzDump(`No valid time limit given, using default of ${time_limit}`) - } else if (time_limit > 0) { - grzDump(`Using test case time limit of ${time_limit}`) - } else { - 
grzDump(`Test case time limit diabled`) - } - // update banner setBanner('🐻 ⋅ Grizzly Harness ⋅ 🦊') main() From 78055c0b440ed6597117dbd0fcc8b8cc2a11e7ee Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 1 Oct 2020 09:35:31 -0700 Subject: [PATCH 025/531] Update timeout related messages --- grizzly/args.py | 4 +++- grizzly/replay/replay.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/grizzly/args.py b/grizzly/args.py index cee2b57a..796ea32b 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -82,7 +82,9 @@ def __init__(self): help="Number of iterations performed before relaunching the browser (default: %(default)s)") self.launcher_grp.add_argument( "-t", "--timeout", type=int, default=60, - help="Iteration timeout in seconds (default: %(default)s)") + help="Iteration or test case timeout in seconds (default: %(default)s)." + " Browser build types and debuggers can affect the amount of time" + " required to run a test case.") self.launcher_grp.add_argument( "--valgrind", action="store_true", help="Use Valgrind (Linux only)") diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 58517cda..20f5bb02 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -253,7 +253,7 @@ def _dyn_close(): # pragma: no cover self.status.ignored += 1 LOG.info("Result: Ignored (%d)", self.status.ignored) elif run_result.status == RunResult.ERROR: - LOG.error("ERROR: Replay malfunction, test case was not served") + LOG.error("ERROR: Test case was not served. Timeout too short?") break # check status and exit early if possible From ba48dedb595a135b1c7c35420f161b845d09044b Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 1 Oct 2020 12:28:02 -0700 Subject: [PATCH 026/531] Use __name__ as logger name --- grizzly/adapters/__init__.py | 2 +- grizzly/common/reporter.py | 2 +- grizzly/common/runner.py | 2 +- grizzly/common/stack_hasher.py | 2 +- grizzly/common/status.py | 2 +- grizzly/main.py | 2 +- grizzly/reduce/bucket.py | 2 +- grizzly/reduce/crash.py | 2 +- grizzly/reduce/reduce.py | 2 +- grizzly/reduce/strategies.py | 2 +- grizzly/replay/replay.py | 2 +- grizzly/session.py | 2 +- grizzly/target/__init__.py | 2 +- grizzly/target/puppet_target.py | 2 +- grizzly/target/target.py | 2 +- loki/loki.py | 2 +- sapphire/conftest.py | 2 +- sapphire/core.py | 2 +- sapphire/sapphire_job.py | 2 +- sapphire/sapphire_load_manager.py | 2 +- sapphire/sapphire_worker.py | 2 +- sapphire/server_map.py | 2 +- 22 files changed, 22 insertions(+), 22 deletions(-) diff --git a/grizzly/adapters/__init__.py b/grizzly/adapters/__init__.py index 20e48d5c..1da3d466 100644 --- a/grizzly/adapters/__init__.py +++ b/grizzly/adapters/__init__.py @@ -6,7 +6,7 @@ from grizzly.common import Adapter -log = logging.getLogger("grizzly") # pylint: disable=invalid-name +log = logging.getLogger(__name__) # pylint: disable=invalid-name __all__ = ("get", "load", "names") __adapters__ = dict() diff --git a/grizzly/common/reporter.py b/grizzly/common/reporter.py index 1a81bd16..b402990a 100644 --- a/grizzly/common/reporter.py +++ b/grizzly/common/reporter.py @@ -44,7 +44,7 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -LOG = getLogger("grizzly") +LOG = getLogger(__name__) # NOTE: order matters, aux -> stderr -> stdout LogMap = namedtuple("LogMap", "aux stderr stdout") diff --git a/grizzly/common/runner.py b/grizzly/common/runner.py index 729e3246..b7797fa3 100644 --- a/grizzly/common/runner.py +++ b/grizzly/common/runner.py @@ -15,7 +15,7 @@ __author__ = "Tyson Smith" __credits__ = 
["Tyson Smith"] -LOG = getLogger("grz_runner") +LOG = getLogger(__name__) # _IdleChecker is used to help determine if the target is hung (actively using CPU) # or if it has not made expected the HTTP requests for other reasons (idle). diff --git a/grizzly/common/stack_hasher.py b/grizzly/common/stack_hasher.py index 2817f500..4664871f 100644 --- a/grizzly/common/stack_hasher.py +++ b/grizzly/common/stack_hasher.py @@ -21,7 +21,7 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -LOG = getLogger("stack_hasher") +LOG = getLogger(__name__) MAJOR_DEPTH = 5 MAJOR_DEPTH_RUST = 10 diff --git a/grizzly/common/status.py b/grizzly/common/status.py index b61d7c7b..8b77e1b2 100644 --- a/grizzly/common/status.py +++ b/grizzly/common/status.py @@ -19,7 +19,7 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -LOG = getLogger("status") +LOG = getLogger(__name__) class Status(object): diff --git a/grizzly/main.py b/grizzly/main.py index 081d0f8a..21de9186 100644 --- a/grizzly/main.py +++ b/grizzly/main.py @@ -20,7 +20,7 @@ __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] -log = getLogger("grizzly") # pylint: disable=invalid-name +log = getLogger(__name__) # pylint: disable=invalid-name def configure_logging(log_level): if log_level == DEBUG: diff --git a/grizzly/reduce/bucket.py b/grizzly/reduce/bucket.py index a64ed40c..edb8e73f 100644 --- a/grizzly/reduce/bucket.py +++ b/grizzly/reduce/bucket.py @@ -15,7 +15,7 @@ from .crash import CrashReductionJob -LOG = logging.getLogger("grizzly.reduce.bucket") +LOG = logging.getLogger(__name__) def bucket_crashes(bucket_id, quality_filter): diff --git a/grizzly/reduce/crash.py b/grizzly/reduce/crash.py index 0acd61ca..dd17a6d4 100644 --- a/grizzly/reduce/crash.py +++ b/grizzly/reduce/crash.py @@ -15,7 +15,7 @@ from ..common import FuzzManagerReporter -LOG = logging.getLogger("grizzly.reduce.crash") +LOG = logging.getLogger(__name__) def crashentry_data(crash_id, raw=False): diff --git a/grizzly/reduce/reduce.py b/grizzly/reduce/reduce.py index bced7f70..cede6ced 100644 --- a/grizzly/reduce/reduce.py +++ b/grizzly/reduce/reduce.py @@ -39,7 +39,7 @@ __credits__ = ["Tyson Smith", "Jesse Schwartzentruber", "Jason Kratzer"] -LOG = logging.getLogger("grizzly.reduce") +LOG = logging.getLogger(__name__) class LithiumInterestingProxy(object): diff --git a/grizzly/reduce/strategies.py b/grizzly/reduce/strategies.py index 528d677d..22263407 100644 --- a/grizzly/reduce/strategies.py +++ b/grizzly/reduce/strategies.py @@ -23,7 +23,7 @@ from . 
import testcase_contents -LOG = logging.getLogger("grizzly.reduce.strategies") +LOG = logging.getLogger(__name__) class ReduceStage(metaclass=abc.ABCMeta): diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 20f5bb02..b9b676c7 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -23,7 +23,7 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -LOG = getLogger("replay") +LOG = getLogger(__name__) class ReplayResult(object): diff --git a/grizzly/session.py b/grizzly/session.py index b4b4fa7f..4f5addc6 100644 --- a/grizzly/session.py +++ b/grizzly/session.py @@ -17,7 +17,7 @@ __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] -log = getLogger("grizzly") # pylint: disable=invalid-name +log = getLogger(__name__) # pylint: disable=invalid-name class SessionError(Exception): diff --git a/grizzly/target/__init__.py b/grizzly/target/__init__.py index e97b91ac..5b6e6661 100644 --- a/grizzly/target/__init__.py +++ b/grizzly/target/__init__.py @@ -15,7 +15,7 @@ __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] TARGETS = None -LOG = getLogger("grizzly") +LOG = getLogger(__name__) def _load_targets(): diff --git a/grizzly/target/puppet_target.py b/grizzly/target/puppet_target.py index 1382d839..9e3e6fc7 100644 --- a/grizzly/target/puppet_target.py +++ b/grizzly/target/puppet_target.py @@ -25,7 +25,7 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] -LOG = getLogger("puppet_target") +LOG = getLogger(__name__) class PuppetTarget(Target): diff --git a/grizzly/target/target.py b/grizzly/target/target.py index 7bd0ac16..6ad91bc4 100644 --- a/grizzly/target/target.py +++ b/grizzly/target/target.py @@ -15,7 +15,7 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] -LOG = getLogger("grizzly") +LOG = getLogger(__name__) def sanitizer_opts(env_data): diff --git a/loki/loki.py b/loki/loki.py index eb2773bb..536e9749 100644 --- a/loki/loki.py +++ b/loki/loki.py @@ -15,7 +15,7 @@ __author__ = "Tyson Smith" -LOG = logging.getLogger("loki") +LOG = logging.getLogger(__name__) class Loki(object): diff --git a/sapphire/conftest.py b/sapphire/conftest.py index 6090eba6..97877c76 100644 --- a/sapphire/conftest.py +++ b/sapphire/conftest.py @@ -18,7 +18,7 @@ import pytest -LOG = logging.getLogger("sphr_test") +LOG = logging.getLogger(__name__) @pytest.fixture diff --git a/sapphire/core.py b/sapphire/core.py index 0efd5372..5c89ca24 100644 --- a/sapphire/core.py +++ b/sapphire/core.py @@ -21,7 +21,7 @@ __credits__ = ["Tyson Smith"] -LOG = logging.getLogger("sapphire") +LOG = logging.getLogger(__name__) class Sapphire(object): diff --git a/sapphire/sapphire_job.py b/sapphire/sapphire_job.py index 789713d9..c29b8d40 100644 --- a/sapphire/sapphire_job.py +++ b/sapphire/sapphire_job.py @@ -19,7 +19,7 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -LOG = getLogger("sphr_job") +LOG = getLogger(__name__) Tracker = namedtuple("Tracker", "files lock") diff --git a/sapphire/sapphire_load_manager.py b/sapphire/sapphire_load_manager.py index 9c735955..02236a70 100644 --- a/sapphire/sapphire_load_manager.py +++ b/sapphire/sapphire_load_manager.py @@ -13,7 +13,7 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -LOG = getLogger("sphr_loadmgr") +LOG = getLogger(__name__) class SapphireLoadManager(object): diff --git a/sapphire/sapphire_worker.py b/sapphire/sapphire_worker.py index 90a3d6db..6bc46d5a 100644 --- a/sapphire/sapphire_worker.py +++ b/sapphire/sapphire_worker.py @@ -20,7 +20,7 @@ 
__author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -LOG = getLogger("sphr_worker") +LOG = getLogger(__name__) class SapphireWorkerError(Exception): diff --git a/sapphire/server_map.py b/sapphire/server_map.py index 0a48111c..8ada3433 100644 --- a/sapphire/server_map.py +++ b/sapphire/server_map.py @@ -12,7 +12,7 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -LOG = logging.getLogger("sphr_map") # pylint: disable=invalid-name +LOG = logging.getLogger(__name__) # pylint: disable=invalid-name class InvalidURLError(Exception): From 1ba060ff2e16aafa8b95ebb61fb2fb0cb68cdc24 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 1 Oct 2020 12:30:11 -0700 Subject: [PATCH 027/531] Update debug log format --- grizzly/__main__.py | 3 +-- grizzly/main.py | 6 ++++-- sapphire/__main__.py | 6 ++++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/grizzly/__main__.py b/grizzly/__main__.py index eb579e83..1dbd9537 100644 --- a/grizzly/__main__.py +++ b/grizzly/__main__.py @@ -20,8 +20,7 @@ # is where basicConfig should be called). if getenv("DEBUG"): basicConfig( - format="%(levelname).1s %(name)s [%(asctime)s] %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", + format="%(asctime)s %(levelname).1s %(name)s | %(message)s", level=DEBUG) # load Adapters load() diff --git a/grizzly/main.py b/grizzly/main.py index 21de9186..32abd73c 100644 --- a/grizzly/main.py +++ b/grizzly/main.py @@ -24,10 +24,12 @@ def configure_logging(log_level): if log_level == DEBUG: - log_fmt = "%(levelname).1s %(name)s [%(asctime)s] %(message)s" + date_fmt = None + log_fmt = "%(asctime)s %(levelname).1s %(name)s | %(message)s" else: + date_fmt = "%Y-%m-%d %H:%M:%S" log_fmt = "[%(asctime)s] %(message)s" - basicConfig(format=log_fmt, datefmt="%Y-%m-%d %H:%M:%S", level=log_level) + basicConfig(format=log_fmt, datefmt=date_fmt, level=log_level) def main(args): configure_logging(args.log_level) diff --git a/sapphire/__main__.py b/sapphire/__main__.py index 6f813305..a5e81d15 100644 --- a/sapphire/__main__.py +++ b/sapphire/__main__.py @@ -11,10 +11,12 @@ def configure_logging(log_level): if log_level == DEBUG: - log_fmt = "%(levelname).1s %(name)s [%(asctime)s] %(message)s" + date_fmt = None + log_fmt = "%(asctime)s %(levelname).1s %(name)s | %(message)s" else: + date_fmt = "%Y-%m-%d %H:%M:%S" log_fmt = "[%(asctime)s] %(message)s" - basicConfig(format=log_fmt, datefmt="%Y-%m-%d %H:%M:%S", level=log_level) + basicConfig(format=log_fmt, datefmt=date_fmt, level=log_level) def parse_args(argv=None): # log levels for console logging From 0eb4d913de98321710acec969c82b154bdcc3264 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 8 Oct 2020 17:51:43 -0700 Subject: [PATCH 028/531] [ci] Add Python3.9 --- .travis.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 0abee774..3cf0f519 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ python: - 3.6 - 3.7 - 3.8 + - 3.9 jobs: include: - os: osx @@ -15,8 +16,10 @@ jobs: - os: windows language: shell before_install: - - choco install python + - choco install python --version 3.8 env: PATH=/c/Python38:/c/Python38/Scripts:$PATH + allow_failures: + - python: 3.9 before_install: - pip3 install --upgrade setuptools pip install: From dc5300937453cd186ee1903c5d42870e4937d0a9 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Fri, 9 Oct 2020 10:56:58 -0700 Subject: [PATCH 029/531] Remove invalid assertion --- grizzly/common/storage.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git 
a/grizzly/common/storage.py b/grizzly/common/storage.py index 38dfc373..3fa30bbd 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -404,16 +404,15 @@ def purge_optional(self, keep): Returns: None """ + opt_files = tuple(x.file_name for x in self._files.optional) + if not opt_files: + # nothing to purge + return None # filter required files from opt_files files to keep keep_opt = list() for fname in set(keep): if fname not in (x.file_name for x in self._files.required): keep_opt.append(fname) - opt_files = tuple(x.file_name for x in self._files.optional) - if not opt_files: - assert not keep_opt - # nothing to purge - return None # sanity check keep (cannot remove file that does not exist) assert all(fname in opt_files for fname in keep_opt) # purge From 0279df1f54015b5e99c68ff40584af1d0b1a59ac Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 1 Oct 2020 13:30:56 -0700 Subject: [PATCH 030/531] [sapphire] Update imports --- sapphire/core.py | 37 ++++++++++++------------ sapphire/sapphire_load_manager.py | 32 ++++++++++----------- sapphire/server_map.py | 19 ++++++------ sapphire/test_sapphire.py | 6 ++-- sapphire/test_sapphire_load_manager.py | 40 +++++++++++++------------- 5 files changed, 66 insertions(+), 68 deletions(-) diff --git a/sapphire/core.py b/sapphire/core.py index 5c89ca24..8bb37ea6 100644 --- a/sapphire/core.py +++ b/sapphire/core.py @@ -1,16 +1,17 @@ # coding=utf-8 -""" -Sapphire HTTP server -""" # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -import errno -import logging -import os -import random -import socket -import time +""" +Sapphire HTTP server +""" +from errno import EADDRINUSE +from logging import getLogger +from os.path import abspath +from random import randint +from socket import AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR +from socket import error as sock_error, gethostname, socket +from time import sleep from .sapphire_job import SapphireJob from .sapphire_load_manager import SapphireLoadManager @@ -21,7 +22,7 @@ __credits__ = ["Tyson Smith"] -LOG = logging.getLogger(__name__) +LOG = getLogger(__name__) class Sapphire(object): @@ -46,19 +47,19 @@ def _create_listening_socket(allow_remote, requested_port, retries=20): for retry in reversed(range(retries)): sock = None try: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock = socket(AF_INET, SOCK_STREAM) + sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) sock.settimeout(0.25) # find an unused port and avoid blocked ports # see: dxr.mozilla.org/mozilla-central/source/netwerk/base/nsIOService.cpp - port = random.randint(0x2000, 0xFFFF) if requested_port is None else requested_port + port = requested_port or randint(0x2000, 0xFFFF) sock.bind(("0.0.0.0" if allow_remote else "127.0.0.1", port)) sock.listen(5) - except (OSError, socket.error) as soc_e: + except (OSError, sock_error) as soc_e: if sock is not None: sock.close() - if retry > 1 and soc_e.errno in (errno.EADDRINUSE, 10013): - time.sleep(0.1) + if retry > 1 and soc_e.errno in (EADDRINUSE, 10013): + sleep(0.1) continue raise break @@ -131,8 +132,8 @@ def main(cls, args): with cls(allow_remote=args.remote, port=args.port, timeout=args.timeout) as serv: LOG.info( "Serving %r @ http://%s:%d/", - os.path.abspath(args.path), - socket.gethostname() if args.remote else "127.0.0.1", + abspath(args.path), + 
gethostname() if args.remote else "127.0.0.1", serv.port) status = serv.serve_path(args.path)[0] if status == SERVED_ALL: diff --git a/sapphire/sapphire_load_manager.py b/sapphire/sapphire_load_manager.py index 02236a70..36fd97a7 100644 --- a/sapphire/sapphire_load_manager.py +++ b/sapphire/sapphire_load_manager.py @@ -3,10 +3,10 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from logging import getLogger -import sys -import threading -import time -import traceback +from sys import exc_info +from threading import active_count, Thread, ThreadError +from time import sleep, time +from traceback import format_exception from .sapphire_worker import SapphireWorker @@ -44,14 +44,14 @@ def close(self): exc_type, exc_obj, exc_tb = self._job.exceptions.get() LOG.error( "Unexpected exception:\n%s", - "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))) + "".join(format_exception(exc_type, exc_obj, exc_tb))) # re-raise exception from worker once all workers are closed raise exc_obj def start(self): assert self._job.pending # create the listener thread to handle incoming requests - listener = threading.Thread( + listener = Thread( target=self.listener, args=(self._socket, self._job, self._workers), kwargs={"shutdown_delay": self.SHUTDOWN_DELAY}) @@ -59,12 +59,12 @@ def start(self): for retry in reversed(range(10)): try: listener.start() - except threading.ThreadError: + except ThreadError: # thread errors can be due to low system resources while fuzzing - LOG.warning("ThreadError (listener), threads: %d", threading.active_count()) + LOG.warning("ThreadError (listener), threads: %d", active_count()) if retry < 1: raise - time.sleep(1) + sleep(1) continue self._listener = listener break @@ -72,7 +72,7 @@ def start(self): def wait(self, timeout, continue_cb=None, poll=0.5): assert self._listener is not None if timeout > 0: - deadline = time.time() + timeout + deadline = time() + timeout else: deadline = None if continue_cb is not None and not callable(continue_cb): @@ -81,7 +81,7 @@ def wait(self, timeout, continue_cb=None, poll=0.5): # the total iteration rate of Grizzly while not self._job.is_complete(wait=poll): # check for a timeout - if deadline and deadline <= time.time(): + if deadline and deadline <= time(): return False # check if callback returns False if continue_cb is not None and not continue_cb(): @@ -118,25 +118,25 @@ def listener(serv_sock, serv_job, max_workers, shutdown_delay=0): pool_size = len(worker_pool) if pool_size < max_workers: break - time.sleep(0.5) # pragma: no cover + sleep(0.5) # pragma: no cover else: # pragma: no cover # this should never happen raise RuntimeError("Failed to trim worker pool!") LOG.debug("trimmed worker pool (size: %d)", pool_size) except Exception: # pylint: disable=broad-except if serv_job.exceptions.empty(): - serv_job.exceptions.put(sys.exc_info()) + serv_job.exceptions.put(exc_info()) serv_job.finish() finally: LOG.debug("listener cleaning up workers") - deadline = time.time() + shutdown_delay - while time.time() < deadline: + deadline = time() + shutdown_delay + while time() < deadline: worker_pool = list(w for w in worker_pool if not w.done) if not worker_pool: break # avoid cutting off connections LOG.debug("waiting for %d worker(s)...", len(worker_pool)) - time.sleep(0.1) + sleep(0.1) else: # pragma: no cover LOG.debug("closing remaining workers") for worker in (w for w in worker_pool if not w.done): diff --git a/sapphire/server_map.py 
b/sapphire/server_map.py index 8ada3433..47fb181c 100644 --- a/sapphire/server_map.py +++ b/sapphire/server_map.py @@ -3,16 +3,15 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -import logging -import os -import re - +from logging import getLogger +from os.path import abspath, isdir, relpath +from re import search as re_search __all__ = ("Resource", "ServerMap") __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -LOG = logging.getLogger(__name__) # pylint: disable=invalid-name +LOG = getLogger(__name__) # pylint: disable=invalid-name class InvalidURLError(Exception): @@ -50,7 +49,7 @@ def __init__(self): def _check_url(url): # check and sanitize URL url = url.strip("/") - if re.search(r"\W", url) is not None: + if re_search(r"\W", url) is not None: raise InvalidURLError("Only alpha-numeric characters accepted in URL.") return url @@ -70,11 +69,11 @@ def set_dynamic_response(self, url, callback, mime_type="application/octet-strea def set_include(self, url, target_path): url = self._check_url(url) - if not os.path.isdir(target_path): + if not isdir(target_path): raise IOError("Include path not found: %s" % (target_path,)) if url in self.dynamic or url in self.redirect: raise MapCollisionError("URL collision on %r" % (url,)) - target_path = os.path.abspath(target_path) + target_path = abspath(target_path) # sanity check to prevent mapping overlapping paths # Note: This was added to help map file served via includes back to # the files on disk. This is a temporary workaround until mapping of @@ -83,10 +82,10 @@ def set_include(self, url, target_path): if url == existing_url: # allow overwriting entry continue - if not os.path.relpath(target_path, resource.target).startswith(".."): + if not relpath(target_path, resource.target).startswith(".."): LOG.error("%r mapping includes path %r", existing_url, target_path) raise MapCollisionError("%r and %r include %r" % (url, existing_url, target_path)) - if not os.path.relpath(resource.target, target_path).startswith(".."): + if not relpath(resource.target, target_path).startswith(".."): LOG.error("%r mapping includes path %r", url, resource.target) raise MapCollisionError("%r and %r include %r" % (url, existing_url, resource.target)) LOG.debug("mapping include %r -> %r", url, target_path) diff --git a/sapphire/test_sapphire.py b/sapphire/test_sapphire.py index 0d82cd24..793f1812 100644 --- a/sapphire/test_sapphire.py +++ b/sapphire/test_sapphire.py @@ -11,8 +11,6 @@ import pytest -from grizzly.common import TestCase - from .core import Sapphire from .sapphire_worker import SapphireWorker from .server_map import ServerMap @@ -600,8 +598,8 @@ def test_sapphire_30(client, tmp_path): def test_sapphire_31(mocker): """test Sapphire._create_listening_socket()""" - fake_sleep = mocker.patch("sapphire.core.time.sleep", autospec=True) - fake_sock = mocker.patch("sapphire.core.socket.socket", autospec=True) + fake_sleep = mocker.patch("sapphire.core.sleep", autospec=True) + fake_sock = mocker.patch("sapphire.core.socket", autospec=True) assert Sapphire._create_listening_socket(False, None) assert fake_sock.return_value.close.call_count == 0 assert fake_sock.return_value.setsockopt.call_count == 1 diff --git a/sapphire/test_sapphire_load_manager.py b/sapphire/test_sapphire_load_manager.py index 6f5670f5..96785cfe 100644 --- a/sapphire/test_sapphire_load_manager.py +++ b/sapphire/test_sapphire_load_manager.py @@ -4,10 +4,10 @@ """ # pylint: disable=protected-access -import 
socket -import threading +from socket import socket +from threading import ThreadError -import pytest +from pytest import raises from .sapphire_load_manager import SapphireLoadManager from .sapphire_job import SapphireJob @@ -17,9 +17,9 @@ def test_sapphire_load_manager_01(mocker, tmp_path): """test basic SapphireLoadManager""" (tmp_path / "testfile").write_bytes(b"test") job = SapphireJob(str(tmp_path)) - clnt_sock = mocker.Mock(spec=socket.socket) + clnt_sock = mocker.Mock(spec=socket) clnt_sock.recv.return_value = b"GET /testfile HTTP/1.1" - serv_sock = mocker.Mock(spec=socket.socket) + serv_sock = mocker.Mock(spec=socket) serv_sock.accept.return_value = (clnt_sock, None) assert not job.is_complete() with SapphireLoadManager(job, serv_sock) as loadmgr: @@ -31,13 +31,13 @@ def test_sapphire_load_manager_01(mocker, tmp_path): def test_sapphire_load_manager_02(mocker): """test SapphireLoadManager.start() failure""" - mocker.patch("sapphire.sapphire_load_manager.time.sleep", autospec=True) - fake_thread = mocker.patch("sapphire.sapphire_load_manager.threading.Thread", autospec=True) - fake_thread.return_value.start.side_effect = threading.ThreadError + mocker.patch("sapphire.sapphire_load_manager.sleep", autospec=True) + fake_thread = mocker.patch("sapphire.sapphire_load_manager.Thread", autospec=True) + fake_thread.return_value.start.side_effect = ThreadError job = mocker.Mock(spec=SapphireJob) job.pending = True loadmgr = SapphireLoadManager(job, None) - with pytest.raises(threading.ThreadError): + with raises(ThreadError): loadmgr.start() loadmgr.close() assert job.is_complete() @@ -48,7 +48,7 @@ def test_sapphire_load_manager_03(mocker, tmp_path): (tmp_path / "test2").touch() (tmp_path / "test3").touch() job = SapphireJob(str(tmp_path)) - clnt_sock = mocker.Mock(spec=socket.socket) + clnt_sock = mocker.Mock(spec=socket) clnt_sock.recv.side_effect = ( b"GET /test1 HTTP/1.1", b"GET /missing HTTP/1.1", @@ -58,7 +58,7 @@ def test_sapphire_load_manager_03(mocker, tmp_path): b"GET /test1 HTTP/1.1", b"GET /test1 HTTP/1.1", b"GET /test3 HTTP/1.1") - serv_sock = mocker.Mock(spec=socket.socket) + serv_sock = mocker.Mock(spec=socket) serv_sock.accept.return_value = (clnt_sock, None) assert not job.is_complete() with SapphireLoadManager(job, serv_sock, max_workers=2) as loadmgr: @@ -70,20 +70,20 @@ def test_sapphire_load_manager_04(mocker, tmp_path): """test SapphireLoadManager.wait()""" (tmp_path / "test1").touch() job = SapphireJob(str(tmp_path)) - clnt_sock = mocker.Mock(spec=socket.socket) + clnt_sock = mocker.Mock(spec=socket) clnt_sock.recv.return_value = b"" - serv_sock = mocker.Mock(spec=socket.socket) + serv_sock = mocker.Mock(spec=socket) serv_sock.accept.return_value = (clnt_sock, None) with SapphireLoadManager(job, serv_sock, max_workers=10) as loadmgr: # invalid callback - with pytest.raises(TypeError, match="continue_cb must be callable"): + with raises(TypeError, match="continue_cb must be callable"): loadmgr.wait(0, continue_cb="test") # callback abort assert loadmgr.wait(1, continue_cb=lambda: False, poll=0.01) # timeout job = SapphireJob(str(tmp_path)) fake_time = mocker.patch("sapphire.sapphire_load_manager.time", autospec=True) - fake_time.time.side_effect = (1, 2, 3) + fake_time.side_effect = (1, 2, 3) with SapphireLoadManager(job, serv_sock, max_workers=10) as loadmgr: assert not loadmgr.wait(1, continue_cb=lambda: False, poll=0.01) @@ -91,11 +91,11 @@ def test_sapphire_load_manager_05(mocker, tmp_path): """test SapphireLoadManager re-raise worker exceptions""" (tmp_path / 
"test1").touch() job = SapphireJob(str(tmp_path)) - clnt_sock = mocker.Mock(spec=socket.socket) + clnt_sock = mocker.Mock(spec=socket) clnt_sock.recv.side_effect = Exception("worker exception") - serv_sock = mocker.Mock(spec=socket.socket) + serv_sock = mocker.Mock(spec=socket) serv_sock.accept.return_value = (clnt_sock, None) - with pytest.raises(Exception, match="worker exception"): + with raises(Exception, match="worker exception"): with SapphireLoadManager(job, serv_sock) as loadmgr: loadmgr.wait(1) assert clnt_sock.close.call_count == 1 @@ -106,9 +106,9 @@ def test_sapphire_load_manager_06(mocker, tmp_path): """test SapphireLoadManager re-raise launcher exceptions""" (tmp_path / "test1").touch() job = SapphireJob(str(tmp_path)) - serv_sock = mocker.Mock(spec=socket.socket) + serv_sock = mocker.Mock(spec=socket) serv_sock.accept.side_effect = Exception("launcher exception") - with pytest.raises(Exception, match="launcher exception"): + with raises(Exception, match="launcher exception"): with SapphireLoadManager(job, serv_sock) as loadmgr: loadmgr.wait(1) assert job.is_complete() From cd3e47792ce2525355b400e3dc6fdcdec567807a Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 1 Oct 2020 13:54:33 -0700 Subject: [PATCH 031/531] [sapphire] Modules and Classes --- ..._load_manager.py => connection_manager.py} | 6 +- sapphire/core.py | 8 +- sapphire/{sapphire_job.py => job.py} | 2 +- ..._manager.py => test_connection_manager.py} | 64 ++++++++-------- .../{test_sapphire_job.py => test_job.py} | 76 +++++++++---------- sapphire/test_sapphire.py | 10 +-- ...test_sapphire_worker.py => test_worker.py} | 56 +++++++------- sapphire/{sapphire_worker.py => worker.py} | 10 +-- 8 files changed, 116 insertions(+), 116 deletions(-) rename sapphire/{sapphire_load_manager.py => connection_manager.py} (97%) rename sapphire/{sapphire_job.py => job.py} (99%) rename sapphire/{test_sapphire_load_manager.py => test_connection_manager.py} (61%) rename sapphire/{test_sapphire_job.py => test_job.py} (82%) rename sapphire/{test_sapphire_worker.py => test_worker.py} (65%) rename sapphire/{sapphire_worker.py => worker.py} (97%) diff --git a/sapphire/sapphire_load_manager.py b/sapphire/connection_manager.py similarity index 97% rename from sapphire/sapphire_load_manager.py rename to sapphire/connection_manager.py index 36fd97a7..3d1035d4 100644 --- a/sapphire/sapphire_load_manager.py +++ b/sapphire/connection_manager.py @@ -8,7 +8,7 @@ from time import sleep, time from traceback import format_exception -from .sapphire_worker import SapphireWorker +from .worker import Worker __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] @@ -16,7 +16,7 @@ LOG = getLogger(__name__) -class SapphireLoadManager(object): +class ConnectionManager(object): SHUTDOWN_DELAY = 0.5 # allow extra time before closing socket if needed __slots__ = ("_job", "_listener", "_socket", "_workers") @@ -100,7 +100,7 @@ def listener(serv_sock, serv_job, max_workers, shutdown_delay=0): while not serv_job.is_complete(): if not serv_job.accepting.wait(0.05): continue - worker = SapphireWorker.launch(serv_sock, serv_job) + worker = Worker.launch(serv_sock, serv_job) if worker is not None: worker_pool.append(worker) pool_size += 1 diff --git a/sapphire/core.py b/sapphire/core.py index 8bb37ea6..6742c7cb 100644 --- a/sapphire/core.py +++ b/sapphire/core.py @@ -13,8 +13,8 @@ from socket import error as sock_error, gethostname, socket from time import sleep -from .sapphire_job import SapphireJob -from .sapphire_load_manager import SapphireLoadManager +from 
.job import Job +from .connection_manager import ConnectionManager from .status_codes import SERVED_ALL, SERVED_NONE, SERVED_TIMEOUT @@ -100,7 +100,7 @@ def serve_path(self, path, continue_cb=None, forever=False, optional_files=None, files served is a list of the files that were served """ LOG.debug("serving %r (forever=%r)", path, forever) - job = SapphireJob( + job = Job( path, auto_close=self._auto_close, forever=forever, @@ -110,7 +110,7 @@ def serve_path(self, path, continue_cb=None, forever=False, optional_files=None, job.finish() LOG.debug("nothing to serve") return (SERVED_NONE, tuple()) - with SapphireLoadManager(job, self._socket, self._max_workers) as loadmgr: + with ConnectionManager(job, self._socket, self._max_workers) as loadmgr: was_timeout = not loadmgr.wait(self.timeout, continue_cb=continue_cb) LOG.debug("status: %r, timeout: %r", job.status, was_timeout) return (SERVED_TIMEOUT if was_timeout else job.status, tuple(job.served)) diff --git a/sapphire/sapphire_job.py b/sapphire/job.py similarity index 99% rename from sapphire/sapphire_job.py rename to sapphire/job.py index c29b8d40..ddea30b9 100644 --- a/sapphire/sapphire_job.py +++ b/sapphire/job.py @@ -25,7 +25,7 @@ Tracker = namedtuple("Tracker", "files lock") -class SapphireJob(object): +class Job(object): # MIME_MAP is used to support new or uncommon mime types. # Definitions in here take priority over mimetypes.guess_type(). MIME_MAP = { diff --git a/sapphire/test_sapphire_load_manager.py b/sapphire/test_connection_manager.py similarity index 61% rename from sapphire/test_sapphire_load_manager.py rename to sapphire/test_connection_manager.py index 96785cfe..c9c3c3a8 100644 --- a/sapphire/test_sapphire_load_manager.py +++ b/sapphire/test_connection_manager.py @@ -1,6 +1,6 @@ # coding=utf-8 """ -SapphireLoadManager unit tests +ConnectionManager unit tests """ # pylint: disable=protected-access @@ -9,45 +9,45 @@ from pytest import raises -from .sapphire_load_manager import SapphireLoadManager -from .sapphire_job import SapphireJob +from .connection_manager import ConnectionManager +from .job import Job -def test_sapphire_load_manager_01(mocker, tmp_path): - """test basic SapphireLoadManager""" +def test_connection_manager_01(mocker, tmp_path): + """test basic ConnectionManager""" (tmp_path / "testfile").write_bytes(b"test") - job = SapphireJob(str(tmp_path)) + job = Job(str(tmp_path)) clnt_sock = mocker.Mock(spec=socket) clnt_sock.recv.return_value = b"GET /testfile HTTP/1.1" serv_sock = mocker.Mock(spec=socket) serv_sock.accept.return_value = (clnt_sock, None) assert not job.is_complete() - with SapphireLoadManager(job, serv_sock) as loadmgr: + with ConnectionManager(job, serv_sock) as loadmgr: assert loadmgr.wait(1) assert clnt_sock.close.call_count == 1 assert job.is_complete() assert not job.accepting.is_set() assert job.exceptions.empty() -def test_sapphire_load_manager_02(mocker): - """test SapphireLoadManager.start() failure""" - mocker.patch("sapphire.sapphire_load_manager.sleep", autospec=True) - fake_thread = mocker.patch("sapphire.sapphire_load_manager.Thread", autospec=True) +def test_connection_manager_02(mocker): + """test ConnectionManager.start() failure""" + mocker.patch("sapphire.connection_manager.sleep", autospec=True) + fake_thread = mocker.patch("sapphire.connection_manager.Thread", autospec=True) fake_thread.return_value.start.side_effect = ThreadError - job = mocker.Mock(spec=SapphireJob) + job = mocker.Mock(spec=Job) job.pending = True - loadmgr = SapphireLoadManager(job, None) + loadmgr = 
ConnectionManager(job, None) with raises(ThreadError): loadmgr.start() loadmgr.close() assert job.is_complete() -def test_sapphire_load_manager_03(mocker, tmp_path): - """test SapphireLoadManager multiple files and requests""" +def test_connection_manager_03(mocker, tmp_path): + """test ConnectionManager multiple files and requests""" (tmp_path / "test1").touch() (tmp_path / "test2").touch() (tmp_path / "test3").touch() - job = SapphireJob(str(tmp_path)) + job = Job(str(tmp_path)) clnt_sock = mocker.Mock(spec=socket) clnt_sock.recv.side_effect = ( b"GET /test1 HTTP/1.1", @@ -61,55 +61,55 @@ def test_sapphire_load_manager_03(mocker, tmp_path): serv_sock = mocker.Mock(spec=socket) serv_sock.accept.return_value = (clnt_sock, None) assert not job.is_complete() - with SapphireLoadManager(job, serv_sock, max_workers=2) as loadmgr: + with ConnectionManager(job, serv_sock, max_workers=2) as loadmgr: assert loadmgr.wait(1) assert clnt_sock.close.call_count == 8 assert job.is_complete() -def test_sapphire_load_manager_04(mocker, tmp_path): - """test SapphireLoadManager.wait()""" +def test_connection_manager_04(mocker, tmp_path): + """test ConnectionManager.wait()""" (tmp_path / "test1").touch() - job = SapphireJob(str(tmp_path)) + job = Job(str(tmp_path)) clnt_sock = mocker.Mock(spec=socket) clnt_sock.recv.return_value = b"" serv_sock = mocker.Mock(spec=socket) serv_sock.accept.return_value = (clnt_sock, None) - with SapphireLoadManager(job, serv_sock, max_workers=10) as loadmgr: + with ConnectionManager(job, serv_sock, max_workers=10) as loadmgr: # invalid callback with raises(TypeError, match="continue_cb must be callable"): loadmgr.wait(0, continue_cb="test") # callback abort assert loadmgr.wait(1, continue_cb=lambda: False, poll=0.01) # timeout - job = SapphireJob(str(tmp_path)) - fake_time = mocker.patch("sapphire.sapphire_load_manager.time", autospec=True) + job = Job(str(tmp_path)) + fake_time = mocker.patch("sapphire.connection_manager.time", autospec=True) fake_time.side_effect = (1, 2, 3) - with SapphireLoadManager(job, serv_sock, max_workers=10) as loadmgr: + with ConnectionManager(job, serv_sock, max_workers=10) as loadmgr: assert not loadmgr.wait(1, continue_cb=lambda: False, poll=0.01) -def test_sapphire_load_manager_05(mocker, tmp_path): - """test SapphireLoadManager re-raise worker exceptions""" +def test_connection_manager_05(mocker, tmp_path): + """test ConnectionManager re-raise worker exceptions""" (tmp_path / "test1").touch() - job = SapphireJob(str(tmp_path)) + job = Job(str(tmp_path)) clnt_sock = mocker.Mock(spec=socket) clnt_sock.recv.side_effect = Exception("worker exception") serv_sock = mocker.Mock(spec=socket) serv_sock.accept.return_value = (clnt_sock, None) with raises(Exception, match="worker exception"): - with SapphireLoadManager(job, serv_sock) as loadmgr: + with ConnectionManager(job, serv_sock) as loadmgr: loadmgr.wait(1) assert clnt_sock.close.call_count == 1 assert job.is_complete() assert job.exceptions.empty() -def test_sapphire_load_manager_06(mocker, tmp_path): - """test SapphireLoadManager re-raise launcher exceptions""" +def test_connection_manager_06(mocker, tmp_path): + """test ConnectionManager re-raise launcher exceptions""" (tmp_path / "test1").touch() - job = SapphireJob(str(tmp_path)) + job = Job(str(tmp_path)) serv_sock = mocker.Mock(spec=socket) serv_sock.accept.side_effect = Exception("launcher exception") with raises(Exception, match="launcher exception"): - with SapphireLoadManager(job, serv_sock) as loadmgr: + with ConnectionManager(job, 
serv_sock) as loadmgr: loadmgr.wait(1) assert job.is_complete() assert job.exceptions.empty() diff --git a/sapphire/test_sapphire_job.py b/sapphire/test_job.py similarity index 82% rename from sapphire/test_sapphire_job.py rename to sapphire/test_job.py index 658dccdc..30cdbc8d 100644 --- a/sapphire/test_sapphire_job.py +++ b/sapphire/test_job.py @@ -1,6 +1,6 @@ # coding=utf-8 """ -SapphireJob unit tests +Job unit tests """ # pylint: disable=protected-access @@ -8,14 +8,14 @@ import pytest -from .sapphire_job import SapphireJob +from .job import Job from .server_map import Resource, ServerMap from .status_codes import SERVED_ALL, SERVED_NONE, SERVED_REQUEST -def test_sapphire_job_01(tmp_path): - """test creating an empty SapphireJob""" - job = SapphireJob(str(tmp_path)) +def test_job_01(tmp_path): + """test creating an empty Job""" + job = Job(str(tmp_path)) assert not job.forever assert job.status == SERVED_ALL assert job.check_request("") is None @@ -31,8 +31,8 @@ def test_sapphire_job_01(tmp_path): assert not any(job.served) assert job.is_complete() -def test_sapphire_job_02(tmp_path): - """test SapphireJob two required files and one optional file""" +def test_job_02(tmp_path): + """test Job two required files and one optional file""" opt_path = tmp_path / "opt_file.txt" opt_path.write_bytes(b"a") req1_path = tmp_path / "req_file_1.txt" @@ -40,7 +40,7 @@ def test_sapphire_job_02(tmp_path): (tmp_path / "test").mkdir() req2_path = tmp_path / "test" / "req_file_2.txt" req2_path.write_bytes(b"a") - job = SapphireJob(str(tmp_path), optional_files=[opt_path.name]) + job = Job(str(tmp_path), optional_files=[opt_path.name]) assert job.status == SERVED_NONE assert not job.is_complete() resource = job.check_request("req_file_1.txt") @@ -65,12 +65,12 @@ def test_sapphire_job_02(tmp_path): job.finish() assert job.is_complete() -def test_sapphire_job_03(tmp_path): - """test SapphireJob redirects""" +def test_job_03(tmp_path): + """test Job redirects""" smap = ServerMap() smap.set_redirect("one", "somefile.txt", required=False) smap.set_redirect("two", "reqfile.txt") - job = SapphireJob(str(tmp_path), server_map=smap) + job = Job(str(tmp_path), server_map=smap) assert job.status == SERVED_NONE resource = job.check_request("one") assert resource.type == Resource.URL_REDIRECT @@ -81,8 +81,8 @@ def test_sapphire_job_03(tmp_path): assert job.remove_pending("two") assert job.pending == 0 -def test_sapphire_job_04(mocker, tmp_path): - """test SapphireJob includes""" +def test_job_04(mocker, tmp_path): + """test Job includes""" srv_root = tmp_path / "root" srv_include = tmp_path / "test" srv_include_2 = tmp_path / "test_2" @@ -109,7 +109,7 @@ def test_sapphire_job_04(mocker, tmp_path): smap.include["testinc/1/2/3"] = Resource(Resource.URL_INCLUDE, str(srv_include)) smap.include[""] = Resource(Resource.URL_INCLUDE, str(srv_include)) smap.set_include("testinc/inc2", str(srv_include_2)) - job = SapphireJob(str(srv_root), server_map=smap) + job = Job(str(srv_root), server_map=smap) assert job.status == SERVED_NONE # test includes that map to 'srv_include' for incl, inc_path in smap.include.items(): @@ -133,8 +133,8 @@ def test_sapphire_job_04(mocker, tmp_path): assert not job.is_forbidden(str(srv_root / ".." / "test" / "test_file.txt")) assert not job.is_forbidden(str(srv_include / ".." 
/ "root" / "req_file.txt")) -def test_sapphire_job_05(tmp_path): - """test SapphireJob.check_request() with tricky includes""" +def test_job_05(tmp_path): + """test Job.check_request() with tricky includes""" srv_root = tmp_path / "root" srv_root.mkdir() req = srv_root / "req_file.txt" @@ -149,7 +149,7 @@ def test_sapphire_job_05(tmp_path): # test url matching part of the file name smap = ServerMap() smap.include["inc"] = Resource(Resource.URL_INCLUDE, str(inc_dir)) - job = SapphireJob(str(srv_root), server_map=smap) + job = Job(str(srv_root), server_map=smap) resource = job.check_request("inc/sub/include.js") assert resource.type == Resource.URL_INCLUDE assert resource.target == str(inc_file1) @@ -179,12 +179,12 @@ def test_sapphire_job_05(tmp_path): #assert resource.type == Resource.URL_INCLUDE #assert resource.target == str(inc_c_d) -def test_sapphire_job_06(tmp_path): - """test SapphireJob dynamic""" +def test_job_06(tmp_path): + """test Job dynamic""" smap = ServerMap() smap.set_dynamic_response("cb1", lambda: 0, mime_type="mime_type") smap.set_dynamic_response("cb2", lambda: 1) - job = SapphireJob(str(tmp_path), server_map=smap) + job = Job(str(tmp_path), server_map=smap) assert job.status == SERVED_ALL assert job.pending == 0 resource = job.check_request("cb1") @@ -197,7 +197,7 @@ def test_sapphire_job_06(tmp_path): assert callable(resource.target) assert isinstance(resource.mime, str) -def test_sapphire_job_07(tmp_path): +def test_job_07(tmp_path): """test accessing forbidden files""" srv_root = tmp_path / "root" srv_root.mkdir() @@ -205,7 +205,7 @@ def test_sapphire_job_07(tmp_path): test_1.write_bytes(b"a") no_access = tmp_path / "no_access.txt" no_access.write_bytes(b"a") - job = SapphireJob(str(srv_root)) + job = Job(str(srv_root)) assert job.status == SERVED_NONE assert job.pending == 1 resource = job.check_request("../no_access.txt") @@ -216,35 +216,35 @@ def test_sapphire_job_07(tmp_path): @pytest.mark.skipif(platform.system() == "Windows", reason="Unsupported on Windows") -def test_sapphire_job_08(tmp_path): - """test SapphireJob with file names containing invalid characters""" +def test_job_08(tmp_path): + """test Job with file names containing invalid characters""" test_file = tmp_path / "test.txt" test_file.write_bytes(b"a") (tmp_path / "?_2.txt").write_bytes(b"a") - job = SapphireJob(str(tmp_path)) + job = Job(str(tmp_path)) assert job.status == SERVED_NONE assert job.pending == 1 assert job.check_request("test.txt").target == str(test_file) -def test_sapphire_job_09(): - """test SapphireJob with missing directory""" +def test_job_09(): + """test Job with missing directory""" with pytest.raises(OSError): - SapphireJob("missing") + Job("missing") -def test_sapphire_job_10(tmp_path): - """test SapphireJob.increment_served() and SapphireJob.served""" - job = SapphireJob(str(tmp_path)) +def test_job_10(tmp_path): + """test Job.increment_served() and Job.served""" + job = Job(str(tmp_path)) assert not any(job.served) job.increment_served(str(tmp_path / "file.bin")) assert "file.bin" in job.served job.increment_served("/some/include/path/inc.bin") assert "/some/include/path/inc.bin" in job.served -def test_sapphire_job_11(): - """test SapphireJob.lookup_mime()""" - assert SapphireJob.lookup_mime("unknown") == "application/octet-stream" - # look up from SapphireJob.MIME_MAP - assert ".avif" in SapphireJob.MIME_MAP, "test is broken" - assert SapphireJob.lookup_mime("test.avif") == "image/avif" +def test_job_11(): + """test Job.lookup_mime()""" + assert 
Job.lookup_mime("unknown") == "application/octet-stream" + # look up from Job.MIME_MAP + assert ".avif" in Job.MIME_MAP, "test is broken" + assert Job.lookup_mime("test.avif") == "image/avif" # look up known ext - assert SapphireJob.lookup_mime("test.html") == "text/html" + assert Job.lookup_mime("test.html") == "text/html" diff --git a/sapphire/test_sapphire.py b/sapphire/test_sapphire.py index 793f1812..36f3569e 100644 --- a/sapphire/test_sapphire.py +++ b/sapphire/test_sapphire.py @@ -12,7 +12,7 @@ import pytest from .core import Sapphire -from .sapphire_worker import SapphireWorker +from .worker import Worker from .server_map import ServerMap from .status_codes import SERVED_ALL, SERVED_NONE, SERVED_REQUEST, SERVED_TIMEOUT @@ -186,10 +186,10 @@ def is_running(): def test_sapphire_09(client, tmp_path): """test serving interesting sized files""" tests = [ - {"size": SapphireWorker.DEFAULT_TX_SIZE, "name": "even.html"}, - {"size": SapphireWorker.DEFAULT_TX_SIZE - 1, "name": "minus_one.html"}, - {"size": SapphireWorker.DEFAULT_TX_SIZE + 1, "name": "plus_one.html"}, - {"size": SapphireWorker.DEFAULT_TX_SIZE * 2, "name": "double.html"}, + {"size": Worker.DEFAULT_TX_SIZE, "name": "even.html"}, + {"size": Worker.DEFAULT_TX_SIZE - 1, "name": "minus_one.html"}, + {"size": Worker.DEFAULT_TX_SIZE + 1, "name": "plus_one.html"}, + {"size": Worker.DEFAULT_TX_SIZE * 2, "name": "double.html"}, {"size": 1, "name": "one.html"}, {"size": 0, "name": "zero.html"}, ] diff --git a/sapphire/test_sapphire_worker.py b/sapphire/test_worker.py similarity index 65% rename from sapphire/test_sapphire_worker.py rename to sapphire/test_worker.py index 03aad2b8..db9bf980 100644 --- a/sapphire/test_sapphire_worker.py +++ b/sapphire/test_worker.py @@ -9,14 +9,14 @@ import pytest -from .sapphire_job import SapphireJob -from .sapphire_worker import SapphireWorker, SapphireWorkerError +from .job import Job +from .worker import Worker, WorkerError -def test_sapphire_worker_01(mocker): - """test simple SapphireWorker in running state""" +def test_worker_01(mocker): + """test simple Worker in running state""" wthread = mocker.Mock(spec=threading.Thread) wthread.is_alive.return_value = True - worker = SapphireWorker(mocker.Mock(spec=socket.socket), wthread) + worker = Worker(mocker.Mock(spec=socket.socket), wthread) assert worker._conn is not None assert worker._thread is not None # it is assumed that launch() has already been called at this point @@ -33,44 +33,44 @@ def test_sapphire_worker_01(mocker): assert worker._thread is None assert worker.done -def test_sapphire_worker_02(mocker): - """test simple SapphireWorker fails to close""" - worker = SapphireWorker( +def test_worker_02(mocker): + """test simple Worker fails to close""" + worker = Worker( mocker.Mock(spec=socket.socket), mocker.Mock(spec=threading.Thread)) # it is assumed that launch() has already been called at this point worker._thread.is_alive.return_value = True - with pytest.raises(SapphireWorkerError, match="Worker thread failed to join!"): + with pytest.raises(WorkerError, match="Worker thread failed to join!"): worker.close() -def test_sapphire_worker_03(mocker): - """test SapphireWorker.launch() fail cases""" +def test_worker_03(mocker): + """test Worker.launch() fail cases""" serv_con = mocker.Mock(spec=socket.socket) - serv_job = mocker.Mock(spec=SapphireJob) - fake_thread = mocker.patch("sapphire.sapphire_worker.Thread", autospec=True) - mocker.patch("sapphire.sapphire_worker.sleep", autospec=True) + serv_job = mocker.Mock(spec=Job) + fake_thread = 
mocker.patch("sapphire.worker.Thread", autospec=True) + mocker.patch("sapphire.worker.sleep", autospec=True) serv_con.accept.side_effect = socket.timeout - assert SapphireWorker.launch(serv_con, serv_job) is None + assert Worker.launch(serv_con, serv_job) is None serv_con.accept.side_effect = None conn = mocker.Mock(spec=socket.socket) serv_con.accept.return_value = (conn, None) fake_thread.side_effect = threading.ThreadError - assert SapphireWorker.launch(serv_con, serv_job) is None + assert Worker.launch(serv_con, serv_job) is None assert conn.close.call_count == 1 assert serv_job.accepting.clear.call_count == 0 assert serv_job.accepting.set.call_count == 1 -def test_sapphire_worker_04(mocker, tmp_path): - """test SapphireWorker.launch()""" +def test_worker_04(mocker, tmp_path): + """test Worker.launch()""" (tmp_path / "testfile").touch() - job = SapphireJob(str(tmp_path)) + job = Job(str(tmp_path)) clnt_sock = mocker.Mock(spec=socket.socket) clnt_sock.recv.return_value = b"GET /testfile HTTP/1.1" serv_sock = mocker.Mock(spec=socket.socket) serv_sock.accept.return_value = (clnt_sock, None) - worker = SapphireWorker.launch(serv_sock, job) + worker = Worker.launch(serv_sock, job) assert worker is not None try: assert job.is_complete(wait=1) @@ -80,37 +80,37 @@ def test_sapphire_worker_04(mocker, tmp_path): assert serv_sock.accept.call_count == 1 assert clnt_sock.close.call_count == 2 -def test_sapphire_worker_05(mocker): - """test SapphireWorker.handle_request() socket errors""" +def test_worker_05(mocker): + """test Worker.handle_request() socket errors""" serv_con = mocker.Mock(spec=socket.socket) serv_con.recv.side_effect = socket.error - serv_job = mocker.Mock(spec=SapphireJob) - SapphireWorker.handle_request(serv_con, serv_job) + serv_job = mocker.Mock(spec=Job) + Worker.handle_request(serv_con, serv_job) assert serv_job.accepting.set.call_count == 1 assert serv_con.sendall.call_count == 0 assert serv_con.close.call_count == 1 def test_response_data_01(): """test _200_header()""" - output = SapphireWorker._200_header(10, "text/html") + output = Worker._200_header(10, "text/html") assert b"Content-Length: 10" in output assert b"Content-Type: text/html" in output def test_response_data_02(): """test _307_redirect()""" - output = SapphireWorker._307_redirect("http://some.test.url") + output = Worker._307_redirect("http://some.test.url") assert b"Location: http://some.test.url" in output def test_response_data_03(): """test _4xx_page() without close timeout""" - output = SapphireWorker._4xx_page(400, "Bad Request") + output = Worker._4xx_page(400, "Bad Request") assert b"Content-Length: " in output assert b"HTTP/1.1 400 Bad Request" in output assert b"400!" 
in output def test_response_data_04(): """test _4xx_page() with close timeout""" - output = SapphireWorker._4xx_page(404, "Not Found", close=10) + output = Worker._4xx_page(404, "Not Found", close=10) assert b"Content-Length: " in output assert b"HTTP/1.1 404 Not Found" in output assert b"" in output diff --git a/sapphire/sapphire_worker.py b/sapphire/worker.py similarity index 97% rename from sapphire/sapphire_worker.py rename to sapphire/worker.py index 6bc46d5a..5d892f21 100644 --- a/sapphire/sapphire_worker.py +++ b/sapphire/worker.py @@ -23,11 +23,11 @@ LOG = getLogger(__name__) -class SapphireWorkerError(Exception): - """Raised by SapphireWorker""" +class WorkerError(Exception): + """Raised by Worker""" -class SapphireWorker(object): +class Worker(object): DEFAULT_REQUEST_LIMIT = 0x1000 # 4KB DEFAULT_TX_SIZE = 0x10000 # 64KB REQ_PATTERN = re_compile(b"^GET\\s/(?P\\S*)\\sHTTP/1") @@ -77,7 +77,7 @@ def close(self): self.join(timeout=60) if self._thread is not None and self._thread.is_alive(): # this is here to catch unexpected hangs - raise SapphireWorkerError("Worker thread failed to join!") + raise WorkerError("Worker thread failed to join!") @property def done(self): @@ -115,7 +115,7 @@ def handle_request(cls, conn, serv_job): finish_job = serv_job.remove_pending(request) elif resource.type != Resource.URL_DYNAMIC: # pragma: no cover # this should never happen - raise SapphireWorkerError("Unknown resource type %r" % (resource.type,)) + raise WorkerError("Unknown resource type %r" % (resource.type,)) if finish_job and serv_job.forever: LOG.debug("serv_job.forever is set, resetting finish_job") From 4a45df176de7754bea5af6781ca25c76260b5204 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 1 Oct 2020 14:01:38 -0700 Subject: [PATCH 032/531] [sapphire] Add __slots__ to Sapphire --- sapphire/core.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sapphire/core.py b/sapphire/core.py index 6742c7cb..5a607329 100644 --- a/sapphire/core.py +++ b/sapphire/core.py @@ -26,6 +26,8 @@ class Sapphire(object): + __slots__ = ("_auto_close", "_max_workers", "_socket", "_timeout") + def __init__(self, allow_remote=False, auto_close=-1, max_workers=10, port=None, timeout=60): self._auto_close = auto_close # call 'window.close()' on 4xx error pages self._max_workers = max_workers # limit worker threads From bd1fcf088059e81215d93a2dfeb951a77c2cf673 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 1 Oct 2020 14:13:58 -0700 Subject: [PATCH 033/531] [sapphire] Move status codes to job.py --- sapphire/__init__.py | 2 +- sapphire/core.py | 3 +-- sapphire/job.py | 8 +++++++- sapphire/status_codes.py | 9 --------- sapphire/test_job.py | 3 +-- sapphire/test_sapphire.py | 2 +- 6 files changed, 11 insertions(+), 16 deletions(-) delete mode 100644 sapphire/status_codes.py diff --git a/sapphire/__init__.py b/sapphire/__init__.py index 7f4370ce..49c6cb9f 100644 --- a/sapphire/__init__.py +++ b/sapphire/__init__.py @@ -7,8 +7,8 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
from .core import Sapphire +from .job import SERVED_ALL, SERVED_NONE, SERVED_REQUEST, SERVED_TIMEOUT from .server_map import ServerMap -from .status_codes import SERVED_ALL, SERVED_NONE, SERVED_REQUEST, SERVED_TIMEOUT __all__ = ("Sapphire", "SERVED_ALL", "SERVED_NONE", "SERVED_REQUEST", "SERVED_TIMEOUT", "ServerMap") __author__ = "Tyson Smith" diff --git a/sapphire/core.py b/sapphire/core.py index 5a607329..2df8cdf2 100644 --- a/sapphire/core.py +++ b/sapphire/core.py @@ -13,9 +13,8 @@ from socket import error as sock_error, gethostname, socket from time import sleep -from .job import Job +from .job import Job, SERVED_ALL, SERVED_NONE, SERVED_TIMEOUT from .connection_manager import ConnectionManager -from .status_codes import SERVED_ALL, SERVED_NONE, SERVED_TIMEOUT __author__ = "Tyson Smith" diff --git a/sapphire/job.py b/sapphire/job.py index ddea30b9..625f533a 100644 --- a/sapphire/job.py +++ b/sapphire/job.py @@ -14,7 +14,6 @@ from threading import Event, Lock from .server_map import Resource -from .status_codes import SERVED_ALL, SERVED_NONE, SERVED_REQUEST __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] @@ -22,6 +21,13 @@ LOG = getLogger(__name__) +# job status codes +SERVED_ALL = 0 # all expected requests for required files have been received +SERVED_NONE = 1 # no requests for required files have been received +SERVED_REQUEST = 2 # some requests for required files have been received +SERVED_TIMEOUT = 3 # timeout occurred + + Tracker = namedtuple("Tracker", "files lock") diff --git a/sapphire/status_codes.py b/sapphire/status_codes.py deleted file mode 100644 index ef95b4ce..00000000 --- a/sapphire/status_codes.py +++ /dev/null @@ -1,9 +0,0 @@ -# coding=utf-8 -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-
-SERVED_ALL = 0 # all expected requests for required files have been received
-SERVED_NONE = 1 # no requests for required files have been received
-SERVED_REQUEST = 2 # some requests for required files have been received
-SERVED_TIMEOUT = 3 # timeout occurred
diff --git a/sapphire/test_job.py b/sapphire/test_job.py
index 30cdbc8d..205fb215 100644
--- a/sapphire/test_job.py
+++ b/sapphire/test_job.py
@@ -8,9 +8,8 @@
 
 import pytest
 
-from .job import Job
+from .job import Job, SERVED_ALL, SERVED_NONE, SERVED_REQUEST
 from .server_map import Resource, ServerMap
-from .status_codes import SERVED_ALL, SERVED_NONE, SERVED_REQUEST
 
 
 def test_job_01(tmp_path):
diff --git a/sapphire/test_sapphire.py b/sapphire/test_sapphire.py
index 36f3569e..41e0e752 100644
--- a/sapphire/test_sapphire.py
+++ b/sapphire/test_sapphire.py
@@ -12,9 +12,9 @@
 import pytest
 
 from .core import Sapphire
+from .job import SERVED_ALL, SERVED_NONE, SERVED_REQUEST, SERVED_TIMEOUT
 from .worker import Worker
 from .server_map import ServerMap
-from .status_codes import SERVED_ALL, SERVED_NONE, SERVED_REQUEST, SERVED_TIMEOUT

From 5ef110f4604b01b3ead3939c799aa7df7ef540e3 Mon Sep 17 00:00:00 2001
From: Tyson Smith
Date: Thu, 1 Oct 2020 15:43:09 -0700
Subject: [PATCH 034/531] [sapphire] Update comments

---
 sapphire/core.py | 90 ++++++++++++++++++++++++++++++++++--------------
 1 file changed, 64 insertions(+), 26 deletions(-)

diff --git a/sapphire/core.py b/sapphire/core.py
index 2df8cdf2..efb6aaf7 100644
--- a/sapphire/core.py
+++ b/sapphire/core.py
@@ -41,10 +41,21 @@ def __exit__(self, *exc):
         self.close()
 
     @staticmethod
-    def _create_listening_socket(allow_remote, requested_port, retries=20):
-        # The intention of this function is to contain the socket creation code
-        # along with all the searching and retrying code. If a specific port is requested
-        # and it is not available a socket.error will be raised.
+    def _create_listening_socket(remote, port=None, retries=20):
+        """Create listening socket. Search for an open socket if needed
+        and configure the socket. If a specific port is unavailable or no
+        available ports can be found a socket.error will be raised.
+
+        Args:
+            remote (bool): Accept all (non-local) incoming connections.
+            port (int): Port to listen on. If None is given a random port will
+                be used.
+            retries (int): Number of attempts to bind the socket.
+
+        Returns:
+            socket: A listening socket.
+        """
+        addr = "0.0.0.0" if remote else "127.0.0.1"
         for retry in reversed(range(retries)):
             sock = None
             try:
@@ -53,8 +64,7 @@ def _create_listening_socket(allow_remote, requested_port, retries=20):
                 sock.settimeout(0.25)
                 # find an unused port and avoid blocked ports
                 # see: dxr.mozilla.org/mozilla-central/source/netwerk/base/nsIOService.cpp
-                port = requested_port or randint(0x2000, 0xFFFF)
-                sock.bind(("0.0.0.0" if allow_remote else "127.0.0.1", port))
+                sock.bind((addr, port or randint(0x2000, 0xFFFF)))
                 sock.listen(5)
             except (OSError, sock_error) as soc_e:
                 if sock is not None:
@@ -67,38 +77,48 @@ def _create_listening_socket(allow_remote, requested_port, retries=20):
         return sock
 
     def close(self):
-        """
-        close()
+        """Close listening server socket.
 
-        This function closes the listening server socket if it is open.
+        Args:
+            None
+
+        Returns:
+            None
         """
         if self._socket is not None:
             self._socket.close()
 
     @property
    def port(self):
-        """
-        port -> int
+        """Port number of listening socket.
 
-        returns the port number the socket is listening on
-        """
+        Args:
+            None
+
+        Returns:
+            int: Listening port number.
+ """ return self._socket.getsockname()[1] def serve_path(self, path, continue_cb=None, forever=False, optional_files=None, server_map=None): - """ - serve_path() -> tuple - path is the directory that will be used as wwwroot. The callback continue_cb should - be a function that returns True or False. If continue_cb is specified and returns False - the server serve loop will exit. optional_files is list of files that do not need to be - served in order to exit the serve loop. - - returns a tuple (server status, files served) - server status is an int: - - SERVED_ALL: All files excluding files int the optional_files list were served - - SERVED_NONE: No files were served - - SERVED_REQUEST: Some files were requested - files served is a list of the files that were served + """Serve files in path. On completion a list served files and a status + code will be returned. + The status codes include: + - SERVED_ALL: All files excluding files in optional_files were served + - SERVED_NONE: No files were served + - SERVED_REQUEST: Some files were requested + + Args: + path (str): Directory to use a wwwroot. + continue_cb (str): A callback that can be used to exit the serve loop. + This should a function that returns a bool. + forever (bool): Continue to handle requests even after all files have + been served. This is meant to be used with continue_cb. + optional_files (list): Files that do not need to be served in order + to exit the serve loop. + server_map (ServerMap): + Returns: + tuple: Files served and status code """ LOG.debug("serving %r (forever=%r)", path, forever) job = Job( @@ -118,10 +138,28 @@ def serve_path(self, path, continue_cb=None, forever=False, optional_files=None, @property def timeout(self): + """The amount of time that must pass before exit the serve loop and + indicating a timeout. + + Args: + None + + Returns: + int: Timeout in seconds. + """ return self._timeout @timeout.setter def timeout(self, value): + """The amount of time that must pass before exit the serve loop and + indicating a timeout. + + Args: + value (int): Timeout in seconds. 
+ + Returns: + None + """ if not value: self._timeout = 0 else: From 13869aa57ef291162c1bcc319be62474eaa34f2d Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 1 Oct 2020 16:49:04 -0700 Subject: [PATCH 035/531] [sapphire] Debug log nit --- sapphire/worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sapphire/worker.py b/sapphire/worker.py index 5d892f21..46915eda 100644 --- a/sapphire/worker.py +++ b/sapphire/worker.py @@ -172,8 +172,8 @@ def handle_request(cls, conn, serv_job): serv_job.increment_served(resource.target) except (sock_error, sock_timeout): - exc_type, exc_obj, exc_tb = exc_info() - LOG.debug("%s: %r (line %d)", exc_type.__name__, exc_obj, exc_tb.tb_lineno) + _, exc_obj, exc_tb = exc_info() + LOG.debug("%r - line %d", exc_obj, exc_tb.tb_lineno) if not finish_job: serv_job.accepting.set() From b9ca752b399f938c3669388b2442fc8ccfb0f6b9 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 8 Oct 2020 14:57:53 -0700 Subject: [PATCH 036/531] [replay] Update debug output --- grizzly/replay/replay.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index b9b676c7..bd2ece2c 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -261,14 +261,14 @@ def _dyn_close(): # pragma: no cover if self.status.iteration < repeat: LOG.debug("skipping remaining attempts") # failed to reproduce issue - LOG.debug("results (%d) < expected, %s after %d attempts", + LOG.debug("results (%d) < minimum (%d), after %d attempts", self.status.results, min_results, self.status.iteration) break # check if complete (results found) if self.status.results >= min_results: assert self.status.results == min_results assert sum(x.count for x in reports.values() if x.expected) >= min_results - LOG.debug("results == expected, %s after %d attempts", + LOG.debug("results == expected (%d), after %d attempts", min_results, self.status.iteration) break From cc42b56b251074a1f039eb06c1835ed392e091d7 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 8 Oct 2020 14:59:26 -0700 Subject: [PATCH 037/531] [sapphire] Remove old pylint suppression --- sapphire/server_map.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sapphire/server_map.py b/sapphire/server_map.py index 47fb181c..1b565f8f 100644 --- a/sapphire/server_map.py +++ b/sapphire/server_map.py @@ -11,7 +11,7 @@ __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] -LOG = getLogger(__name__) # pylint: disable=invalid-name +LOG = getLogger(__name__) class InvalidURLError(Exception): From 827d83420541c196798b795a497a8e4918417c03 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 14 Oct 2020 13:18:07 -0700 Subject: [PATCH 038/531] [reduce] Add cssbeautifier to dependencies list --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 592c172c..846b8df5 100755 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ EXTRAS = { - 'reduce': ['lithium-reducer', 'jsbeautifier'], + 'reduce': ['cssbeautifier', 'lithium-reducer', 'jsbeautifier'], 's3': ['boto3'], } EXTRAS['all'] = list(set(chain.from_iterable(EXTRAS.values()))) From 2937aadb0d37af1dd5a6892e6523a57affb0a045 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Fri, 16 Oct 2020 10:16:12 -0700 Subject: [PATCH 039/531] Update comments --- grizzly/common/storage.py | 82 ++++++++++++++++++++------------------- 1 file changed, 43 insertions(+), 39 deletions(-) diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index 
3fa30bbd..831f7b34 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -62,8 +62,8 @@ def _add(self, target, test_file): """Add a test file to test case and perform sanity checks. Args: - target (list): Specific list of Files to append target test_file to. - test_file (TestFile): TestFile to append + target (list): Specific list of files to append target test_file to. + test_file (TestFile): TestFile to append. Returns: None @@ -76,14 +76,14 @@ def _add(self, target, test_file): def add_batch(self, path, include_files, prefix=None): """Iterate over files in include_files and attach the files that are - located in path to testcase. + located in path to TestCase. Args: path (str): Path to the root of the directory that contains files. include_files (iterable): Paths of the files to be added to the - test case if they exist in path. + TestCase if they exist in path. prefix (str): Path prefix to prepend to file when adding to - test case. + the TestCase. Returns: None @@ -98,10 +98,10 @@ def add_batch(self, path, include_files, prefix=None): self.add_from_file(fname, file_name=test_path) def add_meta(self, meta_file): - """Add a test file to test case as a meta file. + """Add a TestFile to TestCase as a meta file. Args: - meta_file (TestFile): TestFile to add to TestCase + meta_file (TestFile): TestFile to add to TestCase. Returns: None @@ -109,11 +109,11 @@ def add_meta(self, meta_file): self._add(self._files.meta, meta_file) def add_environ_var(self, name, value): - """Add environment variable to test case. + """Add environment variable to TestCase. Args: - name (str): Environment variable name - value (str): Environment variable value + name (str): Environment variable name. + value (str): Environment variable value. Returns: None @@ -121,11 +121,11 @@ def add_environ_var(self, name, value): self.env_vars[name] = value def add_file(self, test_file, required=True): - """Add a test file to test case. + """Add a TestFile to TestCase. Args: - meta_file (TestFile): TestFile to add to TestCase - required (bool): Indicates if test file must be served + meta_file (TestFile): TestFile to add to TestCase. + required (bool): Indicates if test file must be served. Returns: None @@ -136,13 +136,14 @@ def add_file(self, test_file, required=True): self._add(self._files.optional, test_file) def add_from_data(self, data, file_name, encoding="UTF-8", required=True): - """Create a TestFile and add it to the test case. + """Create a TestFile and add it to the TestCase. Args: - data (bytes): Data to write to file - file_name (str): Name for the test file - encoding (str): Encoding to be used - required (bool): Indicates if test file must be served + data (bytes or str): Data to write to file. If data is of type str + encoding must be given. + file_name (str): Name for the TestFile. + encoding (str): Encoding to be used. + required (bool): Indicates whether the TestFile must be served. Returns: None @@ -155,12 +156,13 @@ def add_from_data(self, data, file_name, encoding="UTF-8", required=True): raise def add_from_file(self, input_file, file_name=None, required=True): - """Create a TestFile from an existing file and add it to the test case. + """Create a TestFile from an existing file and add it to the TestCase. Args: - input_file (str): Path to existing file to use - file_name (str): Name for the test file - required (bool): Indicates if test file must be served + input_file (str): Path to existing file to use. + file_name (str): Name for the TestFile. 
If file_name is not given + the name of the input_file will be used. + required (bool): Indicates whether the TestFile must be served. Returns: None @@ -192,7 +194,7 @@ def contains(self, file_name): file_name (str): File name to search for in TestCase. Returns: - bool: True if file exists in the TestCase otherwise False + bool: True if file exists in the TestCase otherwise False. """ return file_name in self._existing_paths @@ -215,8 +217,8 @@ def dump(self, out_path, include_details=False): """Write all the test case data to the filesystem. Args: - out_path (str): Path to directory to output data - include_details (bool): Output "test_info.json" file + out_path (str): Path to directory to output data. + include_details (bool): Output "test_info.json" file. Returns: None @@ -407,7 +409,7 @@ def purge_optional(self, keep): opt_files = tuple(x.file_name for x in self._files.optional) if not opt_files: # nothing to purge - return None + return # filter required files from opt_files files to keep keep_opt = list() for fname in set(keep): @@ -422,7 +424,6 @@ def purge_optional(self, keep): to_remove.append(idx) for idx in reversed(to_remove): self._files.optional.pop(idx).close() - return None @staticmethod def scan_path(path): @@ -465,7 +466,8 @@ def __init__(self, file_name): or ("/" in file_name and not file_name.rsplit("/", 1)[-1]) \ or file_name.startswith("../"): raise TypeError("file_name is invalid %r" % (file_name,)) - self._file_name = os.path.normpath(file_name) # name including path relative to wwwroot + # name including path relative to wwwroot + self._file_name = os.path.normpath(file_name) self._fp = SpooledTemporaryFile( dir=grz_tmp("storage"), max_size=self.CACHE_LIMIT, @@ -498,7 +500,7 @@ def close(self): None Returns: - None TestFile instance + None """ self._fp.close() @@ -519,10 +521,10 @@ def data(self): return data def dump(self, path): - """Write test file data to the filesystem. + """Write TestFile data to the filesystem. Args: - path (str): Path to output data + path (str): Path to output data. Returns: None @@ -543,12 +545,13 @@ def from_data(cls, data, file_name, encoding="UTF-8"): """Create a TestFile and add it to the test case. Args: - data (bytes): Data to write to file - file_name (str): Name for the test file - encoding (str): Encoding to be used + data (bytes or str): Data to write to file. If data is of type str + encoding must be given. + file_name (str): Name for the TestFile. + encoding (str): Encoding to be used. Returns: - TestFile: new instance + TestFile: A TestFile. """ t_file = cls(file_name) if data: @@ -563,11 +566,12 @@ def from_file(cls, input_file, file_name=None): """Create a TestFile from an existing file. Args: - input_file (str): Path to existing file to use - file_name (str): Name for the test file + input_file (str): Path to existing file to use. + file_name (str): Name for the TestFile. If file_name is not given + the name of the input_file will be used. Returns: - TestFile: new instance + TestFile: A TestFile. """ if file_name is None: file_name = os.path.basename(input_file) @@ -596,7 +600,7 @@ def write(self, data): """Add data to the TestFile. Args: - data (bytes): Data to add to the TestFile + data (bytes): Data to add to the TestFile. 
Returns: None From 76b2a9cc2abb0e09e3ba80bc7e068e4d553ad562 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 20 Oct 2020 10:59:44 -0700 Subject: [PATCH 040/531] Bump version to 0.9.7 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 846b8df5..65d47c67 100755 --- a/setup.py +++ b/setup.py @@ -70,4 +70,4 @@ ], package_data={"grizzly.common": ["harness.html"]}, url='https://github.com/MozillaSecurity/grizzly', - version='0.9.6') + version='0.9.7') From e7c56b8a6597f6531297f318321289f7ebd613c2 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 21 Oct 2020 11:23:47 -0700 Subject: [PATCH 041/531] [reduce] Pin lithium-reducer version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 65d47c67..564de532 100755 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ EXTRAS = { - 'reduce': ['cssbeautifier', 'lithium-reducer', 'jsbeautifier'], + 'reduce': ['cssbeautifier', 'lithium-reducer<0.4', 'jsbeautifier'], 's3': ['boto3'], } EXTRAS['all'] = list(set(chain.from_iterable(EXTRAS.values()))) From 88ed28f0e4914473195010cd54424f83b56a23f4 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 21 Oct 2020 11:24:29 -0700 Subject: [PATCH 042/531] Bump version to 0.9.8 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 564de532..d46c4108 100755 --- a/setup.py +++ b/setup.py @@ -70,4 +70,4 @@ ], package_data={"grizzly.common": ["harness.html"]}, url='https://github.com/MozillaSecurity/grizzly', - version='0.9.7') + version='0.9.8') From 52af6b9f03ecbf3589a26c17b02d4a2111da0fa4 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Fri, 23 Oct 2020 09:39:53 -0700 Subject: [PATCH 043/531] Fix docstrings --- grizzly/common/storage.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index 831f7b34..3015c891 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -200,7 +200,8 @@ def contains(self, file_name): @property def data_size(self): - """The total amount of data used by the test case (bytes). + """The total amount of data used (bytes) by the TestFiles in the + TestCase. Args: None @@ -244,7 +245,7 @@ def dump(self, out_path, include_details=False): meta_file.dump(out_path) def get_file(self, file_name): - """Look up and return the TestFile with the specified file name. + """Lookup and return the TestFile with the specified file name. Args: file_name (str): Name of file to retrieve. @@ -327,9 +328,9 @@ def load_single(cls, path, load_prefs, adjacent=False): Args: path (str): Path to the directory or file to load. load_prefs (bool): Load prefs.js file if available. - adjacent (str): Load adjacent files as part of the test case. - This is always the case when loading a directory. - WARNING: This should be used with caution! + adjacent (bool): Load adjacent files as part of the TestCase. + This is always true when loading a directory. + WARNING: This should be used with caution! Returns: TestCase: A TestCase. 
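The storage.py docstring updates in PATCH 039 and PATCH 043 describe the TestCase
helpers without showing them in use. The following is a rough usage sketch only:
the import path is inferred from the grizzly/common/storage.py location, and the
file names, data and output directory are illustrative rather than taken from the
patches.

    from grizzly.common.storage import TestCase  # import path assumed

    # build a test case with the helpers documented above
    with TestCase("index.html", "redirect.html", "test-adapter") as test:
        # add_from_data() accepts bytes or str (encoding applies to str data)
        test.add_from_data("<html>hello</html>", "index.html")
        # required=False marks the file as optional (it does not need to be served)
        test.add_from_data("// helper script", "extra.js", required=False)
        assert test.contains("index.html")
        # write the test case data, including "test_info.json", to disk
        test.dump("example-testcase", include_details=True)

    # load_single() (docstring fixed in PATCH 043) reads a single test case back
    loaded = TestCase.load_single("example-testcase", load_prefs=False)
    loaded.cleanup()
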
From 759ed7e39d30f4473f457dc2423e0b3c3af1f03e Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Fri, 23 Oct 2020 13:57:17 -0700 Subject: [PATCH 044/531] Update TravisCI badge --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ba90a278..00db32f5 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ Grizzly ======= -[![Build Status](https://travis-ci.org/MozillaSecurity/grizzly.svg?branch=master)](https://travis-ci.org/MozillaSecurity/grizzly) +[![Build Status](https://travis-ci.com/MozillaSecurity/grizzly.svg?branch=master)](https://travis-ci.com/MozillaSecurity/grizzly) [![codecov](https://codecov.io/gh/MozillaSecurity/grizzly/branch/master/graph/badge.svg)](https://codecov.io/gh/MozillaSecurity/grizzly) [![Matrix](https://img.shields.io/badge/dynamic/json?color=green&label=chat&query=%24.chunk[%3F(%40.canonical_alias%3D%3D%22%23fuzzing%3Amozilla.org%22)].num_joined_members&suffix=%20users&url=https%3A%2F%2Fmozilla.modular.im%2F_matrix%2Fclient%2Fr0%2FpublicRooms&style=flat&logo=matrix)](https://riot.im/app/#/room/#fuzzing:mozilla.org) [![PyPI](https://img.shields.io/pypi/v/grizzly-framework)](https://pypi.org/project/grizzly-framework) @@ -14,7 +14,7 @@ be served to and processed by the browser. Cross platform compatibility is available for Windows, Linux and MacOS. However not all features may be available. -For additional information please check out the [wiki](https://github.com/MozillaSecurity/grizzly/wiki) or the [announcement](https://blog.mozilla.org/security/2019/07/10/grizzly/) +For additional information please check out the [wiki](https://github.com/MozillaSecurity/grizzly/wiki) or the [announcement](https://blog.mozilla.org/security/2019/07/10/grizzly/). Installation ------------ From e90a30e65240282d2e1af7bc1f69e81fe4f11f6c Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 28 Oct 2020 11:15:17 -0700 Subject: [PATCH 045/531] Reduce log output during run --- grizzly/common/runner.py | 1 + grizzly/reduce/test_common.py | 1 + grizzly/replay/replay.py | 1 - grizzly/session.py | 1 - grizzly/target/puppet_target.py | 10 +++++----- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/grizzly/common/runner.py b/grizzly/common/runner.py index b7797fa3..1f9b9f6a 100644 --- a/grizzly/common/runner.py +++ b/grizzly/common/runner.py @@ -102,6 +102,7 @@ def launch(self, location, env_mod=None, max_retries=3, retry_delay=0): assert self._target is not None assert max_retries >= 0 assert retry_delay >= 0 + LOG.debug("launching target (timeout %ds)", self._target.launch_timeout) for retries in reversed(range(max_retries)): try: self._target.launch(location, env_mod=env_mod) diff --git a/grizzly/reduce/test_common.py b/grizzly/reduce/test_common.py index 7a24a239..02b386ee 100644 --- a/grizzly/reduce/test_common.py +++ b/grizzly/reduce/test_common.py @@ -32,6 +32,7 @@ def __init__(self, *args, **kwds): self.closed = True self.binary = "" self.forced_close = os.getenv("GRZ_FORCED_CLOSE", "1").lower() not in ("false", "0") + self.launch_timeout = 300 self.prefs = None self.rl_countdown = 0 self.use_valgrind = False diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index bd2ece2c..648709d8 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -158,7 +158,6 @@ def _dyn_close(): # pragma: no cover for _ in range(repeat): self.status.iteration += 1 if self.target.closed: - LOG.info("Launching target...") if self._harness is None: location = runner.location( "/grz_current_test", diff --git 
a/grizzly/session.py b/grizzly/session.py index 4f5addc6..da0ef748 100644 --- a/grizzly/session.py +++ b/grizzly/session.py @@ -154,7 +154,6 @@ def _dyn_close(): # pragma: no cover close_after=self.target.rl_reset, forced_close=self.target.forced_close, timeout=self.adapter.TEST_DURATION) - log.info("Launching target") runner.launch(location, max_retries=3, retry_delay=0) self.target.step() diff --git a/grizzly/target/puppet_target.py b/grizzly/target/puppet_target.py index 9e3e6fc7..3eb052ea 100644 --- a/grizzly/target/puppet_target.py +++ b/grizzly/target/puppet_target.py @@ -116,24 +116,24 @@ def detect_failure(self, ignored, was_timeout): # if something has happened figure out what if not is_healthy: if self._puppet.reason == FFPuppet.RC_CLOSED: - LOG.info("target.close() was called") + LOG.debug("target.close() was called") elif self._puppet.reason == FFPuppet.RC_EXITED: - LOG.info("Target closed itself") + LOG.debug("target closed itself") elif (self._puppet.reason == FFPuppet.RC_WORKER and "memory" in ignored and "ffp_worker_memory_usage" in self._puppet.available_logs()): status = self.RESULT_IGNORED - LOG.info("Memory limit exceeded") + LOG.debug("memory limit exceeded") elif (self._puppet.reason == FFPuppet.RC_WORKER and "log-limit" in ignored and "ffp_worker_log_size" in self._puppet.available_logs()): status = self.RESULT_IGNORED - LOG.info("Log size limit exceeded") + LOG.debug("log size limit exceeded") else: LOG.debug("failure detected, ffpuppet return code: %r", self._puppet.reason) status = self.RESULT_FAILURE elif was_timeout: - LOG.info("Timeout detected") + LOG.debug("timeout detected") status = self.RESULT_IGNORED if "timeout" in ignored else self.RESULT_FAILURE return status From 3bf198136f5b501d733da37f2487b925d95cfba4 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 28 Oct 2020 12:58:30 -0700 Subject: [PATCH 046/531] [replay] Add exit_early argument to ReplayManager.run() --- grizzly/replay/replay.py | 38 ++++++++++++++++++++--------------- grizzly/replay/test_replay.py | 22 ++++++++++++++++++++ 2 files changed, 44 insertions(+), 16 deletions(-) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 648709d8..d1f25f0a 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -104,7 +104,7 @@ def report_to_filesystem(path, results, tests=None): for result in expected: reporter.submit(tests or [], report=result.report) - def run(self, testcases, repeat=1, min_results=1, idle_delay=0, idle_threshold=0): + def run(self, testcases, repeat=1, min_results=1, exit_early=True, idle_delay=0, idle_threshold=0): """Run testcase replay. Args: @@ -112,6 +112,11 @@ def run(self, testcases, repeat=1, min_results=1, idle_delay=0, idle_threshold=0 repeat (int): Maximum number of times to run the TestCase. min_results (int): Minimum number of results needed before run can be considered successful. + exit_early (bool): If True the minimum required number of iterations + are performed to either meet `min_results` or + determine that it is not possible to do so. + If False `repeat` number of iterations are + performed. idle_delay (int): Number of seconds to wait before polling for idle. idle_threshold (int): CPU usage threshold to mark the process as idle. @@ -255,21 +260,22 @@ def _dyn_close(): # pragma: no cover LOG.error("ERROR: Test case was not served. 
Timeout too short?") break - # check status and exit early if possible - if repeat - self.status.iteration + self.status.results < min_results: - if self.status.iteration < repeat: - LOG.debug("skipping remaining attempts") - # failed to reproduce issue - LOG.debug("results (%d) < minimum (%d), after %d attempts", - self.status.results, min_results, self.status.iteration) - break - # check if complete (results found) - if self.status.results >= min_results: - assert self.status.results == min_results - assert sum(x.count for x in reports.values() if x.expected) >= min_results - LOG.debug("results == expected (%d), after %d attempts", - min_results, self.status.iteration) - break + if exit_early: + # failed to meet minimum number of results + if repeat - self.status.iteration + self.status.results < min_results: + if self.status.iteration < repeat: + LOG.debug("skipping remaining attempts") + # failed to reproduce issue + LOG.debug("results (%d) < minimum (%d), after %d attempts", + self.status.results, min_results, self.status.iteration) + break + # check if complete (minimum number of results found) + if self.status.results >= min_results: + assert self.status.results == min_results + assert sum(x.count for x in reports.values() if x.expected) >= min_results + LOG.debug("results == expected (%d), after %d attempts", + min_results, self.status.iteration) + break # warn about large browser logs #self.status.log_size = self.target.log_size() diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 6707ecda..558ca40f 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -163,6 +163,28 @@ def test_replay_07(mocker, tmp_path): assert replay.status.results == 2 assert replay.status.ignored == 1 assert len(results) == 1 + assert sum(x.count for x in results) == 2 + target.reset_mock() + # ignore early failure (perform all repeats) + target.detect_failure.return_value = Target.RESULT_NONE + target.detect_failure.side_effect = None + with ReplayManager([], server, target, use_harness=False) as replay: + assert not replay.run(testcases, repeat=4, min_results=4, exit_early=False) + assert target.close.call_count == 1 + assert replay.status.iteration == 4 + assert replay.status.results == 0 + assert replay.status.ignored == 0 + target.reset_mock() + # ignore early success (perform all repeats) + target.detect_failure.return_value = Target.RESULT_FAILURE + with ReplayManager([], server, target, use_harness=False) as replay: + results = replay.run(testcases, repeat=4, min_results=1, exit_early=False) + assert target.close.call_count == 1 + assert replay.status.iteration == 4 + assert replay.status.results == 4 + assert replay.status.ignored == 0 + assert len(results) == 1 + assert sum(x.count for x in results) == 4 def test_replay_08(mocker, tmp_path): """test ReplayManager.run() - test signatures - fail to meet minimum""" From f0c8542fd1156a9cfe06581273c82c1c792134b9 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 28 Oct 2020 12:59:11 -0700 Subject: [PATCH 047/531] Update puppet_target imports --- grizzly/target/puppet_target.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/grizzly/target/puppet_target.py b/grizzly/target/puppet_target.py index 3eb052ea..34795e37 100644 --- a/grizzly/target/puppet_target.py +++ b/grizzly/target/puppet_target.py @@ -6,7 +6,11 @@ from os import close, kill, unlink from os.path import abspath, isfile from platform import system -import signal +from signal import SIGABRT 
+try: + from signal import SIGUSR1 +except ImportError: + SIGUSR1 = None from time import sleep, time from tempfile import mkdtemp, mkstemp @@ -32,8 +36,7 @@ class PuppetTarget(Target): __slots__ = ("use_rr", "use_valgrind", "_puppet", "_remove_prefs") def __init__(self, binary, extension, launch_timeout, log_limit, memory_limit, relaunch, **kwds): - super(PuppetTarget, self).__init__(binary, extension, launch_timeout, - log_limit, memory_limit, relaunch) + super().__init__(binary, extension, launch_timeout, log_limit, memory_limit, relaunch) self.use_rr = kwds.pop("rr", False) self.use_valgrind = kwds.pop("valgrind", False) self._remove_prefs = False @@ -51,7 +54,7 @@ def _abort_hung_proc(self): proc_usage = self._puppet.cpu_usage() for pid, cpu in sorted(proc_usage, reverse=True, key=lambda x: x[1]): LOG.debug("sending SIGABRT to pid: %r, cpu: %0.2f%%", pid, cpu) - kill(pid, signal.SIGABRT) + kill(pid, SIGABRT) break def add_abort_token(self, token): @@ -130,7 +133,7 @@ def detect_failure(self, ignored, was_timeout): status = self.RESULT_IGNORED LOG.debug("log size limit exceeded") else: - LOG.debug("failure detected, ffpuppet return code: %r", self._puppet.reason) + LOG.debug("failure detected, ffpuppet reason %r", self._puppet.reason) status = self.RESULT_FAILURE elif was_timeout: LOG.debug("timeout detected") @@ -138,6 +141,7 @@ def detect_failure(self, ignored, was_timeout): return status def dump_coverage(self, timeout=15): + assert SIGUSR1 is not None pid = self._puppet.get_pid() if pid is None or not self._puppet.is_healthy(): LOG.debug("Skipping coverage dump (target is not in a good state)") @@ -147,11 +151,11 @@ def dump_coverage(self, timeout=15): try: for child in Process(pid).children(recursive=True): LOG.debug("Sending SIGUSR1 to %d (child)", child.pid) - kill(child.pid, signal.SIGUSR1) + kill(child.pid, SIGUSR1) except (AccessDenied, NoSuchProcess): # pragma: no cover pass LOG.debug("Sending SIGUSR1 to %d (parent)", pid) - kill(pid, signal.SIGUSR1) + kill(pid, SIGUSR1) start_time = time() gcda_found = False delay = 0.1 @@ -179,7 +183,7 @@ def dump_coverage(self, timeout=15): if elapsed >= timeout: # timeout failure LOG.warning("gcda file open by pid %d after %0.2fs", gcda_open, elapsed) - kill(gcda_open, signal.SIGABRT) + kill(gcda_open, SIGABRT) sleep(1) self.close() break From 362409383d3756786c2afa53b20f6b20e253b76d Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 28 Oct 2020 14:35:35 -0700 Subject: [PATCH 048/531] [tests] Simplify test_status_08 --- grizzly/common/test_status.py | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/grizzly/common/test_status.py b/grizzly/common/test_status.py index 07a2f935..b2232312 100644 --- a/grizzly/common/test_status.py +++ b/grizzly/common/test_status.py @@ -130,7 +130,7 @@ def test_status_07(tmp_path): status.timestamp += 1 assert status.rate == 0.5 -def _client_writer(done, working_path): +def _client_writer(done, reported, working_path): """Used by test_status_08""" # NOTE: this must be at the top level to work on Windows Status.PATH = working_path @@ -139,6 +139,9 @@ def _client_writer(done, working_path): while not done.is_set(): status.iteration += 1 status.report(force=True) + # perform two reports before setting flag + if not reported.is_set() and status.iteration > 1: + reported.set() sleep(0.01) finally: status.cleanup() @@ -146,27 +149,28 @@ def _client_writer(done, working_path): def test_status_08(tmp_path): """test Status.loadall() with multiple active reporters""" 
Status.PATH = str(tmp_path) - best_rate = 0 done = Event() procs = list() + report_events = list() try: + # launch processes for _ in range(5): - procs.append(Process(target=_client_writer, args=(done, Status.PATH))) + report_events.append(Event()) + procs.append(Process(target=_client_writer, args=(done, report_events[-1], Status.PATH))) procs[-1].start() - deadline = time() + 60 - while len(tuple(Status.loadall())) < len(procs): - sleep(0.1) - assert time() < deadline, "timeout waiting for processes to launch!" - for _ in range(20): - for obj in Status.loadall(): - if obj.rate > best_rate: - best_rate = obj.rate + # wait for processes to launch and report + for has_reported in report_events: + assert has_reported.wait(60) + # collect reports + reports = tuple(Status.loadall()) + assert len(reports) == len(procs) + assert max(x.rate for x in reports) > 0 finally: done.set() for proc in procs: if proc.pid is not None: proc.join() - assert best_rate > 0 + # verify cleanup assert not any(Status.loadall()) def test_reducer_stats_01(tmp_path): From cf6ab8fab08606db8ec1fd6804feb349d20f1e17 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 28 Oct 2020 14:55:21 -0700 Subject: [PATCH 049/531] [tests] Fix test_reducer_stats_04 --- grizzly/common/test_status.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/grizzly/common/test_status.py b/grizzly/common/test_status.py index b2232312..5a259726 100644 --- a/grizzly/common/test_status.py +++ b/grizzly/common/test_status.py @@ -160,7 +160,7 @@ def test_status_08(tmp_path): procs[-1].start() # wait for processes to launch and report for has_reported in report_events: - assert has_reported.wait(60) + assert has_reported.wait(timeout=60) # collect reports reports = tuple(Status.loadall()) assert len(reports) == len(procs) @@ -212,32 +212,37 @@ def test_reducer_stats_03(tmp_path): with ReducerStats() as stats: assert stats.passed == 0 -def _reducer_client(working_path, limit, unrestrict): +def _reducer_client(working_path, reported, unrestrict): """Used by test_reducer_stats_04""" # NOTE: this must be at the top level to work on Windows ReducerStats.PATH = working_path - for _ in range(50): + for _ in range(20): with ReducerStats() as stats: stats.passed += 1 - if stats.passed == limit: - unrestrict.set() - unrestrict.wait(timeout=60) + if not reported.is_set(): + reported.set() + unrestrict.wait(timeout=60) def test_reducer_stats_04(tmp_path): """test ReducerStats() with multiple processes""" ReducerStats.PATH = str(tmp_path) + report_events = list() procs = list() unrestrict = Event() # used to sync client procs try: - proc_count = 5 - for _ in range(proc_count): + # launch processes + for _ in range(5): + report_events.append(Event()) procs.append(Process( - target=_reducer_client, args=(ReducerStats.PATH, proc_count, unrestrict))) + target=_reducer_client, args=(ReducerStats.PATH, report_events[-1], unrestrict))) procs[-1].start() + # wait for processes to report + for has_reported in report_events: + assert has_reported.wait(timeout=60) finally: unrestrict.set() for proc in procs: if proc.pid is not None: proc.join() with ReducerStats() as stats: - assert stats.passed == 250 + assert stats.passed == 100 From ef2d268ce95dbc28bb989a6e3912860ce6fa884f Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 29 Oct 2020 10:12:08 -0700 Subject: [PATCH 050/531] [tests] Run pytest with '--log-level DEBUG' --- grizzly/common/test_runner.py | 2 +- grizzly/replay/test_main.py | 6 +++--- 
grizzly/replay/test_replay.py | 32 ++++++++++++++++---------------- grizzly/test_session.py | 8 ++++---- tox.ini | 1 + 5 files changed, 25 insertions(+), 24 deletions(-) diff --git a/grizzly/common/test_runner.py b/grizzly/common/test_runner.py index d36f470a..435991ff 100644 --- a/grizzly/common/test_runner.py +++ b/grizzly/common/test_runner.py @@ -175,7 +175,7 @@ def test_runner_07(): def test_runner_08(mocker): """test Runner.launch()""" server = mocker.Mock(spec=Sapphire, port=0x1337) - target = mocker.Mock(spec=Target) + target = mocker.Mock(spec=Target, launch_timeout=30) runner = Runner(server, target) runner.launch("http://a/") diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 965df971..98ae1638 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -77,7 +77,7 @@ def test_main_01(mocker, tmp_path): serve_path.return_value = (None, ["test.html"]) # passed to mocked Target.detect_failure # setup Target load_target = mocker.patch("grizzly.replay.replay.load_target") - target = mocker.Mock(spec=Target, binary="bin", forced_close=True) + target = mocker.Mock(spec=Target, binary="bin", forced_close=True, launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_IGNORED = Target.RESULT_IGNORED target.RESULT_NONE = Target.RESULT_NONE @@ -196,7 +196,7 @@ def test_main_03(mocker, tmp_path): def test_main_04(mocker): """test ReplayManager.main() loading GRZ_FORCED_CLOSE from test case""" mocker.patch("grizzly.replay.replay.Sapphire.serve_path", return_value=(None, ["x.html"])) - target = mocker.Mock(spec=Target, forced_close=True) + target = mocker.Mock(spec=Target, forced_close=True, launch_timeout=30) load_target = mocker.patch("grizzly.replay.replay.load_target", autospec=True) load_target.return_value.return_value = target testcase = mocker.Mock( @@ -228,7 +228,7 @@ def test_main_05(mocker, tmp_path): serve_path = mocker.patch("grizzly.replay.replay.Sapphire.serve_path", autospec=True) serve_path.return_value = (None, ["test.html"]) # passed to mocked Target.detect_failure # setup Target - target = mocker.Mock(spec=Target, binary="bin", forced_close=True) + target = mocker.Mock(spec=Target, binary="bin", forced_close=True, launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.save_logs = _fake_save_logs diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 558ca40f..398c6486 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -40,7 +40,7 @@ def test_replay_02(mocker, tmp_path): mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, closed=True, forced_close=True, rl_reset=1) + target = mocker.Mock(spec=Target, closed=True, forced_close=True, launch_timeout=30, rl_reset=1) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE with TestCase("index.html", "redirect.html", "test-adapter") as testcase: @@ -58,7 +58,7 @@ def test_replay_03(mocker): """test ReplayManager.run() - no repro - with repeats""" server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, closed=True, rl_reset=100) + target = mocker.Mock(spec=Target, closed=True, launch_timeout=30, rl_reset=100) target.RESULT_NONE = 
Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE with TestCase("index.html", "redirect.html", "test-adapter") as testcase: @@ -77,7 +77,7 @@ def test_replay_04(mocker, tmp_path): served = ["index.html"] server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, served) - target = mocker.Mock(spec=Target, binary="C:\\fake_bin") + target = mocker.Mock(spec=Target, binary="C:\\fake_bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.save_logs = _fake_save_logs @@ -104,7 +104,7 @@ def test_replay_05(mocker): """test ReplayManager.run() - Error (landing page not requested/served)""" server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_REQUEST, ["x"]) - target = mocker.Mock(spec=Target) + target = mocker.Mock(spec=Target, launch_timeout=30) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -122,7 +122,7 @@ def test_replay_06(mocker, tmp_path): mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target) + target = mocker.Mock(spec=Target, launch_timeout=30) target.RESULT_IGNORED = Target.RESULT_IGNORED target.detect_failure.return_value = Target.RESULT_IGNORED testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -139,7 +139,7 @@ def test_replay_07(mocker, tmp_path): mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, binary="path/fake_bin") + target = mocker.Mock(spec=Target, binary="path/fake_bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_IGNORED = Target.RESULT_IGNORED target.RESULT_NONE = Target.RESULT_NONE @@ -204,7 +204,7 @@ def test_replay_08(mocker, tmp_path): server.serve_path.return_value = (SERVED_ALL, ["index.html"]) signature = mocker.Mock() signature.matches.side_effect = (True, False, False) - target = mocker.Mock(spec=Target, binary="fake_bin") + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -239,7 +239,7 @@ def test_replay_09(mocker, tmp_path): server.serve_path.return_value = (SERVED_ALL, ["a.html"]) signature = mocker.Mock() signature.matches.side_effect = (True, True) - target = mocker.Mock(spec=Target, binary="fake_bin") + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="a.html", optional=[])] @@ -271,7 +271,7 @@ def test_replay_10(mocker, tmp_path): fake_report.side_effect = (report_0, report_1, report_2) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, binary="fake_bin") + target = mocker.Mock(spec=Target, binary="fake_bin", 
launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -305,7 +305,7 @@ def test_replay_11(mocker, tmp_path): fake_report.side_effect = (report_0, report_1, report_2, report_3) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, binary="fake_bin") + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -337,7 +337,7 @@ def test_replay_12(mocker, tmp_path): fake_report.side_effect = (report_1, report_2, report_3) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, binary="fake_bin") + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -365,7 +365,7 @@ def test_replay_13(mocker, tmp_path): fake_report.side_effect = (report_0,) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.side_effect = ((SERVED_ALL, ["index.html"]), KeyboardInterrupt) - target = mocker.Mock(spec=Target, binary="fake_bin") + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -384,7 +384,7 @@ def test_replay_14(mocker): """test ReplayManager.run() - multiple TestCases - no repro""" server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, closed=True, forced_close=True, rl_reset=1) + target = mocker.Mock(spec=Target, closed=True, forced_close=True, launch_timeout=30, rl_reset=1) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE testcases = [ @@ -403,7 +403,7 @@ def test_replay_15(mocker): """test ReplayManager.run() - multiple TestCases - no repro - with repeats""" server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, closed=True, forced_close=True, rl_reset=100) + target = mocker.Mock(spec=Target, closed=True, forced_close=True, launch_timeout=30, rl_reset=100) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE testcases = [ @@ -427,7 +427,7 @@ def test_replay_16(mocker, tmp_path): (SERVED_ALL, ["a.html"]), (SERVED_ALL, ["b.html"]), (SERVED_ALL, ["c.html"])) - target = mocker.Mock(spec=Target, binary="fake_bin", rl_reset=1) + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30, rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.side_effect = ( @@ -458,7 +458,7 @@ def test_replay_17(mocker, tmp_path): mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) 
server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, closed=True, forced_close=True, rl_reset=1) + target = mocker.Mock(spec=Target, closed=True, forced_close=True, launch_timeout=30, rl_reset=1) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE with TestCase("index.html", "redirect.html", "test-adapter") as testcase: diff --git a/grizzly/test_session.py b/grizzly/test_session.py index 531cb6e9..ffc1e780 100644 --- a/grizzly/test_session.py +++ b/grizzly/test_session.py @@ -49,7 +49,7 @@ def generate(self, testcase, server_map): fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) prefs = tmp_path / "prefs.js" prefs.touch() - fake_target = mocker.Mock(spec=Target, prefs=str(prefs)) + fake_target = mocker.Mock(spec=Target, launch_timeout=30, prefs=str(prefs)) # set target.log_size to test warning code path fake_target.log_size.return_value = Session.TARGET_LOG_SIZE_WARN + 1 with IOManager() as iomgr: @@ -74,7 +74,7 @@ def generate(self, testcase, server_map): fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) prefs = tmp_path / "prefs.js" prefs.touch() - fake_target = mocker.Mock(spec=Target, prefs=str(prefs), rl_reset=10) + fake_target = mocker.Mock(spec=Target, launch_timeout=30, prefs=str(prefs), rl_reset=10) fake_target.log_size.return_value = 1000 fake_target.monitor.launches = 1 with IOManager() as iomgr: @@ -122,7 +122,7 @@ def generate(self, testcase, server_map): adapter = FuzzAdapter() adapter.setup(None, None) fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) - fake_target = mocker.Mock(spec=Target, prefs=None) + fake_target = mocker.Mock(spec=Target, launch_timeout=30, prefs=None) fake_target.monitor.launches = 1 with IOManager() as iomgr: fake_serv.serve_path.return_value = (SERVED_NONE, []) @@ -149,7 +149,7 @@ def test_session_05(tmp_path, mocker): fake_serv = mocker.Mock(spec=Sapphire, port=0x1337) # return SERVED_TIMEOUT to test IGNORE_UNSERVED code path fake_serv.serve_path.return_value = (SERVED_TIMEOUT, [fake_testcase.landing_page]) - fake_target = mocker.Mock(spec=Target, prefs=None) + fake_target = mocker.Mock(spec=Target, launch_timeout=30, prefs=None) fake_target.monitor.launches = 1 with Session(fake_adapter, fake_iomgr, None, fake_serv, fake_target) as session: session.run([], iteration_limit=1) diff --git a/tox.ini b/tox.ini index 44cd55e8..b9ed51be 100644 --- a/tox.ini +++ b/tox.ini @@ -27,6 +27,7 @@ addopts = --cache-clear --cov . --cov-report term-missing + --log-level DEBUG filterwarnings = ignore:cannot collect test class 'Test.*' because it has a __init__ constructor:pytest.PytestCollectionWarning ignore:Using or importing the ABCs:DeprecationWarning:botocore From ca0e620594894f0fc8506291c5b32f2e4fc2f774 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 29 Oct 2020 12:10:43 -0700 Subject: [PATCH 051/531] [ci] Update tox.ini --- tox.ini | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index b9ed51be..33e367ce 100644 --- a/tox.ini +++ b/tox.ini @@ -27,10 +27,10 @@ addopts = --cache-clear --cov . 
--cov-report term-missing - --log-level DEBUG filterwarnings = ignore:cannot collect test class 'Test.*' because it has a __init__ constructor:pytest.PytestCollectionWarning ignore:Using or importing the ABCs:DeprecationWarning:botocore +log_level = DEBUG [testenv] commands = pytest -v --cache-clear --cov="{toxinidir}" --cov-report term-missing --basetemp="{envtmpdir}" {posargs} @@ -40,7 +40,7 @@ extras = usedevelop = true [tox] -envlist = py27,py35,py36,py37,py38 -minversion = 3.2 +envlist = py35,py36,py37,py38 +minversion = 3.5 skip_missing_interpreters = true tox_pip_extensions_ext_venv_update = true From d66770545381c178ccd2e63c7d38ee485da4b338 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 4 Nov 2020 09:32:20 -0800 Subject: [PATCH 052/531] [replay] Add '--test-index' argument --- grizzly/replay/args.py | 5 +++++ grizzly/replay/replay.py | 14 +++++++++++++- grizzly/replay/test_main.py | 29 ++++++++++++++++++++++------- 3 files changed, 40 insertions(+), 8 deletions(-) diff --git a/grizzly/replay/args.py b/grizzly/replay/args.py index d7e764a8..d11b201a 100644 --- a/grizzly/replay/args.py +++ b/grizzly/replay/args.py @@ -48,6 +48,11 @@ def __init__(self): replay_args.add_argument( "--sig", help="Signature (JSON) file to match.") + replay_args.add_argument( + "--test-index", type=int, + help="Select a testcase to run when multiple testcases are loaded. " \ + "Testscases are ordered oldest to newest. Indexing is 0 based. " \ + "0 == Oldest, -1 == Newest (default: run all testcases)") self.launcher_grp.add_argument( "--rr", action="store_true", diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index d1f25f0a..374ad969 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -347,6 +347,16 @@ def main(cls, args): testcases = TestCase.load(args.input, args.prefs is None) if not testcases: raise TestCaseLoadFailure("Failed to load TestCases") + if args.test_index is not None: + LOG.debug("using TestCase with index %d", args.test_index) + try: + selected = testcases.pop(args.test_index) + except IndexError: + raise TestCaseLoadFailure("Invalid '--test-index'") from None + finally: + for test in testcases: + test.cleanup() + testcases = [selected] except TestCaseLoadFailure as exc: LOG.error("Error: %s", str(exc)) return 1 @@ -356,7 +366,9 @@ def main(cls, args): tmp_prefs = None try: if args.no_harness and len(testcases) > 1: - LOG.error("'--no-harness' cannot be used with multiple testcases") + LOG.error( + "'--no-harness' cannot be used with multiple testcases. 
" \ + "Perhaps '--test-index' can help.") return 1 repeat = max(args.min_crashes, args.repeat) relaunch = min(args.relaunch, repeat) diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 98ae1638..b23e0ebd 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -102,6 +102,7 @@ def test_main_01(mocker, tmp_path): repeat=4, rr=False, sig=str(tmp_path / "sig.json"), + test_index=None, timeout=10, valgrind=False) assert ReplayManager.main(args) == 0 @@ -135,15 +136,16 @@ def test_main_02(mocker): prefs=None, relaunch=1, repeat=1, - sig=None) + sig=None, + test_index=None) # user abort fake_load_target.side_effect = KeyboardInterrupt # coverage args.rr = True args.valgrind = False assert ReplayManager.main(args) == 1 - # invalid test case fake_load_target.reset_mock() + # invalid test case fake_tc.load.side_effect = TestCaseLoadFailure # coverage args.rr = False @@ -156,8 +158,14 @@ def test_main_02(mocker): fake_tc.load.return_value = list() assert ReplayManager.main(args) == 1 assert fake_load_target.call_count == 0 + fake_load_target.reset_mock() # multiple test cases with --no-harness + fake_tc.load.return_value = [mocker.Mock(), mocker.Mock()] + assert ReplayManager.main(args) == 1 + assert fake_load_target.call_count == 0 fake_load_target.reset_mock() + # multiple test cases with invalid --test-index + args.test_index = 100 fake_tc.load.return_value = [mocker.Mock(), mocker.Mock()] assert ReplayManager.main(args) == 1 assert fake_load_target.call_count == 0 @@ -180,7 +188,8 @@ def test_main_03(mocker, tmp_path): prefs=None, relaunch=1, repeat=1, - sig=None) + sig=None, + test_index=None) # target launch error fake_logs = (tmp_path / "fake_report") fake_logs.mkdir() @@ -194,17 +203,18 @@ def test_main_03(mocker, tmp_path): assert ReplayManager.main(args) == 1 def test_main_04(mocker): - """test ReplayManager.main() loading GRZ_FORCED_CLOSE from test case""" + """test ReplayManager.main() loading GRZ_FORCED_CLOSE from selected test case""" mocker.patch("grizzly.replay.replay.Sapphire.serve_path", return_value=(None, ["x.html"])) target = mocker.Mock(spec=Target, forced_close=True, launch_timeout=30) load_target = mocker.patch("grizzly.replay.replay.load_target", autospec=True) load_target.return_value.return_value = target - testcase = mocker.Mock( + test0 = mocker.Mock( spec=TestCase, env_vars={"GRZ_FORCED_CLOSE": "0"}, landing_page="x.html", optional=[]) - mocker.patch("grizzly.replay.replay.TestCase.load", return_value=[testcase]) + test1 = mocker.Mock(spec=TestCase) + mocker.patch("grizzly.replay.replay.TestCase.load", return_value=[test0, test1]) # setup args args = mocker.Mock( fuzzmanager=False, @@ -217,9 +227,13 @@ def test_main_04(mocker): relaunch=1, repeat=1, sig=None, + test_index=0, timeout=1) ReplayManager.main(args) - assert testcase.cleanup.call_count == 1 + assert test0.cleanup.call_count == 1 + assert test0.dump.call_count == 1 + assert test1.cleanup.call_count == 1 + assert test1.dump.call_count == 0 assert target.cleanup.call_count == 1 assert not target.forced_close @@ -244,6 +258,7 @@ def test_main_05(mocker, tmp_path): relaunch=1, repeat=1, sig=None, + test_index=None, timeout=1) log_path = (tmp_path / "logs") args.logs = str(log_path) From da581a9581454912184f49ebc1df4d5cf3905cf0 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 4 Nov 2020 18:28:11 -0800 Subject: [PATCH 053/531] [replay] Update log output and sanity checks --- grizzly/replay/replay.py | 11 +++++++++-- grizzly/replay/test_main.py | 13 +++++++++---- 
grizzly/replay/test_replay.py | 24 ++++++++++++------------ 3 files changed, 30 insertions(+), 18 deletions(-) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 374ad969..79fb9880 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -55,6 +55,9 @@ def __init__(self, ignore, server, target, any_crash=False, signature=None, use_ if use_harness: with open(self.HARNESS_FILE, "rb") as in_fp: self._harness = in_fp.read() + else: + # target must relaunch every iteration when not using harness + assert target.rl_reset == 1 def __enter__(self): return self @@ -129,6 +132,7 @@ def run(self, testcases, repeat=1, min_results=1, exit_early=True, idle_delay=0, assert repeat > 0 assert repeat >= min_results assert testcases + assert len(testcases) == 1 or self._harness is not None if self.status is not None: LOG.debug("clearing previous status data") @@ -185,7 +189,11 @@ def _dyn_close(): # pragma: no cover durations = list() served = list() for test_idx in range(test_count): - LOG.debug("running test: %d of %d", test_idx + 1, test_count) + if test_count > 1: + LOG.info("Running test, part %d/%d (%d/%d)...", + test_idx + 1, test_count, self.status.iteration, repeat) + else: + LOG.info("Running test (%d/%d)...", self.status.iteration, repeat) # update redirects if self._harness is not None: next_idx = (test_idx + 1) % test_count @@ -372,7 +380,6 @@ def main(cls, args): return 1 repeat = max(args.min_crashes, args.repeat) relaunch = min(args.relaunch, repeat) - assert not args.no_harness or (args.no_harness and relaunch == 1) LOG.info("Repeat: %d, Minimum crashes: %d, Relaunch %d", repeat, args.min_crashes, relaunch) LOG.debug("initializing the Target") diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index b23e0ebd..960e0a3a 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -77,7 +77,7 @@ def test_main_01(mocker, tmp_path): serve_path.return_value = (None, ["test.html"]) # passed to mocked Target.detect_failure # setup Target load_target = mocker.patch("grizzly.replay.replay.load_target") - target = mocker.Mock(spec=Target, binary="bin", forced_close=True, launch_timeout=30) + target = mocker.Mock(spec=Target, binary="bin", forced_close=True, launch_timeout=30, rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_IGNORED = Target.RESULT_IGNORED target.RESULT_NONE = Target.RESULT_NONE @@ -97,6 +97,7 @@ def test_main_01(mocker, tmp_path): input=str(tmp_path / "test.html"), logs=str(log_path), min_crashes=2, + no_harness=True, prefs=str(tmp_path / "prefs.js"), relaunch=1, repeat=4, @@ -173,9 +174,11 @@ def test_main_02(mocker): def test_main_03(mocker, tmp_path): """test ReplayManager.main() target exceptions""" mocker.patch("grizzly.replay.replay.FuzzManagerReporter", autospec=True) - mocker.patch("grizzly.replay.replay.load_target", autospec=True) mocker.patch("grizzly.replay.replay.Sapphire", autospec=True) mocker.patch("grizzly.replay.replay.TestCase", autospec=True) + target = mocker.Mock(spec=Target, forced_close=True, launch_timeout=30, rl_reset=1) + load_target = mocker.patch("grizzly.replay.replay.load_target", autospec=True) + load_target.return_value.return_value = target fake_tmp = (tmp_path / "grz_tmp") fake_tmp.mkdir() mocker.patch("grizzly.replay.replay.grz_tmp", autospec=True, return_value=str(fake_tmp)) @@ -205,7 +208,7 @@ def test_main_03(mocker, tmp_path): def test_main_04(mocker): """test ReplayManager.main() loading GRZ_FORCED_CLOSE from selected test case""" 
mocker.patch("grizzly.replay.replay.Sapphire.serve_path", return_value=(None, ["x.html"])) - target = mocker.Mock(spec=Target, forced_close=True, launch_timeout=30) + target = mocker.Mock(spec=Target, forced_close=True, launch_timeout=30, rl_reset=1) load_target = mocker.patch("grizzly.replay.replay.load_target", autospec=True) load_target.return_value.return_value = target test0 = mocker.Mock( @@ -223,6 +226,7 @@ def test_main_04(mocker): ignore=None, input="test", min_crashes=1, + no_harness=True, prefs=None, relaunch=1, repeat=1, @@ -242,7 +246,7 @@ def test_main_05(mocker, tmp_path): serve_path = mocker.patch("grizzly.replay.replay.Sapphire.serve_path", autospec=True) serve_path.return_value = (None, ["test.html"]) # passed to mocked Target.detect_failure # setup Target - target = mocker.Mock(spec=Target, binary="bin", forced_close=True, launch_timeout=30) + target = mocker.Mock(spec=Target, binary="bin", forced_close=True, launch_timeout=30, rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.save_logs = _fake_save_logs @@ -255,6 +259,7 @@ def test_main_05(mocker, tmp_path): idle_threshold=0, ignore=None, min_crashes=1, + no_harness=True, relaunch=1, repeat=1, sig=None, diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 398c6486..9aca0404 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -77,12 +77,12 @@ def test_replay_04(mocker, tmp_path): served = ["index.html"] server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, served) - target = mocker.Mock(spec=Target, binary="C:\\fake_bin", launch_timeout=30) + target = mocker.Mock(spec=Target, binary="C:\\fake_bin", launch_timeout=30, rl_reset=10) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.save_logs = _fake_save_logs with TestCase("index.html", "redirect.html", "test-adapter") as testcase: - with ReplayManager([], server, target, use_harness=False) as replay: + with ReplayManager([], server, target) as replay: results = replay.run([testcase]) assert replay._signature is not None assert replay.status.ignored == 0 @@ -104,7 +104,7 @@ def test_replay_05(mocker): """test ReplayManager.run() - Error (landing page not requested/served)""" server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_REQUEST, ["x"]) - target = mocker.Mock(spec=Target, launch_timeout=30) + target = mocker.Mock(spec=Target, launch_timeout=30, rl_reset=1) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -122,7 +122,7 @@ def test_replay_06(mocker, tmp_path): mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, launch_timeout=30) + target = mocker.Mock(spec=Target, launch_timeout=30, rl_reset=1) target.RESULT_IGNORED = Target.RESULT_IGNORED target.detect_failure.return_value = Target.RESULT_IGNORED testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -139,7 +139,7 @@ def test_replay_07(mocker, tmp_path): mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337) 
server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, binary="path/fake_bin", launch_timeout=30) + target = mocker.Mock(spec=Target, binary="path/fake_bin", launch_timeout=30, rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_IGNORED = Target.RESULT_IGNORED target.RESULT_NONE = Target.RESULT_NONE @@ -204,7 +204,7 @@ def test_replay_08(mocker, tmp_path): server.serve_path.return_value = (SERVED_ALL, ["index.html"]) signature = mocker.Mock() signature.matches.side_effect = (True, False, False) - target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30, rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -239,7 +239,7 @@ def test_replay_09(mocker, tmp_path): server.serve_path.return_value = (SERVED_ALL, ["a.html"]) signature = mocker.Mock() signature.matches.side_effect = (True, True) - target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30, rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="a.html", optional=[])] @@ -271,7 +271,7 @@ def test_replay_10(mocker, tmp_path): fake_report.side_effect = (report_0, report_1, report_2) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30, rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -305,11 +305,11 @@ def test_replay_11(mocker, tmp_path): fake_report.side_effect = (report_0, report_1, report_2, report_3) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30, rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, any_crash=True, use_harness=False) as replay: + with ReplayManager([], server, target, any_crash=True) as replay: assert not replay.run(testcases, repeat=4, min_results=3) assert replay._signature is None assert replay.status.iteration == 4 @@ -337,7 +337,7 @@ def test_replay_12(mocker, tmp_path): fake_report.side_effect = (report_1, report_2, report_3) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) - target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30, rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] @@ -365,7 +365,7 @@ def 
test_replay_13(mocker, tmp_path): fake_report.side_effect = (report_0,) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.side_effect = ((SERVED_ALL, ["index.html"]), KeyboardInterrupt) - target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) + target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30, rl_reset=1) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] From 3c888eba054c7d586380348805c3ddaac558eb9b Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 5 Nov 2020 07:51:05 -0800 Subject: [PATCH 054/531] [replay] Remove duplicate log line --- grizzly/replay/replay.py | 1 - 1 file changed, 1 deletion(-) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 79fb9880..9705541d 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -184,7 +184,6 @@ def _dyn_close(): # pragma: no cover # be run individually. runner.launch(location, env_mod=testcases[0].env_vars) self.target.step() - LOG.info("Performing replay (%d/%d)...", self.status.iteration, repeat) # run tests durations = list() served = list() From cf8f1415c2a3724c2d21e1d2e7f4f35ac68d6b54 Mon Sep 17 00:00:00 2001 From: Jesse Schwartzentruber Date: Wed, 30 Sep 2020 14:23:36 -0400 Subject: [PATCH 055/531] Initial rewrite of grizzly.reduce --- grizzly/common/reporter.py | 8 +- grizzly/reduce/__init__.py | 24 +- grizzly/reduce/__main__.py | 8 +- grizzly/reduce/args.py | 170 +--- grizzly/reduce/bucket.py | 136 --- grizzly/reduce/crash.py | 182 ---- grizzly/reduce/exceptions.py | 23 - grizzly/reduce/reduce.py | 1459 ++++++---------------------- grizzly/reduce/strategies.py | 581 ++++------- grizzly/reduce/test_common.py | 235 ----- grizzly/reduce/test_interesting.py | 484 --------- grizzly/reduce/test_main.py | 472 --------- grizzly/reduce/test_reduce.py | 700 ------------- grizzly/replay/__init__.py | 4 +- grizzly/replay/replay.py | 5 +- grizzly/target/target.py | 9 + setup.py | 7 +- tox.ini | 3 +- 18 files changed, 555 insertions(+), 3955 deletions(-) delete mode 100644 grizzly/reduce/bucket.py delete mode 100644 grizzly/reduce/crash.py delete mode 100644 grizzly/reduce/exceptions.py delete mode 100644 grizzly/reduce/test_common.py delete mode 100644 grizzly/reduce/test_interesting.py delete mode 100644 grizzly/reduce/test_main.py delete mode 100644 grizzly/reduce/test_reduce.py diff --git a/grizzly/common/reporter.py b/grizzly/common/reporter.py index b402990a..6e723eee 100644 --- a/grizzly/common/reporter.py +++ b/grizzly/common/reporter.py @@ -385,15 +385,16 @@ def submit(self, test_cases, report): report (Report): Report to submit. 
Returns: - None + *: implementation specific result indicating where the report was created """ assert isinstance(report, Report) assert report.path is not None self._pre_submit(report) - self._submit_report(report, test_cases) + result = self._submit_report(report, test_cases) if report is not None: report.cleanup() self._post_submit() + return result class FilesystemReporter(Reporter): @@ -435,6 +436,7 @@ def _submit_report(self, report, test_cases): free_space = disk_usage(log_path).free if free_space < self.DISK_SPACE_ABORT: raise RuntimeError("Running low on disk space (%0.1fMB)" % (free_space / 1048576.0,)) + return dest_path class FuzzManagerReporter(Reporter): @@ -611,6 +613,8 @@ def _submit_report(self, report, test_cases): if isfile(zip_name): unlink(zip_name) + return new_entry["id"] + class S3FuzzManagerReporter(FuzzManagerReporter): @staticmethod diff --git a/grizzly/reduce/__init__.py b/grizzly/reduce/__init__.py index a7af2a63..ac6ceed3 100644 --- a/grizzly/reduce/__init__.py +++ b/grizzly/reduce/__init__.py @@ -2,26 +2,8 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -import os -import re -__all__ = ("ReductionJob",) +__all__ = ("ReduceManager", "ReduceArgs") - -def testcase_contents(path="."): - for dir_name, _, dir_files in os.walk(path): - arc_path = os.path.relpath(dir_name, path) - # skip tmp folders - if re.match(r"^tmp.+$", arc_path.split(os.sep, 1)[0]) is not None: - continue - for file_name in dir_files: - # skip core files - if re.match(r"^core.\d+$", file_name) is not None: - continue - if arc_path == ".": - yield file_name - else: - yield os.path.join(arc_path, file_name) - - -from .reduce import ReductionJob # noqa pylint: disable=wrong-import-position +from .reduce import ReduceManager +from .args import ReduceArgs diff --git a/grizzly/reduce/__main__.py b/grizzly/reduce/__main__.py index 3fe92b5b..ab5d8aa9 100644 --- a/grizzly/reduce/__main__.py +++ b/grizzly/reduce/__main__.py @@ -2,10 +2,10 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -from sys import exit as sysexit +import sys -from .args import ReducerArgs -from .reduce import ReductionJob +from .args import ReduceArgs +from .reduce import ReduceManager -sysexit(ReductionJob.main(ReducerArgs().parse_args())) +sys.exit(ReduceManager.main(ReduceArgs().parse_args())) diff --git a/grizzly/reduce/args.py b/grizzly/reduce/args.py index 2c25c3c8..29afc0e4 100644 --- a/grizzly/reduce/args.py +++ b/grizzly/reduce/args.py @@ -2,139 +2,59 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
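# With this rewrite the reduce command line is built on top of the replay one
# (ReduceArgs below derives from ReplayArgs), so an invocation looks roughly
# like the sketch below. Paths are placeholders and most flags come from
# ReplayArgs/CommonArgs; only --no-analysis and --strategy are added here:
#
#   python -m grizzly.reduce ./firefox/firefox ./testcase.html \
#       --strategy lines jsbeautify --no-analysis --logs ./reduced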
-from os.path import isdir, isfile +from logging import getLogger +from pathlib import Path -from .reduce import ReductionJob -from .strategies import strategies_by_name -from ..args import CommonArgs +from ..replay.args import ReplayArgs +from .strategies import DEFAULT_STRATEGIES, STRATEGIES -class ReducerArgs(CommonArgs): +LOG = getLogger(__name__) - def __init__(self): - super(ReducerArgs, self).__init__() - self.parser.add_argument( - "input", - help="Test case or directory containing test cases") - - replay_args = self.parser.add_argument_group("Reduce Arguments") - replay_args.add_argument( - "--any-crash", action="store_true", - help="Any crash is interesting, not only crashes which match the original first crash") - replay_args.add_argument( - "--environ", - help="DEPRICATED: File containing line separated environment variables (VAR=value)" \ - "to be set in the firefox process.") - replay_args.add_argument( - "--idle-threshold", type=int, default=25, - help="CPU usage threshold to mark the process as idle (default: %(default)s)") - replay_args.add_argument( - "--idle-timeout", type=int, default=60, - help="Number of seconds to wait before polling testcase for idle (default: %(default)s)") - replay_args.add_argument( - "--min-crashes", type=int, default=1, - help="Require the testcase to crash n times before accepting the result. (default: %(default)sx)") - replay_args.add_argument( - "--no-analysis", action="store_true", - help="Disable analysis to auto-set --repeat/--min-crashes.") - replay_args.add_argument( - "--no-cache", action="store_true", - help="Disable testcase caching") - replay_args.add_argument( - "--no-harness", action="store_true", - help="Don't use the harness for sapphire redirection") - replay_args.add_argument( - "--reduce-file", - help="Value passed to lithium's --testcase option, needed for testcase cache " \ - "(default: input param)") - replay_args.add_argument( - "--repeat", type=int, default=1, - help="Try to run the testcase multiple times, for intermittent testcases (default: %(default)sx)") - replay_args.add_argument( - "--sig", - help="Signature (JSON) file to match.") - replay_args.add_argument( - "--skip", type=int, default=0, - help="Return interesting = False for the first n reductions (default: %(default)s)") - replay_args.add_argument( - "--static-timeout", action="store_true", dest="fixed_timeout", - help="Disable automatically updating the iteration timeout.") - replay_args.add_argument( - "--strategy", nargs="+", default=list(), metavar="STRATEGY", dest="strategies", - help="One or more strategies (space-separated). 
Available: %s (default: %s)" - % (" ".join(sorted(strategies_by_name())), " ".join(ReductionJob.DEFAULT_STRATEGIES))) - - def sanity_check(self, args): - super(ReducerArgs, self).sanity_check(args) - - if "input" not in self._sanity_skip: - if not (isdir(args.input) - or (isfile(args.input) and (args.input.lower().endswith(".zip") - or args.input.lower().endswith(".html")))): - self.parser.error("Testcase should be a folder, zip, or html file") - - if args.sig is not None and not isfile(args.sig): - self.parser.error("file not found: %r" % args.sig) - - if args.repeat < 1: - self.parser.error("'--repeat' value must be positive") - - if args.min_crashes < 1: - self.parser.error("'--min-crashes' value must be positive") - - if args.environ is not None and not isfile(args.environ): - self.parser.error("file not found: %r" % args.environ) - - if args.strategies: - known_strategies = set(strategies_by_name()) - for strategy in args.strategies: - if strategy not in known_strategies: - self.parser.error("invalid strategy: %s" % (strategy,)) - else: - args.strategies = None - if args.reduce_file is None: - args.reduce_file = args.input - - -class ReducerFuzzManagerIDArgs(ReducerArgs): +class ReduceArgs(ReplayArgs): def __init__(self): - super(ReducerFuzzManagerIDArgs, self).__init__() - - # madhax alert! - # - # We need to modify the meaning of the 'input' positional to accept an int ID instead of a - # local testcase. This is not possible with the public argparse API. - # - # refs: https://stackoverflow.com/questions/32807319/disable-remove-argument-in-argparse - # https://bugs.python.org/issue19462 - - # look up the action for the positional `input` arg - action = None - for arg in self.parser._actions: - if arg.dest == "input" and not arg.option_strings: - action = arg - break - assert action is not None - - # modify it's type and help string - action.type = int - action.help = "FuzzManager ID to reduce" - - # ... and Bob's your uncle - self._sanity_skip.add("input") + super().__init__() + # these arguments have other defaults vs how they are defined in ReplayArgs + self.parser.set_defaults( + include_test=True, + logs='.', + ) -class ReducerFuzzManagerIDQualityArgs(ReducerFuzzManagerIDArgs): - - def __init__(self): - super(ReducerFuzzManagerIDQualityArgs, self).__init__() - self.parser.add_argument("--quality", type=int, - help="Only try crashes with a given quality value") + reduce_args = self.parser.add_argument_group("Reduce Arguments") + reduce_args.add_argument( + "--no-analysis", action="store_true", + help="Disable analysis to auto-set --repeat/--min-crashes.") + reduce_args.add_argument( + "--strategy", nargs="+", default=DEFAULT_STRATEGIES, metavar="STRATEGY", + dest="strategies", + help="One or more strategies (space-separated). 
Available: %s (default: %s)" + % (" ".join(sorted(STRATEGIES)), " ".join(DEFAULT_STRATEGIES))) def sanity_check(self, args): - super(ReducerFuzzManagerIDQualityArgs, self).sanity_check(args) - - if args.quality is not None and args.quality < 0: - self.parser.error("'--quality' value must be positive or zero") + super().sanity_check(args) + + # if logs is specified, we need it to be a directory (whether existent or not) + if Path(args.logs).is_file(): + self.parser.error("'--logs' cannot be a file") + + # check that specified strategies exist + for strategy in args.strategies: + if strategy not in STRATEGIES: + self.parser.error("Unrecognized '--strategy': '%s'" % (strategy,)) + + if not args.no_analysis: + # analysis is enabled, but repeat/min_crashes specified. doesn't make sense + errors = [] + if args.repeat != self.parser.get_default("repeat"): + errors.append("'--repeat'") + if args.min_crashes != self.parser.get_default("min_crashes"): + errors.append("'--min-crashes'") + if errors: + error_str = " and ".join(errors) + LOG.warning( + "%s specified, with analysis enabled, they will be ignored", + error_str + ) diff --git a/grizzly/reduce/bucket.py b/grizzly/reduce/bucket.py deleted file mode 100644 index edb8e73f..00000000 --- a/grizzly/reduce/bucket.py +++ /dev/null @@ -1,136 +0,0 @@ -# coding=utf-8 -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -import collections -import json -import logging -import os -import sys -import tempfile - -from Collector.Collector import Collector - -from .args import ReducerFuzzManagerIDQualityArgs -from .crash import CrashReductionJob - - -LOG = logging.getLogger(__name__) - - -def bucket_crashes(bucket_id, quality_filter): - """Fetch all crash IDs for the specified FuzzManager bucket. - Only crashes with testcases are returned. - - Args: - bucket_id (int): ID of the requested bucket on the server side - quality_filter (int): Filter crashes by quality value (None for all) - - Returns: - generator: generator of crash ID (int) - """ - coll = Collector() - - def _get_results(endpoint, params=None): - """ - Function to get paginated results from FuzzManager - - Args: - endpoint (str): FuzzManager REST API to query (eg. "crashes"). 
- params (dict): Params to pass through to requests.get - - Returns: - generator: objects returned by FuzzManager (as dicts) - """ - LOG.debug("first request to /%s/", endpoint) - - url = "%s://%s:%d/crashmanager/rest/%s/" \ - % (coll.serverProtocol, coll.serverHost, coll.serverPort, endpoint) - - response = coll.get(url, params=params).json() - - while True: - LOG.debug("got %d/%d %s", len(response["results"]), response["count"], endpoint) - while response["results"]: - yield response["results"].pop() - - if response["next"] is None: - break - - LOG.debug("next request to /%s/", endpoint) - response = coll.get(response["next"]).json() - - # Get all crashes for bucket - query_args = [ - ("op", "AND"), - ("bucket", bucket_id), - ] - if quality_filter is not None: - query_args.append(("testcase__quality", quality_filter)) - query = json.dumps(collections.OrderedDict(query_args)) - - n_yielded = 0 - for crash in _get_results("crashes", params={"query": query, "include_raw": "0"}): - - if not crash["testcase"]: - LOG.warning("crash %d has no testcase, skipping", crash["id"]) - continue - - n_yielded += 1 - LOG.debug("yielding crash #%d", n_yielded) - yield crash["id"] - - -def get_signature(bucket_id): - """ - Download the signature for the specified FuzzManager bucket. - - Args: - bucket_id (int): ID of the requested bucket on the server side - - Returns: - str: temp filename to the JSON signature. caller must remove filename when done - """ - coll = Collector() - - url = "%s://%s:%d/crashmanager/rest/buckets/%d/" \ - % (coll.serverProtocol, coll.serverHost, coll.serverPort, bucket_id) - - response = coll.get(url).json() - - sig_fd, sig_fn = tempfile.mkstemp(suffix=".json") - with os.fdopen(sig_fd, "w") as sig_fp: - sig_fp.write(response["signature"]) - - return sig_fn - - -def main(args): - LOG.info("Trying all crashes in bucket %d until one reduces", args.input) - - # if no signature specified, download the signature from FM - rm_sig = False - if not args.sig: - args.sig = get_signature(args.input) - rm_sig = True - - try: - for crash_id in bucket_crashes(args.input, args.quality): - - # reduce.main expects input to be a crash ID - args.input = crash_id - - if CrashReductionJob.main(args) == 0: - # success! - return 0 - - # none of the testcases reduced - return 1 - - finally: - if rm_sig: - os.unlink(args.sig) - - -if __name__ == "__main__": - sys.exit(main(ReducerFuzzManagerIDQualityArgs().parse_args())) diff --git a/grizzly/reduce/crash.py b/grizzly/reduce/crash.py deleted file mode 100644 index dd17a6d4..00000000 --- a/grizzly/reduce/crash.py +++ /dev/null @@ -1,182 +0,0 @@ -# coding=utf-8 -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-import logging -import os -import re -import sys -import tempfile - -from Collector.Collector import Collector - -from .args import ReducerFuzzManagerIDArgs -from .reduce import ReductionJob -from ..common import FuzzManagerReporter - - -LOG = logging.getLogger(__name__) - - -def crashentry_data(crash_id, raw=False): - """Get the CrashEntry data for the specified FuzzManager crash - - Args: - crash_id (int): ID of the requested crash on the server side - raw (bool): include rawCrashData, rawStderr, rawStdout in result - - Returns: - dict: crash entry data (crashmanager.models.CrashEntry) - """ - coll = Collector() - - LOG.debug("crash %d, downloading metadata...", crash_id) - - url = "%s://%s:%d/crashmanager/rest/crashes/%s/" \ - % (coll.serverProtocol, coll.serverHost, coll.serverPort, crash_id) - - return coll.get(url, params={"include_raw": "1" if raw else "0"}).json() - - -def download_crash(crash_id): - """Download testcase for the specified FuzzManager crash. - - Args: - crash_id (int): ID of the requested crash on the server side - - Returns: - str: Temporary filename of the testcase. Caller must remove when finished. - """ - coll = Collector() - - LOG.debug("crash %d, downloading testcase...", crash_id) - - url = "%s://%s:%d/crashmanager/rest/crashes/%s/download/" \ - % (coll.serverProtocol, coll.serverHost, coll.serverPort, crash_id) - - response = coll.get(url) - - disp_m = re.match(r'^attachment; filename="(.*)"$', - response.headers.get("content-disposition", "")) - - if disp_m is None: - raise RuntimeError("Server sent malformed response: %r" % (response,)) - - prefix = "crash.%d." % (crash_id,) - suffix = os.path.splitext(disp_m.group(1))[1] - testcase_fd, testcase_fn = tempfile.mkstemp(prefix=prefix, suffix=suffix) - with os.fdopen(testcase_fd, "wb") as testcase_fp: - testcase_fp.write(response.content) - - return testcase_fn - - -def change_quality(crash_id, quality): - """Update a FuzzManager crash entry quality. - - Args: - crash_id (int): Crash ID on FuzzManager server - quality (int): Quality constant defined in FuzzManagerReporter.QUAL_* - - Returns: - None - """ - LOG.info("Updating crash %d to quality %s", crash_id, FuzzManagerReporter.quality_name(quality)) - coll = Collector() - - url = "%s://%s:%d/crashmanager/rest/crashes/%d/" \ - % (coll.serverProtocol, coll.serverHost, coll.serverPort, crash_id) - try: - Collector().patch(url, data={"testcase_quality": quality}) - except RuntimeError as exc: - # let 404's go .. 
evidently the crash was deleted - if str(exc) == "Unexpected HTTP response: 404": - LOG.warning("Failed to update (404), does the crash still exist?") - else: - raise - - -class CrashReductionJob(ReductionJob): - __slots__ = ['_crash_id', '_fm_reporter', '_quality', '_testcase_path', '_tool_override', - '_was_interesting'] - - def __init__(self, *args, **kwds): - super(CrashReductionJob, self).__init__(*args, **kwds) - self._crash_id = None - self._fm_reporter = False - self._quality = None - self._testcase_path = None - self._tool_override = False - self._was_interesting = False - - def on_result(self, result_code): - # only update quality of the original crash if we are reporting to FuzzManager - if not self._fm_reporter: - return - - if result_code == FuzzManagerReporter.QUAL_REDUCED_ORIGINAL: - # reduce succeeded - change_quality(self._crash_id, result_code) - - elif result_code == FuzzManagerReporter.QUAL_NOT_REPRODUCIBLE: - if self._quality == FuzzManagerReporter.QUAL_UNREDUCED: - # override result to request platform specific reduction - result_code = FuzzManagerReporter.QUAL_REQUEST_SPECIFIC - change_quality(self._crash_id, result_code) - - # for these cases, something went wrong. a reduce log/result would be really valuable - elif result_code in {FuzzManagerReporter.QUAL_REDUCER_BROKE, - FuzzManagerReporter.QUAL_REDUCER_ERROR}: - # for now just change the quality - change_quality(self._crash_id, result_code) - - else: - LOG.error("Got unhandled quality: %s", FuzzManagerReporter.quality_name(result_code)) - - def on_interesting_crash(self, *args, **kwds): - super(CrashReductionJob, self).on_interesting_crash(*args, **kwds) - if self._was_interesting: - return - LOG.info("Crash %d reproduced!", self._crash_id) - if self._fm_reporter: - change_quality(self._crash_id, FuzzManagerReporter.QUAL_REPRODUCIBLE) - self._was_interesting = True - - def run(self, *args, **kwds): - try: - return super(CrashReductionJob, self).run(*args, **kwds) - finally: - os.unlink(self._testcase_path) - - @classmethod - def from_args(cls, args, target): - LOG.info("Trying crash %d", args.input) - - try: - crash_id = args.input - testcase = download_crash(crash_id) - tool_override = args.tool is None - crash = crashentry_data(crash_id) - quality = crash["testcase_quality"] - if tool_override: - args.tool = crash["tool"] - LOG.info("Using toolname from crash: %s", args.tool) - - # reduce.main expects input to be a path to testcase - args.input = testcase - - job = super(CrashReductionJob, cls).from_args(args, target) - job._fm_reporter = args.fuzzmanager - job._crash_id = crash_id - job._tool_override = tool_override - job._quality = quality - job._testcase_path = testcase - return job - - except: # noqa - os.unlink(testcase) - raise - - -if __name__ == "__main__": - sys.exit(CrashReductionJob.main(ReducerFuzzManagerIDArgs().parse_args())) diff --git a/grizzly/reduce/exceptions.py b/grizzly/reduce/exceptions.py deleted file mode 100644 index f9ed245a..00000000 --- a/grizzly/reduce/exceptions.py +++ /dev/null @@ -1,23 +0,0 @@ -# coding=utf-8 -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
- -__author__ = "Jesse Schwartzentruber" -__credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] - - -class ReducerError(Exception): - pass - - -class TestcaseError(ReducerError): - pass - - -class NoTestcaseError(TestcaseError): - pass - - -class CorruptTestcaseError(TestcaseError): - pass diff --git a/grizzly/reduce/reduce.py b/grizzly/reduce/reduce.py index cede6ced..7aaeaf4e 100644 --- a/grizzly/reduce/reduce.py +++ b/grizzly/reduce/reduce.py @@ -3,1225 +3,340 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. """ -Given a build and testcase, try to reproduce it using a set of strategies. +Given a build and testcase, try to reduce it using a set of strategies. """ -from __future__ import absolute_import -import hashlib -import io -import json -import logging -import os -import re -import shutil -import tempfile -import time -import zipfile -import zlib +from logging import getLogger +from math import ceil, log +from pathlib import Path +from shutil import rmtree +from tempfile import mkdtemp -import lithium -import sapphire +from Collector.Collector import Collector from FTB.Signatures.CrashInfo import CrashSignature +from sapphire import Sapphire -from . import strategies as strategies_module, testcase_contents -from .exceptions import CorruptTestcaseError, NoTestcaseError, ReducerError -from ..common.reporter import FilesystemReporter, FuzzManagerReporter, Report -from ..common.runner import Runner, RunResult -from ..common.status import ReducerStats, Status -from ..common.storage import TestCase, TestFile +from ..common.reporter import FilesystemReporter, FuzzManagerReporter +from ..common.storage import TestCase, TestCaseLoadFailure, TestFile from ..common.utils import grz_tmp from ..main import configure_logging -from ..session import Session -from ..target import load as load_target, sanitizer_opts, TargetLaunchError, \ - TargetLaunchTimeout +from ..replay import ReplayManager, ReplayResult +from ..target import load as load_target, TargetLaunchError, TargetLaunchTimeout +from .strategies import STRATEGIES __author__ = "Jesse Schwartzentruber" -__credits__ = ["Tyson Smith", "Jesse Schwartzentruber", "Jason Kratzer"] +__credits__ = ["Jesse Schwartzentruber", "Tyson Smith"] -LOG = logging.getLogger(__name__) +LOG = getLogger(__name__) -class LithiumInterestingProxy(object): - """Proxy to use a ReductionJob object as a Lithium interestingness script object. - """ - __slots__ = ['_job'] - - def __init__(self, job): - self._job = job - - def init(self, _args): - """Lithium initialization entrypoint. - - Do any per-reduction loop setup needed. - - Args: - _args (unused): Command line arguments from Lithium (N/A) - - Returns: - None - """ - self._job.lithium_init() - - def interesting(self, _args, temp_prefix): - """Lithium main iteration entrypoint. - - This should try the reduction and return True or False based on whether the reduction was - good or bad. - - Args: - _args (unused): Command line arguments from Lithium (N/A) - temp_prefix (str): A unique prefix for any files written during this iteration. - - Returns: - bool: True if reduced testcase is still interesting. - """ - return self._job.lithium_interesting(temp_prefix) - - def cleanup(self, _args): - """Lithium cleanup entrypoint. - - Do any per-reduction loop cleanup needed. 
- - Args: - _args (unused): Command line arguments from Lithium (N/A) - - Returns: - None - """ - self._job.lithium_cleanup() - - -class IterationParamsProxy(object): - __slots__ = ['_fixed_timeout', '_job', '_use_result_cache'] - - def __init__(self, job): - self._job = job - self._use_result_cache = None - self._fixed_timeout = None - - def __enter__(self): - # disable result cache setting - self._use_result_cache = self._job._use_result_cache - self._job._use_result_cache = False - - # do not update the iteration timeout during analysis - self._fixed_timeout = self._job._fixed_timeout - self._job._fixed_timeout = True - - return self - - def __exit__(self, *_args): - # restore saved values - self._job._use_result_cache = self._use_result_cache - self._job._fixed_timeout = self._fixed_timeout - - @property - def force_no_harness(self): - return self._job._force_no_harness - - @property - def min_crashes(self): - return self._job._min_crashes - - @min_crashes.setter - def min_crashes(self, value): - self._job._min_crashes = value - - @property - def no_harness(self): - return self._job._no_harness - - @no_harness.setter - def no_harness(self, value): - self._job._no_harness = value - - @property - def relaunch(self): - return self._job._target.rl_reset - - @relaunch.setter - def relaunch(self, value): - self._job._target.rl_reset = min(self._job._original_relaunch, value) - - @property - def repeat(self): - return self._job._repeat - - @repeat.setter - def repeat(self, value): - self._job._repeat = value - - def commit(self): - # close target so new parameters take effect - self._job.close_target() - - -class TimeoutsUpdateProxy(object): - __slots__ = ['_job'] - - def __init__(self, job): - self._job = job - - @property - def idle(self): - return self._job._idle_timeout - - @idle.setter - def idle(self, value): - self._job._idle_timeout = value - - @property - def iteration(self): - return self._job._iter_timeout - - @iteration.setter - def iteration(self, value): - self._job._iter_timeout = value - - -class RunState(object): - __slots__ = ['files_to_reduce', 'original_size'] - - def __init__(self, files_to_reduce): - self.files_to_reduce = files_to_reduce - self.original_size = -1 - - def total_size(self): - return sum(os.stat(fn).st_size for fn in self.files_to_reduce) - - -class TestcaseUpdateProxy(object): - __slots__ = ['_job', '_run_state'] - - def __init__(self, job, run_state): - self._job = job - self._run_state = run_state - - @property - def cache_iter_harness_created(self): - return self._job._cache_iter_harness_created - - @property - def root(self): - return self._job._tcroot - - @root.setter - def root(self, value): - self._job._tcroot = value - - @property - def entry(self): - return self._job._testcase - - @entry.setter - def entry(self, value): - self._job._testcase = value - - @property - def landing_page(self): - return self._job.landing_page - - @landing_page.setter - def landing_page(self, value): - self._job.landing_page = value - - @property - def files_to_reduce(self): - return self._run_state.files_to_reduce - - @property - def original_size(self): - return self._run_state.original_size - - @original_size.setter - def original_size(self, value): - self._run_state.original_size = value - - def total_size(self): - return self._run_state.total_size() - - -class ReductionJob(object): - LOGGERS_TO_WATCH = ("ffpuppet", "grizzly", "lithium", "sapphire") - DEFAULT_STRATEGIES = ("line", "cssbeautify", "jsbeautify", "collapsebraces", "jschar") - __slots__ = [ - 
'_any_crash', '_best_testcase', '_cache_iter_harness_created', '_env_mod', - '_fixed_timeout', '_force_no_harness', '_idle_threshold', '_idle_timeout', '_ignore', - '_input_fname', '_interesting_report', '_iter_timeout', '_landing_page', '_log_handler', - '_min_crashes', '_no_harness', '_orig_sig', '_original_relaunch', '_other_crashes', - '_reduce_file', '_repeat', '_reporter', '_result_cache', '_result_code', '_server', '_server_map', - '_signature', '_skip', '_skip_analysis', '_skipped', '_status', '_target', '_tcroot', '_testcase', - '_tmpdir', '_use_result_cache', - ] - - def __init__(self, ignore, target, iter_timeout, no_harness, any_crash, skip, min_crashes, - repeat, idle_threshold, idle_timeout, testcase_cache=True, skip_analysis=False): - """Use lithium to reduce a testcase. - - Args: - target (grizzly.target.Target): Target object to use for reduction. - """ - self._any_crash = any_crash - self._best_testcase = None - self._cache_iter_harness_created = None - self._env_mod = None # environment if specified in the testcase - self._fixed_timeout = False # if True iter_timeout will not be changed - self._force_no_harness = no_harness - self._idle_threshold = idle_threshold - self._idle_timeout = idle_timeout - self._ignore = ignore # things to ignore - self._input_fname = None - self._interesting_report = None - self._iter_timeout = iter_timeout - self._landing_page = None # the file to point the target at - self._min_crashes = min_crashes - self._no_harness = no_harness - self._orig_sig = None # signature to reduce to (if specified) - self._original_relaunch = target.rl_reset - self._other_crashes = {} - self._reduce_file = None # the file to reduce - self._repeat = repeat - self._reporter = None - self._result_cache = {} - self._result_code = None - self._server = None # a server to serve with - self._server_map = sapphire.ServerMap() # manage dynamic requests, includes and redirects - self._signature = None - self._skip = skip - self._skip_analysis = skip_analysis - self._skipped = None - self._status = Status.start() - self._target = target # a Puppet to run with - self._testcase = None - # testcase cache remembers if we have seen this reduce_file before and if so return the same - # interesting result - self._use_result_cache = testcase_cache - self._tmpdir = tempfile.mkdtemp(prefix="grzreduce", dir=grz_tmp("reduce")) - self._tcroot = os.path.join(self._tmpdir, "tc") - self._log_handler = self._start_log_capture() - if not self._skip_analysis: - # see if any of the args tweaked by analysis were overridden - # --relaunch is regarded as a maximum, so overriding the default is not a deal-breaker for this - if self._min_crashes != 1: - LOG.warning("--min-crashes=%d was given, skipping analysis", self._min_crashes) - self._skip_analysis = True - elif self._repeat != 1: - LOG.warning("--repeat=%d was given, skipping analysis", self._repeat) - self._skip_analysis = True - - @property - def landing_page(self): - return os.path.basename(self._landing_page) - - @landing_page.setter - def landing_page(self, value): - # this looks pointless, but it isn't since it affects both landing_page and wwwdir getters - self._landing_page = value - - @property - def reduce_file(self): - return self._reduce_file - - @reduce_file.setter - def reduce_file(self, value): - self._reduce_file = value - # landing page should default to same value as reduce file - if self._landing_page is None: - self._landing_page = value - - @property - def result_code(self): - return self._result_code - - @property - def 
server(self): - return self._server - - @property - def target(self): - return self._target - - @property - def wwwdir(self): - return os.path.dirname(os.path.realpath(self._landing_page)) - - def timeouts_proxy(self): - """Return a proxy for modifying the job timeouts. - - Returns: - (object): an object used to modify the timeouts for this job - attributes: - - iteration (iteration timeout) - - idle (idle timeout) - """ - return TimeoutsUpdateProxy(self) - - def testcase_proxy(self, run_state): - """Return a proxy for modifying the testcase. - - Returns: - (object): an object used to modify the testcase for this job - attributes: - - iteration (iteration timeout) - - idle (idle timeout) - """ - return TestcaseUpdateProxy(self, run_state) - - def analysis_mode(self, min_crashes=1, relaunch=1, repeat=1): - """Set parameters for testcase analysis. This has side-effects besides being a proxy: - - - min_crashes/repeat/relaunch are preset according to the function parameters - - result cache is disabled (so every iteration runs fully) if used as a with-statement context - - times are not dynamically adjusted if used as a with-statement context - - Args: - min_crashes (int): How many crashes are needed for a success. - relaunch (int): How many iterations between relaunch. - repeat (int): How many times to repeat the testcase per iteration. - - Returns: - (context manager): an object that can be used to set new parameters - as a result of analysis: - attributes: - - min_crashes - - no_harness - - relaunch - - repeat - """ - # pylint: disable=no-self-argument,no-self-use,protected-access - - proxy = IterationParamsProxy(self) - - # Set parameters for analysis - proxy.min_crashes = min_crashes - proxy.repeat = repeat - proxy.relaunch = relaunch - - return proxy - - def close_target(self): - if not self._target.closed: - self._target.close() - - def lithium_init(self): - """Lithium initialization entrypoint. Do any per-reduction loop setup needed. - - Args: - None - - Returns: - None - """ - self._skipped = None - self._best_testcase = None - self._result_cache = {} - - def lithium_interesting(self, temp_prefix): - """Lithium main iteration entrypoint. - - This should try the reduction and return True or False based on whether the reduction was - good or bad. This is subject to a number of options (skip, repeat, cache) and so may - result in 0 or more actual runs of the target. - - Args: - temp_prefix (str): A unique prefix for any files written during this iteration. - - Returns: - bool: True if reduced testcase is still interesting. 
- """ - # ensure the target is closed so "repeat" and "relaunch" never get out of sync - self.close_target() - if self._skip: - if self._skipped is None: - self._skipped = 0 - elif self._skipped < self._skip: - self._skipped += 1 - return False - n_crashes = 0 - n_tries = max(self._repeat, self._min_crashes) - if self._use_result_cache: - with open(self.reduce_file, "rb") as test_fp: - cache_key = hashlib.sha1(test_fp.read()).hexdigest() - if cache_key in self._result_cache: - result = self._result_cache[cache_key]['result'] - if result: - LOG.info("Interesting (cached)") - else: - LOG.info("Uninteresting (cached)") - return result - - # create the TestCase to try - testcase = TestCase(self.landing_page, None, "grizzly.reduce", input_fname=self._input_fname) - - # add testcase contents - for file_name in testcase_contents(self.wwwdir): - testcase.add_from_file(os.path.join(self.wwwdir, file_name), file_name, - required=bool(file_name == self.landing_page)) - - # add prefs - if self._target.prefs is not None: - testcase.add_meta(TestFile.from_file(self._target.prefs, "prefs.js")) - - # add environment variables - if self._env_mod is not None: - for name, value in self._env_mod.items(): - testcase.add_environ_var(name, value) - - max_duration = 0 - run_prefix = None - for try_num in range(n_tries): - if (n_tries - try_num) < (self._min_crashes - n_crashes): - break # no longer possible to get min_crashes, so stop - self._status.report() - self._status.iteration += 1 - run_prefix = "%s(%d)" % (temp_prefix, try_num) - interesting_report = self._run(testcase, run_prefix) - if interesting_report: - # track the maximum duration of the successful reduction attempts - if testcase.duration > max_duration: - max_duration = testcase.duration - n_crashes += 1 - if n_crashes >= self._min_crashes: - self.on_interesting_crash(interesting_report) - if self._use_result_cache: - self._result_cache[cache_key] = { - 'result': interesting_report, - 'prefix': run_prefix - } - self._best_testcase = testcase - # the amount of time it can take to replay a test case can vary - # when under Valgrind so do not update the timeout in that case - if not self._fixed_timeout and not getattr(self._target, "use_valgrind", False): - self.update_timeout(max_duration) - return True - if self._use_result_cache: - # No need to save the temp_prefix on uninteresting testcases - # But let's do it anyway to stay consistent - self._result_cache[cache_key] = { - 'result': None, - 'prefix': run_prefix - } - return False - - def lithium_cleanup(self): - """Lithium cleanup entrypoint. Do any per-reduction loop cleanup needed. - - Args: - None - - Returns: - None - """ - try: - if self._server is not None: - self._server.close() - self._server = None - finally: - if self._target is not None: - self._target.close() - - def _add_san_suppressions(self, supp_file): - # Update the sanitizer *SAN_OPTIONS environment variable to use provided - # suppressions file - opt_key = '%s_OPTIONS' % os.path.basename(supp_file).split('.')[0].upper() - opts_data = self._env_mod.get(opt_key, '') - # the value matching *SAN_OPTIONS can be set to None - if opts_data is None: - opts_data = '' - opts = sanitizer_opts(opts_data) - opts['suppressions'] = '\'%s\'' % (supp_file,) - self._env_mod[opt_key] = ':'.join('='.join((k, v)) for k, v in opts.items()) - - def _start_log_capture(self): - """Add a log handler for grizzly and lithium messages generated during this job. 
- The handler is removed again by close() - - Args: - None - - Returns: - logging.Handler: The log handler to be removed later. - """ - formatter = logging.Formatter("%(levelname).1s %(name)s [%(asctime)s] %(message)s") - handler = logging.FileHandler(os.path.join(self._tmpdir, "reducelog.txt")) - handler.setLevel(logging.DEBUG) - handler.setFormatter(formatter) - for logname in self.LOGGERS_TO_WATCH: - logging.getLogger(logname).addHandler(handler) - - # check that DEBUG messages will actually get through - # if the root logger level is > DEBUG, messages will not get through to our log handler - # set root to DEBUG, and propagate the old root level to each root handler - root_logger = logging.getLogger() - root_level = root_logger.getEffectiveLevel() - if root_level > logging.DEBUG: - root_logger.setLevel(logging.DEBUG) - for root_handler in root_logger.handlers: - if root_handler.level < root_level: - root_handler.setLevel(root_level) - - return handler - - def update_timeout(self, run_time): - # If run_time is less than poll-time, update it - LOG.debug('Run time %r', run_time) - new_poll_timeout = max(10, min(run_time * 1.5, self._idle_timeout)) - if new_poll_timeout < self._idle_timeout: - LOG.info("Updating poll timeout to: %r", new_poll_timeout) - self._idle_timeout = new_poll_timeout - # If run_time * 2 is less than iter_timeout, update it - # in other words, decrease the timeout if this ran in less than half the timeout - # (floored at 10s) - new_iter_timeout = max(10, min(run_time * 2, self._iter_timeout)) - if new_iter_timeout < self._iter_timeout: - LOG.info("Updating max timeout to: %r", new_iter_timeout) - self._iter_timeout = new_iter_timeout - - def _run(self, testcase, temp_prefix): - """Run a single iteration against the target and determine if it is interesting. This is the - low-level iteration function used by `interesting`. - - Args: - testcase (TestCase): The testcase to serve - temp_prefix (str): A unique prefix for any files written during this iteration. - - Returns: - Report: Report from reduced testcase if still interesting else None. - """ - interesting = None - - # if target is closed and server is alive, we should restart it or else the first request - # against /first_test will 404 - if self._target.closed and self._server is not None: - self._server.close() - self._server = None - self._server_map.dynamic.clear() - self._server_map.redirect.clear() - - # launch sapphire if needed - if self._server is None: - # have client error pages (code 4XX) call window.close() after a few seconds - self._server = sapphire.Sapphire(auto_close=2) - - if not self._no_harness: - harness = os.path.join(os.path.dirname(__file__), '..', 'common', 'harness.html') - with open(harness, 'rb') as harness_fp: - harness = harness_fp.read() - self._server_map.set_dynamic_response("grz_harness", lambda: harness, mime_type="text/html") - self._server_map.set_redirect("grz_current_test", str(self.landing_page), required=False) +def change_quality(crash_id, quality): + """Update a FuzzManager crash entry quality. 
- runner = Runner(self._server, self._target, self._idle_threshold, self._idle_timeout) - if self._no_harness: - self._server.timeout = self._iter_timeout - else: - # wait a few extra seconds to avoid races between the harness & sapphire timing out - self._server.timeout = self._iter_timeout + 10 - - # (re)launch Target - if self._target.closed: - if self._no_harness: - location = runner.location( - "/grz_current_test", - self._server.port) - else: - location = runner.location( - "/grz_harness", - self._server.port, - close_after=self._target.rl_reset, - forced_close=self._target.forced_close, - timeout=self._iter_timeout) - # Try to launch the browser, retry 4 times at most - runner.launch(location, env_mod=self._env_mod, max_retries=4, retry_delay=15) - self._target.step() - - if not self._no_harness: - def _dyn_resp_close(): # pragma: no cover - if self.target.monitor.is_healthy(): - # delay to help catch window close/shutdown related crashes - time.sleep(0.1) - self.target.close() - return b"

<h1>Close Browser</h1>
" - self._server_map.set_dynamic_response("grz_close_browser", _dyn_resp_close, mime_type="text/html") - self._server_map.set_redirect("grz_next_test", str(self.landing_page), required=True) - - # run test case - result = runner.run(self._ignore, self._server_map, testcase, wait_for_callback=self._no_harness) - testcase.duration = result.duration - - # handle failure if detected - if result.status == RunResult.FAILED: - self._target.close() - testcase.purge_optional(result.served) - - # save logs - result_logs = temp_prefix + "_logs" - if not os.path.exists(result_logs): - os.mkdir(result_logs) - self._target.save_logs(result_logs) - - # create report - report = Report(result_logs, self._target.binary) - - short_sig = report.crash_info.createShortSignature() - if short_sig == "No crash detected": - # XXX: need to change this to support reducing timeouts? - LOG.info("Uninteresting: no crash detected") - elif self._orig_sig is None or self._orig_sig.matches(report.crash_info): - interesting = report - LOG.info("Interesting: %s", short_sig) - if self._orig_sig is None and not self._any_crash: - self._orig_sig = report.crash_signature - else: - LOG.info("Uninteresting: different signature: %s", short_sig) - self.on_other_crash_found(testcase, report) - - elif result.status == RunResult.IGNORED: - LOG.info("Uninteresting: ignored") - self._target.close() - - else: - LOG.info("Uninteresting: no failure detected") - - # trigger relaunch by closing the browser if needed - self._target.check_relaunch() - - return interesting - - def _stop_log_capture(self): - """Stop handling reduce logs. + Args: + crash_id (int): Crash ID on FuzzManager server + quality (int): Quality constant defined in FuzzManagerReporter.QUAL_* - Args: - None - - Returns: - None - """ - if self._log_handler is None: - return - for logname in self.LOGGERS_TO_WATCH: - logging.getLogger(logname).removeHandler(self._log_handler) - self._log_handler.flush() - self._log_handler.close() - self._log_handler = None - - def config_environ(self, environ): - with open(environ) as in_fp: - try: - self._env_mod = json.load(in_fp).get('env', {}) - except ValueError: - # TODO: remove this once switched to 'test_info.json' - # legacy support for 'env_vars.txt' - self._env_mod = {} - in_fp.seek(0) - for line in in_fp: - line = line.rstrip() - if not line: - continue - key, value = line.split('=', 1) - if not value: - value = None - self._env_mod[key] = value - # known sanitizer suppression files - known_suppressions = ('lsan.supp', 'ubsan.supp') - working_dir = os.path.dirname(environ) - for file_name in os.listdir(working_dir): - if file_name in known_suppressions: - self._add_san_suppressions(os.path.join(working_dir, file_name)) - - def config_signature(self, signature): - """Configure a signature to use for reduction. If none is given, an automatic signature is - created based on the initial repro. - - Args: - signature (str): A JSON signature to match for reduction. - - Returns: - None - """ - self._signature = CrashSignature(signature) - - @staticmethod - def _get_landing_page(testpath): - """Parse test_info.json for landing page - - Args: - testpath (str): Path to a testcase folder (containing a test_info.json from Grizzly). 
- - Returns: - str: Path to the landing page within testpath - """ - info_file = os.path.join(testpath, "test_info.json") - if os.path.isfile(info_file): - with open(info_file) as info: - landing_page = json.load(info).get("target", None) - if landing_page is None: - raise ReducerError("Could not find landing page in %s!" % (os.path.abspath(info_file),)) - landing_page = os.path.join(testpath, landing_page) + Returns: + None + """ + LOG.info("Updating crash %d to quality %s", crash_id, FuzzManagerReporter.quality_name(quality)) + coll = Collector() + + url = "%s://%s:%d/crashmanager/rest/crashes/%d/" \ + % (coll.serverProtocol, coll.serverHost, coll.serverPort, crash_id) + try: + Collector().patch(url, data={"testcase_quality": quality}) + except RuntimeError as exc: + # let 404's go .. evidently the crash was deleted + if str(exc) == "Unexpected HTTP response: 404": + LOG.warning("Failed to update (404), does the crash still exist?") else: - LOG.warning("Using deprecated test_info.txt") - with io.open(os.path.join(testpath, "test_info.txt"), encoding="utf-8") as info: - for line in info: - if line.lower().startswith("landing page: "): - landing_page = os.path.join(testpath, - line.split(": ", 1)[1].strip()) - break - else: - raise ReducerError("Could not find landing page in %s!" - % (os.path.abspath(info.name),)) - if not os.path.isfile(landing_page): - raise ReducerError("Landing page %s does not exist in %s!" - % (landing_page, os.path.abspath(info.name))) - return landing_page - - def _http_abspath(self, path): - """Return an absolute HTTP path to `path` relative to tcroot""" - path = os.path.relpath(path, self._tcroot) - return '/' + '/'.join(path.split(os.sep)) - - def config_testcase(self, testcase): - """Prepare a user provided testcase for reduction. - - Args: - testcase (str): Path to a testcase. This should be a Grizzly testcase (zip or folder) or html - file. 
- - Returns: - None - """ - try: - # extract the testcase if necessary - if os.path.exists(self._tcroot): - raise ReducerError("Testcase already configured?") - if os.path.isfile(testcase): - if testcase.lower().endswith(".html"): - os.mkdir(self._tcroot) - shutil.copy(testcase, self._tcroot) - info = {"target": os.path.basename(testcase)} - with open(os.path.join(self._tcroot, "test_info.json"), "w") as info_fp: - json.dump(info, info_fp, indent=2, sort_keys=True) - elif testcase.lower().endswith(".zip"): - os.mkdir(self._tcroot) - try: - with zipfile.ZipFile(testcase) as zip_fp: - zip_fp.extractall(path=self._tcroot) - except (zlib.error, zipfile.BadZipfile): - raise CorruptTestcaseError("Testcase is corrupted") - else: - raise ReducerError("Testcase must be zip, html, or directory") - elif os.path.isdir(testcase): - shutil.copytree(testcase, self._tcroot) - else: - raise ReducerError("Testcase must be zip, html or directory") - - self._input_fname = os.path.basename(testcase) - - # get a list of all directories containing testcases (1-n, depending on how much history - # grizzly saved) - entries = set(os.listdir(self._tcroot)) - if "test_info.json" in entries: - dirs = [self._tcroot] - elif "test_info.txt" in entries: - dirs = [self._tcroot] - else: - dirs = sorted([os.path.join(self._tcroot, entry) for entry in entries - if os.path.exists(os.path.join(self._tcroot, entry, "test_info.json")) - or os.path.exists(os.path.join(self._tcroot, entry, "test_info.txt"))], - key=lambda x: -int(x.rsplit('-', 1)[1])) - if not dirs: - raise NoTestcaseError("No testcase recognized at %r" % (testcase,)) - - # check for included prefs and environment - if "prefs.js" in os.listdir(dirs[0]): - # move the file out of tcroot because we prune these non-testcase files later - os.rename(os.path.join(dirs[0], "prefs.js"), os.path.join(self._tmpdir, "prefs.js")) - self._target.prefs = os.path.abspath(os.path.join(self._tmpdir, "prefs.js")) - LOG.warning("Using prefs included in testcase: %r", self._target.prefs) - if "test_info.json" in os.listdir(dirs[0]): - self.config_environ(os.path.join(dirs[0], "test_info.json")) - elif "env_vars.txt" in os.listdir(dirs[0]): - # TODO: remove this block once move to 'test_info.json' is complete - self.config_environ(os.path.join(dirs[0], "env_vars.txt")) - if self._env_mod: - LOG.warning("Using environment included in testcase") - self._target.forced_close = self._env_mod.get("GRZ_FORCED_CLOSE") != "0" - - # if dirs is singular, we can use the testcase directly, otherwise we need to iterate over - # them all in order - pages = [self._get_landing_page(d) for d in dirs] - if len(pages) == 1: - self._testcase = pages[0] - self._cache_iter_harness_created = False - - else: - # create a harness to iterate over the whole history - harness_path = os.path.join(os.path.dirname(__file__), '..', 'common', 'harness.html') - with io.open(harness_path, encoding="utf-8") as harness_fp: - harness = harness_fp.read() - # change dump string so that logs can be told apart - harness = harness.replace("[grz harness]", "[cache iter]") - # change the window name so that window.open doesn't clobber self - harness = harness.replace("'GrizzlyFuzz'", "'CacheIterator'") - # insert the iteration timeout. 
insert it directly because we can't set a hash value - new_harness = re.sub(r"^(\s*let\s.*\btime_limit\b)", - r"\1 = %d" % (self._iter_timeout * 1000), - harness, - flags=re.MULTILINE) - if new_harness == harness: - raise ReducerError("Unable to set time_limit in harness, please update pattern " - "to match harness!") - harness = new_harness - # make first test and next test grab from the array - harness = harness.replace("'/grz_current_test'", "_reduce_next()") - harness = harness.replace("'/grz_next_test'", "_reduce_next()") - # insert the close condition. we are iterating over the array of landing pages, - # undefined means we hit the end and the harness should close - # newer harness uses conditional operator in open() call - if re.search(r'open\(.*_reduce_next\(\)\s*:\s*_reduce_next\(\)', harness) is None: - raise ReducerError("Unable to insert finish condition, please update pattern " - "to match harness!") - # insert the landing page loop - harness = harness.replace("\n", + test_name="test.html", + expected_run_calls=7, + expected_results={"'required'"}, + expected_num_reports=2, + strategies=["jsbeautify", "lines"], + ), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + ), + # test beautify a .css file + pytest.param( + *BeautifyStrategyParams( + test_data="*,#a{fluff:0;required:1}\n", + test_name="test.css", + expected_run_calls=8, + expected_results={"required: 1"}, + expected_num_reports=2, + strategies=["cssbeautify", "lines"], + ), + marks=pytest.mark.skipif(not HAVE_CSSBEAUTIFIER, reason="cssbeautifier required"), + ), + # test beautify css embedded in html + pytest.param( + *BeautifyStrategyParams( + test_data="\n", + test_name="test.html", + expected_run_calls=6, + expected_results={"required: 1"}, + expected_num_reports=2, + strategies=["cssbeautify", "lines"], + ), + marks=pytest.mark.skipif(not HAVE_CSSBEAUTIFIER, reason="cssbeautifier required"), + ), + ] +) +def test_beautifier(mocker, tmp_path, test_data, test_name, expected_run_calls, expected_results, + expected_num_reports, strategies): + """test for the "beautify" strategies""" + replayer = mocker.patch("grizzly.reduce.core.ReplayManager", autospec=True) + + def replay_run(testcases, **_): + for test in testcases: + contents = test.get_file(test_name).data.decode("ascii") + LOG.debug("interesting if 'required' in %r", contents) + if "required" in contents: + log_path = tmp_path / ("crash%d_logs" % (replayer.return_value.run.call_count,)) + log_path.mkdir() + _fake_save_logs_foo(log_path) + report = Report(str(log_path), "bin") + return [ReplayResult(report, [test_name], [], True)] + return [] + replayer.return_value.run.side_effect = replay_run + + test = TestCase(test_name, None, "test-adapter") + test.add_from_data(test_data, test_name) + tests = [test] + log_path = tmp_path / "logs" + + target = mocker.Mock(spec=Target) + target.relaunch = 1 + try: + mgr = ReduceManager([], mocker.Mock(spec=Sapphire), target, tests, strategies, log_path, + use_analysis=False) + assert mgr.run() + finally: + for test in tests: + test.cleanup() + + assert replayer.return_value.run.call_count == expected_run_calls + assert set(log_path.iterdir()) == {log_path / "reports"} + tests = {test.read_text().strip() for test in log_path.glob("reports/*-*/" + test_name)} + assert tests == expected_results + assert len(list((log_path / "reports").iterdir())) == expected_num_reports, \ + list((log_path / "reports").iterdir()) diff --git a/setup.py b/setup.py index 02deb6e9..35a2007b 100755 --- a/setup.py +++ 
b/setup.py
@@ -42,8 +42,12 @@
         'ffpuppet = grizzly.target.puppet_target:PuppetTarget',
     ],
     'grizzly_reduce_strategies': [
-        'check = grizzly.reduce.strategies:Check',
         'chars = grizzly.reduce.strategies:MinimizeChars',
+        'check = grizzly.reduce.strategies:Check',
+        'collapsebraces = grizzly.reduce.strategies:CollapseEmptyBraces',
+        'cssbeautify = grizzly.reduce.strategies:CSSBeautify',
+        'jsbeautify = grizzly.reduce.strategies:JSBeautify',
+        'jschars = grizzly.reduce.strategies:MinimizeJSChars',
         'lines = grizzly.reduce.strategies:MinimizeLines',
         'list = grizzly.reduce.strategies:MinimizeTestcaseList',
     ],

From ec8e8f10885f0bb037acaa987bf9d5ce7adc1cef Mon Sep 17 00:00:00 2001
From: Jesse Schwartzentruber
Date: Wed, 14 Oct 2020 15:56:58 -0400
Subject: [PATCH 059/531] Implement sanity checking for abstract attributes
 required by the reduction strategy classes.

---
 grizzly/reduce/strategies.py      | 56 ++++++++++++++++++++++++-------
 grizzly/reduce/test_strategies.py |  4 +++
 2 files changed, 48 insertions(+), 12 deletions(-)

diff --git a/grizzly/reduce/strategies.py b/grizzly/reduce/strategies.py
index f73b92f1..cee25310 100644
--- a/grizzly/reduce/strategies.py
+++ b/grizzly/reduce/strategies.py
@@ -14,8 +14,9 @@
 from tempfile import mkdtemp
 from types import MappingProxyType

-from lithium.strategies import CheckOnly, CollapseEmptyBraces as LithCollapseEmptyBraces, Minimize
-from lithium.testcases import TestcaseChar, TestcaseJsStr, TestcaseLine
+from lithium.strategies import CheckOnly, CollapseEmptyBraces as LithCollapseEmptyBraces, Minimize, \
+    Strategy as LithStrategy
+from lithium.testcases import TestcaseChar, TestcaseJsStr, TestcaseLine, Testcase as LithTestcase
 from pkg_resources import iter_entry_points

 from ..common.utils import grz_tmp
@@ -52,6 +53,7 @@ def _load_strategies():
     for entry_point in iter_entry_points("grizzly_reduce_strategies"):
         try:
             strategy_cls = entry_point.load()
+            strategy_cls.sanity_check_impl()
             assert (
                 strategy_cls.name == entry_point.name
             ), "entry_point name mismatch, check setup.py and %s.name" % (
@@ -76,6 +78,8 @@ class Strategy(ABC):
     Attributes:
         name (str): The strategy name.
     """
+    name = None
+
     def __init__(self, testcases):
         self._testcase_root = Path(mkdtemp(prefix="tc_", dir=grz_tmp("reduce")))
         for idx, testcase in enumerate(testcases):
@@ -83,6 +87,10 @@ def __init__(self, testcases):
             testpath = self._testcase_root / ("%03d" % (idx,))
             testcase.dump(str(testpath), include_details=True)

+    @classmethod
+    def sanity_check_impl(cls):
+        assert isinstance(cls.name, str)
+
     @abstractmethod
     def __iter__(self):
         pass
@@ -112,22 +120,37 @@ class _BeautifyStrategy(Strategy, ABC):
         native_extension (str): The native file extension for this type.
         tag_name (str): Tag name to search for in other (non-native) extensions.
""" + all_extensions = None blacklist_files = {"test_info.json", "prefs.js"} + import_available = None + import_name = None + native_extension = None + tag_name = None def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self._files_to_reduce = [] for path in self._testcase_root.glob("**/*"): - if (path.is_file() and path.suffix in self.all_extensions # pylint: disable=no-member + if (path.is_file() and path.suffix in self.all_extensions and path.name not in self.blacklist_files): self._files_to_reduce.append(path) self._current_feedback = None - tag_bytes = self.tag_name.encode("ascii") # pylint: disable=no-member + tag_bytes = self.tag_name.encode("ascii") self._re_tag = re.compile(br"(<" + tag_bytes + br".*?>)(.*?)()", flags=re.DOTALL | re.IGNORECASE) self._re_tag_start = re.compile(br"<" + tag_bytes + br".*?>\s*$", flags=re.DOTALL | re.IGNORECASE) self._re_tag_end = re.compile(br"^\s*", flags=re.IGNORECASE) + @classmethod + def sanity_check_impl(cls): + super().sanity_check_impl() + assert isinstance(cls.all_extensions, set) + assert all(isinstance(ext, str) for ext in cls.all_extensions) + assert isinstance(cls.import_available, bool) + assert isinstance(cls.import_name, str) + assert isinstance(cls.native_extension, str) + assert isinstance(cls.tag_name, str) + def update(self, success): assert self._current_feedback is None self._current_feedback = success @@ -138,8 +161,8 @@ def beautify_bytes(cls, data): pass def __iter__(self): - if not self.import_available: # pylint: disable=no-member - LOG.warning("%s not available, skipping strategy.", self.import_name) # pylint: disable=no-member + if not self.import_available: + LOG.warning("%s not available, skipping strategy.", self.import_name) return LOG.info("Beautifying %d files", len(self._files_to_reduce)) @@ -156,7 +179,7 @@ def __iter__(self): in_tag_already = (self._re_tag_start.match(lith_tc.before) is not None and self._re_tag_end.match(lith_tc.after) is not None) - if file.suffix == self.native_extension or in_tag_already: # pylint: disable=no-member + if file.suffix == self.native_extension or in_tag_already: with file.open("wb") as testcase_fp: testcase_fp.write(lith_tc.before) testcase_fp.write(self.beautify_bytes(to_reduce)) @@ -179,16 +202,16 @@ def __iter__(self): testcase_fp.write(to_reduce[pos:]) testcase_fp.write(lith_tc.after) if pos == 0: - LOG.warning("<%s> tags not found, skipping", self.tag_name) # pylint: disable=no-member + LOG.warning("<%s> tags not found, skipping", self.tag_name) continue yield TestCase.load(str(self._testcase_root), False) assert self._current_feedback is not None, "No feedback for last iteration" if self._current_feedback: - LOG.info("%s was successful", self.name) # pylint: disable=no-member + LOG.info("%s was successful", self.name) else: - LOG.warning("%s failed (reverting)", self.name) # pylint: disable=no-member + LOG.warning("%s failed (reverting)", self.name) lith_tc.dump(file) self._current_feedback = None @@ -201,6 +224,9 @@ class _LithiumStrategy(Strategy, ABC): strategy_cls (lithium.strategies.Strategy): Lithium strategy type. testcase_cls (lithium.testcases.Testcase): Lithium testcase type. 
""" + strategy_cls = None + testcase_cls = None + def __init__(self, *args, **kwds): super().__init__(*args, **kwds) self._current_reducer = None @@ -209,6 +235,12 @@ def __init__(self, *args, **kwds): if path.is_file() and path.name not in {"test_info.json", "prefs.js"}: self._files_to_reduce.append(path) + @classmethod + def sanity_check_impl(cls): + super().sanity_check_impl() + assert issubclass(cls.strategy_cls, LithStrategy) + assert issubclass(cls.testcase_cls, LithTestcase) + def update(self, success): assert self._current_reducer is not None self._current_reducer.feedback(success) @@ -217,9 +249,9 @@ def __iter__(self): LOG.info("Reducing %d files", len(self._files_to_reduce)) for file_no, file in enumerate(self._files_to_reduce): LOG.info("Reducing %s (file %d/%d)", file, file_no + 1, len(self._files_to_reduce)) - lithium_testcase = self.testcase_cls() # pylint: disable=no-member + lithium_testcase = self.testcase_cls() # pylint: disable=not-callable lithium_testcase.load(file) - # pylint: disable=no-member + # pylint: disable=not-callable self._current_reducer = self.strategy_cls().reduce(lithium_testcase) for reduction in self._current_reducer: reduction.dump() diff --git a/grizzly/reduce/test_strategies.py b/grizzly/reduce/test_strategies.py index 225485ea..ca9b8f0d 100644 --- a/grizzly/reduce/test_strategies.py +++ b/grizzly/reduce/test_strategies.py @@ -41,6 +41,10 @@ def test_strategy_load_fail(mocker): """test that a broken strategy doesn't block other strategies""" class _GoodStrategy(object): name = "good" + @classmethod + def sanity_check_impl(cls): + pass + @classmethod def load(cls): return cls From 4e0e774341a50864232dbfde5f66e48bfa4f44f2 Mon Sep 17 00:00:00 2001 From: Jesse Schwartzentruber Date: Thu, 15 Oct 2020 09:42:36 -0400 Subject: [PATCH 060/531] Fix JS/CSS Beautifier strategies. 
They should work now for any \n", test_name="test.html", - expected_run_calls=7, - expected_results={"'required'"}, + expected_run_calls=1, + expected_results={"\n"}, expected_num_reports=2, - strategies=["jsbeautify", "lines"], + strategies=["jsbeautify"], + ), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + ), + # test beautify multiple js embedded in html + pytest.param( + *BeautifyStrategyParams( + test_data="" + "\n", + test_name="test.html", + expected_run_calls=1, + expected_results={"" + "\n"}, + expected_num_reports=2, + strategies=["jsbeautify"], + ), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + ), + # test beautify js embedded in html with no end + pytest.param( + *BeautifyStrategyParams( + test_data="\n", + test_name="test.html", + expected_run_calls=1, + expected_results={"\n\n"}, + expected_num_reports=2, + strategies=["jsbeautify"], + ), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + ), + # test DDBEGIN/END respected for js embedded in html, DD inside \n", + test_name="test.html", + expected_run_calls=1, + expected_results={"\n"}, + expected_num_reports=2, + strategies=["jsbeautify"], + ), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + ), + # test DDBEGIN/END respected for js embedded in html, DD straddle before + pytest.param( + *BeautifyStrategyParams( + test_data="\n\n", + test_name="test.html", + expected_run_calls=1, + expected_results={"\n\n"}, + expected_num_reports=2, + strategies=["jsbeautify"], + ), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + ), + # test beautify js embedded in html (no \n", test_name="test.html", expected_run_calls=1, - expected_results={"\n"}, + expected_results={"\n"}, expected_num_reports=2, strategies=["jsbeautify"], ), - marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, + reason="jsbeautifier required"), ), # test beautify multiple js embedded in html pytest.param( @@ -195,12 +203,14 @@ def replay_run(testcases, **_): "\n", test_name="test.html", expected_run_calls=1, - expected_results={"" - "\n"}, + expected_results={"\n"}, expected_num_reports=2, strategies=["jsbeautify"], ), - marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, + reason="jsbeautifier required"), ), # test beautify js embedded in html with no end pytest.param( @@ -208,11 +218,14 @@ def replay_run(testcases, **_): test_data="\n", + test_data="\n\n", test_name="test.html", expected_run_calls=1, - expected_results={"\n\n"}, + expected_results={ + "\n\n" + }, expected_num_reports=2, strategies=["jsbeautify"], ), - marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, + reason="jsbeautifier required"), ), # test DDBEGIN/END respected for js embedded in html, DD inside \n", + test_data="\n", test_name="test.html", expected_run_calls=1, - expected_results={"\n"}, + expected_results={ + "\n" + }, expected_num_reports=2, strategies=["jsbeautify"], ), - marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, + reason="jsbeautifier required"), ), - # test DDBEGIN/END respected for js embedded in html, DD straddle before + # test DDBEGIN/END respected for js embedded in html, DD straddle 
after + # pytest.param( *BeautifyStrategyParams( - test_data="\n\n", + test_data="\n\n", test_name="test.html", expected_run_calls=1, - expected_results={"\n\n"}, + expected_results={ + "\n\n" + }, expected_num_reports=2, strategies=["jsbeautify"], ), - marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), + marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, + reason="jsbeautifier required"), ), # test beautify js embedded in html (no \n"}, expected_num_reports=2, strategies=["jsbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -212,6 +215,7 @@ def replay_run(testcases, **_): "requisite'\n\n"}, expected_num_reports=2, strategies=["jsbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -227,6 +231,7 @@ def replay_run(testcases, **_): }, expected_num_reports=2, strategies=["jsbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -242,6 +247,7 @@ def replay_run(testcases, **_): }, expected_num_reports=2, strategies=["jsbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -259,6 +265,7 @@ def replay_run(testcases, **_): }, expected_num_reports=2, strategies=["jsbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -276,6 +283,7 @@ def replay_run(testcases, **_): }, expected_num_reports=2, strategies=["jsbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -294,6 +302,7 @@ def replay_run(testcases, **_): }, expected_num_reports=2, strategies=["jsbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -311,6 +320,7 @@ def replay_run(testcases, **_): }, expected_num_reports=2, strategies=["jsbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -329,6 +339,7 @@ def replay_run(testcases, **_): }, expected_num_reports=2, strategies=["jsbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -343,6 +354,7 @@ def replay_run(testcases, **_): expected_num_reports=2, # no beautify performed, add check so the run succeeds strategies=["jsbeautify", "check"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -357,6 +369,7 @@ def replay_run(testcases, **_): expected_num_reports=2, # no beautify performed, add check so the run succeeds strategies=["jsbeautify", "check"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_JSBEAUTIFIER, reason="jsbeautifier required"), @@ -370,6 +383,7 @@ def replay_run(testcases, **_): expected_results={"*,\n#a {\n fluff: 0;\n required: 1\n}\n"}, expected_num_reports=2, strategies=["cssbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_CSSBEAUTIFIER, reason="cssbeautifier required"), @@ -385,6 +399,7 @@ def replay_run(testcases, **_): }, expected_num_reports=2, strategies=["cssbeautify"], + have_beautifiers=True, ), marks=pytest.mark.skipif(not HAVE_CSSBEAUTIFIER, reason="cssbeautifier required"), @@ -398,6 +413,7 @@ def replay_run(testcases, **_): expected_results={"\n"), + "\n" + ), test_name="test.html", expected_run_calls=1, 
expected_results={ @@ -459,8 +500,9 @@ def replay_run(testcases, _time_limit, **_): strategies=["check", "cssbeautify"], have_beautifiers=True, ), - marks=pytest.mark.skipif(not HAVE_CSSBEAUTIFIER, - reason="cssbeautifier required"), + marks=pytest.mark.skipif( + not HAVE_CSSBEAUTIFIER, reason="cssbeautifier required" + ), ), # test almost beautified css (any change breaks test, beautify only removes an # extra blank line so `lines` will have already tried it, and this will hit the @@ -469,7 +511,8 @@ def replay_run(testcases, _time_limit, **_): *BeautifyStrategyParams( test_data=( "\n"), + " required: 1\n}\n/*DDEND*/\n\n" + ), test_name="test.html", expected_run_calls=8, expected_results={ @@ -480,8 +523,9 @@ def replay_run(testcases, _time_limit, **_): strategies=["check", "lines", "cssbeautify"], have_beautifiers=True, ), - marks=pytest.mark.skipif(not HAVE_CSSBEAUTIFIER, - reason="cssbeautifier required"), + marks=pytest.mark.skipif( + not HAVE_CSSBEAUTIFIER, reason="cssbeautifier required" + ), ), # test that when beautifiers are not available, the strategies have no effect BeautifyStrategyParams( @@ -493,11 +537,19 @@ def replay_run(testcases, _time_limit, **_): strategies=["check", "cssbeautify"], have_beautifiers=False, ), - ] + ], ) -def test_beautifier(mocker, tmp_path, test_data, test_name, expected_run_calls, - expected_results, expected_num_reports, strategies, - have_beautifiers): +def test_beautifier( + mocker, + tmp_path, + test_data, + test_name, + expected_run_calls, + expected_results, + expected_num_reports, + strategies, + have_beautifiers, +): """test for the "beautify" strategies""" if not have_beautifiers: mocker.patch.object(CSSBeautify, "import_available", False) @@ -515,22 +567,25 @@ def replay_run(testcases, _time_limit, **_): LOG.debug("interesting if test unchanged") interesting = test_data == contents elif "requisite" in test_data: - LOG.debug("interesting if ('requisite' and 'required') or " - "'requi'+'red' in %r", contents) - interesting = ("required" in contents and "requisite" in contents) \ - or "'requi'+'red'" in contents + LOG.debug( + "interesting if ('requisite' and 'required') or " + "'requi'+'red' in %r", + contents, + ) + interesting = ( + "required" in contents and "requisite" in contents + ) or "'requi'+'red'" in contents else: LOG.debug("interesting if 'required' or 'requi'+'red' in %r", contents) interesting = "required" in contents or "'requi'+'red'" in contents if interesting: - log_path = tmp_path / ( - "crash%d_logs" % (replayer.run.call_count,) - ) + log_path = tmp_path / ("crash%d_logs" % (replayer.run.call_count,)) log_path.mkdir() _fake_save_logs_foo(log_path) report = Report(str(log_path), "bin") return [ReplayResult(report, [[test_name]], [], True)] return [] + replayer.run.side_effect = replay_run test = TestCase(test_name, None, "test-adapter") @@ -541,8 +596,15 @@ def replay_run(testcases, _time_limit, **_): target = mocker.Mock(spec=Target) target.relaunch = 1 try: - mgr = ReduceManager([], mocker.Mock(spec=Sapphire, timeout=30), target, tests, - strategies, log_path, use_analysis=False) + mgr = ReduceManager( + [], + mocker.Mock(spec=Sapphire, timeout=30), + target, + tests, + strategies, + log_path, + use_analysis=False, + ) assert mgr.run() == 0 finally: for test in tests: @@ -552,14 +614,15 @@ def replay_run(testcases, _time_limit, **_): assert set(log_path.iterdir()) == {log_path / "reports"} tests = {test.read_text() for test in log_path.glob("reports/*-*/" + test_name)} assert tests == expected_results - assert sum(1 for 
_ in (log_path / "reports").iterdir()) == expected_num_reports, \ - list((log_path / "reports").iterdir()) + assert ( + sum(1 for _ in (log_path / "reports").iterdir()) == expected_num_reports + ), list((log_path / "reports").iterdir()) PurgeUnservedTestParams = namedtuple( "PurgeUnservedTestParams", "strategies, test_data, served, expected_results, expected_run_calls," - "expected_num_reports, purging_breaks" + "expected_num_reports, purging_breaks", ) @@ -601,11 +664,15 @@ def replay_run(testcases, _time_limit, **_): # second test. PurgeUnservedTestParams( strategies=["chars"], - test_data=[{"test.html": "123", "opt.html": "456"}, - {"test.html": "789", "opt.html": "abc"}], - served=[[["test.html", "opt.html"], ["test.html", "opt.html"]], - [["test.html", "opt.html"], ["test.html"]], - [["test.html", "opt.html"], ["test.html"]]], + test_data=[ + {"test.html": "123", "opt.html": "456"}, + {"test.html": "789", "opt.html": "abc"}, + ], + served=[ + [["test.html", "opt.html"], ["test.html", "opt.html"]], + [["test.html", "opt.html"], ["test.html"]], + [["test.html", "opt.html"], ["test.html"]], + ], expected_results={"1", "4", "7"}, expected_run_calls=6, expected_num_reports=3, @@ -615,11 +682,15 @@ def replay_run(testcases, _time_limit, **_): # (first test remains) PurgeUnservedTestParams( strategies=["chars"], - test_data=[{"test.html": "123", "opt.html": "456"}, - {"test.html": "789", "opt.html": "abc"}], - served=[[["test.html", "opt.html"], ["test.html", "opt.html"]], - [["test.html", "opt.html"], ["opt.html"]], - [["test.html", "opt.html"]]], + test_data=[ + {"test.html": "123", "opt.html": "456"}, + {"test.html": "789", "opt.html": "abc"}, + ], + served=[ + [["test.html", "opt.html"], ["test.html", "opt.html"]], + [["test.html", "opt.html"], ["opt.html"]], + [["test.html", "opt.html"]], + ], expected_results={"1", "4"}, expected_run_calls=5, expected_num_reports=2, @@ -628,11 +699,12 @@ def replay_run(testcases, _time_limit, **_): # triple test, list strategy. first test gets reduced, third gets eliminated PurgeUnservedTestParams( strategies=["list"], - test_data=[{"test.html": "123"}, {"test.html": "456"}, - {"test.html": "789"}], - served=[[["test.html"]], - [["test.html"]], - [["test.html"]]], + test_data=[ + {"test.html": "123"}, + {"test.html": "456"}, + {"test.html": "789"}, + ], + served=[[["test.html"]], [["test.html"]], [["test.html"]]], expected_results={"456"}, expected_run_calls=2, expected_num_reports=2, @@ -641,19 +713,30 @@ def replay_run(testcases, _time_limit, **_): # triple test, list strategy. 
None for served still eliminates first two tests PurgeUnservedTestParams( strategies=["list"], - test_data=[{"test.html": "123"}, {"test.html": "456"}, - {"test.html": "789"}], + test_data=[ + {"test.html": "123"}, + {"test.html": "456"}, + {"test.html": "789"}, + ], served=[None, None, None], expected_results={"789"}, expected_run_calls=2, expected_num_reports=2, purging_breaks=False, ), - ] + ], ) -def test_purge_unserved(mocker, tmp_path, strategies, test_data, served, - expected_results, expected_run_calls, expected_num_reports, - purging_breaks): +def test_purge_unserved( + mocker, + tmp_path, + strategies, + test_data, + served, + expected_results, + expected_run_calls, + expected_num_reports, + purging_breaks, +): """test purging unserved files""" mocker.patch("grizzly.reduce.strategies.lithium._contains_dd", return_value=True) replayer = mocker.patch("grizzly.reduce.core.ReplayManager", autospec=True) @@ -679,6 +762,7 @@ def replay_run(testcases, _time_limit, **_): _fake_save_logs_foo(log_path) report = Report(str(log_path), "bin") return [ReplayResult(report, served.pop(0), [], True)] + replayer.run.side_effect = replay_run tests = [] @@ -693,8 +777,15 @@ def replay_run(testcases, _time_limit, **_): target = mocker.Mock(spec=Target) target.relaunch = 1 try: - mgr = ReduceManager([], mocker.Mock(spec=Sapphire, timeout=30), target, tests, - strategies, log_path, use_analysis=False) + mgr = ReduceManager( + [], + mocker.Mock(spec=Sapphire, timeout=30), + target, + tests, + strategies, + log_path, + use_analysis=False, + ) if purging_breaks: with raises(AssertionError): mgr.run() @@ -710,15 +801,17 @@ def replay_run(testcases, _time_limit, **_): assert set(log_path.iterdir()) == {log_path / "reports"} tests = {test.read_text() for test in log_path.glob("reports/*-*/*.html")} assert tests == expected_results - assert sum(1 for _ in (log_path / "reports").iterdir()) == expected_num_reports, \ - list((log_path / "reports").iterdir()) + assert ( + sum(1 for _ in (log_path / "reports").iterdir()) == expected_num_reports + ), list((log_path / "reports").iterdir()) # each input has 2 files, so if there are more than 2 reports, the result has 2 # testcases assert ( sum(1 for _ in log_path.glob("reports/*-*/prefs.js")) == (expected_num_reports + 1) // 2 ), "prefs.js missing in %r" % [ - str(p.relative_to(log_path)) for p in log_path.glob("reports/**/*") + str(p.relative_to(log_path)) + for p in log_path.glob("reports/**/*") if p.is_file() ] @@ -735,14 +828,13 @@ def replay_run(testcases, _time_limit, **_): LOG.debug("interesting if 'required' in %r", contents) interesting = "required" in contents if interesting: - log_path = tmp_path / ( - "crash%d_logs" % (replayer.run.call_count,) - ) + log_path = tmp_path / ("crash%d_logs" % (replayer.run.call_count,)) log_path.mkdir() _fake_save_logs_foo(log_path) report = Report(str(log_path), "bin") return [ReplayResult(report, [["test.html", "other.html"]], [], True)] return [] + replayer.run.side_effect = replay_run test = TestCase("test.html", None, "test-adapter") @@ -754,8 +846,15 @@ def replay_run(testcases, _time_limit, **_): target = mocker.Mock(spec=Target) target.relaunch = 1 try: - mgr = ReduceManager([], mocker.Mock(spec=Sapphire, timeout=30), target, tests, - ["lines"], log_path, use_analysis=False) + mgr = ReduceManager( + [], + mocker.Mock(spec=Sapphire, timeout=30), + target, + tests, + ["lines"], + log_path, + use_analysis=False, + ) assert mgr.run() == 0 finally: for test in tests: @@ -769,7 +868,8 @@ def replay_run(testcases, 
_time_limit, **_): assert set(log_path.iterdir()) == {log_path / "reports"} tests = {test.read_text() for test in log_path.glob("reports/*-*/test.html")} assert tests == expected_results - assert sum(1 for _ in (log_path / "reports").iterdir()) == expected_num_reports, \ - list((log_path / "reports").iterdir()) + assert ( + sum(1 for _ in (log_path / "reports").iterdir()) == expected_num_reports + ), list((log_path / "reports").iterdir()) others = {test.read_text() for test in log_path.glob("reports/*-*/other.html")} assert others == {"blah\n"} diff --git a/grizzly/replay/args.py b/grizzly/replay/args.py index 2dcbc995..6757ae11 100644 --- a/grizzly/replay/args.py +++ b/grizzly/replay/args.py @@ -8,59 +8,83 @@ class ReplayArgs(CommonArgs): - def __init__(self): super().__init__() self.parser.add_argument( "input", - help="Accepted input includes: " \ - "1) A directory containing testcase data. " \ - "2) A directory with one or more subdirectories containing testcase data. " \ - "3) A zip archive containing testcase data or subdirectories containing testcase data. " \ - "4) A single file to be used as a testcase. " \ - "When using a directory it must contain a 'test_info.json' file.") + help="Accepted input includes: " + "1) A directory containing testcase data. " + "2) A directory with one or more subdirectories containing testcase data. " + "3) A zip archive containing testcase data or subdirectories containing" + " testcase data. " + "4) A single file to be used as a testcase. " + "When using a directory it must contain a 'test_info.json' file.", + ) replay_args = self.parser.add_argument_group("Replay Arguments") replay_args.add_argument( - "--any-crash", action="store_true", - help="Any crash is interesting, not only crashes which match the original signature.") - replay_args.add_argument( - "--idle-delay", type=int, default=30, - help="Number of seconds to wait before polling for idle (default: %(default)s)") + "--any-crash", + action="store_true", + help="Any crash is interesting, not only crashes which match the original" + " signature.", + ) replay_args.add_argument( - "--idle-threshold", type=int, default=0, - help="CPU usage threshold to mark the process as idle (default: disabled)") + "--idle-delay", + type=int, + default=30, + help="Number of seconds to wait before polling for idle" + " (default: %(default)s)", + ) replay_args.add_argument( - "-l", "--logs", - help="Location to save logs. If the path exists it must be empty, if it " \ - "does not exist it will be created.") + "--idle-threshold", + type=int, + default=0, + help="CPU usage threshold to mark the process as idle (default: disabled)", + ) replay_args.add_argument( - "--min-crashes", type=int, default=1, - help="Require the testcase to crash n times before accepting the result." \ - " Helpful for intermittent testcases (default: %(default)sx)") + "-l", + "--logs", + help="Location to save logs. If the path exists it must be empty, if it " + "does not exist it will be created.", + ) replay_args.add_argument( - "--no-harness", action="store_true", - help="Don't use the harness for redirection. Implies '--relaunch=1'.") + "--min-crashes", + type=int, + default=1, + help="Require the testcase to crash n times before accepting the result." + " Helpful for intermittent testcases (default: %(default)sx)", + ) replay_args.add_argument( - "--repeat", type=int, default=1, - help="Run the testcase n times." 
\ - " Helpful for intermittent testcases (default: %(default)sx)") + "--no-harness", + action="store_true", + help="Don't use the harness for redirection. Implies '--relaunch=1'.", + ) replay_args.add_argument( - "--sig", - help="Signature (JSON) file to match.") + "--repeat", + type=int, + default=1, + help="Run the testcase n times." + " Helpful for intermittent testcases (default: %(default)sx)", + ) + replay_args.add_argument("--sig", help="Signature (JSON) file to match.") replay_args.add_argument( - "--test-index", type=int, nargs="+", - help="Select a testcase to run when multiple testcases are loaded. " \ - "Testscases are ordered oldest to newest. Indexing is 0 based. " \ - "0 == Oldest, n-1 == Newest (default: run all testcases)") + "--test-index", + type=int, + nargs="+", + help="Select a testcase to run when multiple testcases are loaded. " + "Testscases are ordered oldest to newest. Indexing is 0 based. " + "0 == Oldest, n-1 == Newest (default: run all testcases)", + ) self.launcher_grp.add_argument( - "--rr", action="store_true", - help="Use RR (Linux only)") + "--rr", action="store_true", help="Use RR (Linux only)" + ) self.reporter_grp.add_argument( - "--include-test", action="store_true", - help="Include the testcase when reporting results.") + "--include-test", + action="store_true", + help="Include the testcase when reporting results.", + ) def sanity_check(self, args): super().sanity_check(args) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 0b237289..9a45b462 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -44,11 +44,28 @@ class ReplayManager: HARNESS_FILE = pathjoin(dirname(__file__), "..", "common", "harness.html") DEFAULT_TIME_LIMIT = 30 - __slots__ = ("ignore", "server", "status", "target", "_any_crash", - "_harness", "_signature", "_relaunch", "_unpacked") - - def __init__(self, ignore, server, target, any_crash=False, - relaunch=1, signature=None, use_harness=True): + __slots__ = ( + "ignore", + "server", + "status", + "target", + "_any_crash", + "_harness", + "_signature", + "_relaunch", + "_unpacked", + ) + + def __init__( + self, + ignore, + server, + target, + any_crash=False, + relaunch=1, + signature=None, + use_harness=True, + ): self.ignore = ignore self.server = server self.status = None @@ -113,12 +130,12 @@ def expect_hang(ignore, signature, tests): if is_hang: if signature is None: raise ConfigError( - "Hangs require a signature to replay", - Session.EXIT_ERROR) + "Hangs require a signature to replay", Session.EXIT_ERROR + ) if "timeout" in ignore: raise ConfigError( - "Cannot ignore 'timeout' when detecting hangs", - Session.EXIT_ERROR) + "Cannot ignore 'timeout' when detecting hangs", Session.EXIT_ERROR + ) return is_hang @classmethod @@ -166,7 +183,9 @@ def report_to_filesystem(path, results, tests=None): """ others = list(x.report for x in results if not x.expected) if others: - reporter = FilesystemReporter(pathjoin(path, "other_reports"), major_bucket=False) + reporter = FilesystemReporter( + pathjoin(path, "other_reports"), major_bucket=False + ) for report in others: reporter.submit(tests or [], report=report) expected = list(x for x in results if x.expected) @@ -190,7 +209,7 @@ def run( exit_early=True, expect_hang=False, idle_delay=0, - idle_threshold=0 + idle_threshold=0, ): """Run testcase replay. 
@@ -229,7 +248,9 @@ def run( server_map = ServerMap() if self._harness is not None: - server_map.set_dynamic_response("grz_harness", lambda: self._harness, mime_type="text/html") + server_map.set_dynamic_response( + "grz_harness", lambda: self._harness, mime_type="text/html" + ) # track unprocessed results reports = dict() @@ -249,21 +270,23 @@ def run( self.target, idle_threshold=idle_threshold, idle_delay=idle_delay, - relaunch=relaunch * test_count) + relaunch=relaunch * test_count, + ) # perform iterations for _ in range(repeat): self.status.iteration += 1 if self.target.closed: if self._harness is None: location = runner.location( - "/grz_current_test", - self.server.port) + "/grz_current_test", self.server.port + ) else: location = runner.location( "/grz_harness", self.server.port, close_after=relaunch * test_count, - time_limit=time_limit) + time_limit=time_limit, + ) startup_error = False # The environment from the initial testcase is used because # a sequence of testcases is expected to be run without @@ -276,28 +299,38 @@ def run( served = list() for test_idx in range(test_count): if test_count > 1: - LOG.info("Running test, part %d/%d (%d/%d)...", - test_idx + 1, test_count, self.status.iteration, repeat) + LOG.info( + "Running test, part %d/%d (%d/%d)...", + test_idx + 1, + test_count, + self.status.iteration, + repeat, + ) else: - LOG.info("Running test (%d/%d)...", self.status.iteration, repeat) + LOG.info( + "Running test (%d/%d)...", self.status.iteration, repeat + ) # update redirects if self._harness is not None: next_idx = (test_idx + 1) % test_count server_map.set_redirect( "grz_next_test", testcases[next_idx].landing_page, - required=True) + required=True, + ) server_map.set_redirect( "grz_current_test", testcases[test_idx].landing_page, - required=False) + required=False, + ) # run testcase run_result = runner.run( self.ignore, server_map, testcases[test_idx], test_path=unpacked[test_idx], - wait_for_callback=self._harness is None) + wait_for_callback=self._harness is None, + ) durations.append(run_result.duration) served.append(run_result.served) if run_result.status is not None or not run_result.attempted: @@ -317,9 +350,7 @@ def run( log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs")) self.target.save_logs(log_path) report = Report( - log_path, - self.target.binary, - is_hang=run_result.timeout + log_path, self.target.binary, is_hang=run_result.timeout ) # check signatures if run_result.timeout: @@ -339,35 +370,47 @@ def run( self._signature = report.crash_signature # bucket result if short_sig == "No crash detected": - # TODO: verify report.major == "NO_STACK" otherwise FM failed to parse the logs + # TODO: verify report.major == "NO_STACK" + # otherwise FM failed to parse the logs # TODO: change this to support hangs/timeouts, etc LOG.info("Result: No crash detected") - elif ( - not startup_error - and (self._any_crash - or self.check_match(self._signature, report, expect_hang)) + elif not startup_error and ( + self._any_crash + or self.check_match(self._signature, report, expect_hang) ): self.status.count_result(short_sig) - LOG.info("Result: %s (%s:%s)", - short_sig, report.major[:8], report.minor[:8]) + LOG.info( + "Result: %s (%s:%s)", + short_sig, + report.major[:8], + report.minor[:8], + ) if sig_hash: LOG.debug("using provided signature (hash) to bucket") bucket_hash = sig_hash else: bucket_hash = report.crash_hash if bucket_hash not in reports: - reports[bucket_hash] = ReplayResult(report, served, durations, True) + reports[bucket_hash] = 
ReplayResult( + report, served, durations, True + ) LOG.debug("now tracking %s", bucket_hash) report = None # don't remove report else: reports[bucket_hash].count += 1 LOG.debug("already tracking %s", bucket_hash) else: - LOG.info("Result: Different signature: %s (%s:%s)", - short_sig, report.major[:8], report.minor[:8]) + LOG.info( + "Result: Different signature: %s (%s:%s)", + short_sig, + report.major[:8], + report.minor[:8], + ) self.status.ignored += 1 if report.crash_hash not in reports: - reports[report.crash_hash] = ReplayResult(report, served, durations, False) + reports[report.crash_hash] = ReplayResult( + report, served, durations, False + ) LOG.debug("now tracking %s", report.crash_hash) report = None # don't remove report else: @@ -383,12 +426,19 @@ def run( if exit_early: # failed to meet minimum number of results - if repeat - self.status.iteration + self.status.results < min_results: + if ( + repeat - self.status.iteration + self.status.results + < min_results + ): if self.status.iteration < repeat: LOG.debug("skipping remaining attempts") # failed to reproduce issue - LOG.debug("results (%d) < minimum (%d), after %d attempts", - self.status.results, min_results, self.status.iteration) + LOG.debug( + "results (%d) < minimum (%d), after %d attempts", + self.status.results, + min_results, + self.status.iteration, + ) # NOTE: this can be tricky if the harness is used because it can # skip the shutdown performed in the harness and runner, if this # is an issue for now use relaunch=1 @@ -396,15 +446,18 @@ def run( # check if complete (minimum number of results found) if self.status.results >= min_results: assert self.status.results == min_results - assert sum(x.count for x in reports.values() if x.expected) >= min_results - LOG.debug("results == expected (%d), after %d attempts", - min_results, self.status.iteration) + assert ( + sum(x.count for x in reports.values() if x.expected) + >= min_results + ) + LOG.debug( + "results == expected (%d), after %d attempts", + min_results, + self.status.iteration, + ) break - # warn about large browser logs - #self.status.log_size = self.target.log_size() - #if self.status.log_size > self.TARGET_LOG_SIZE_WARN: - # LOG.warning("Large browser logs: %dMBs", (self.status.log_size / 0x100000)) + # TODO: should we warn about large browser logs? 
# process results results = list() @@ -413,7 +466,11 @@ def run( if sum(x.count for x in reports.values()) >= min_results: results = list(reports.values()) else: - LOG.debug("%d (any_crash) less than minimum %d", self.status.results, min_results) + LOG.debug( + "%d (any_crash) less than minimum %d", + self.status.results, + min_results, + ) for report in reports.values(): report.report.cleanup() else: @@ -421,14 +478,23 @@ def run( # filter out unreliable expected results for crash_hash, report in reports.items(): if report.expected and report.count < min_results: - LOG.debug("%r less than minimum (%d/%d)", crash_hash, report.count, min_results) + LOG.debug( + "%r less than minimum (%d/%d)", + crash_hash, + report.count, + min_results, + ) report.report.cleanup() continue results.append(report) # this should only be displayed when both conditions are met: # 1) runner does not close target (no delay was given before shutdown) # 2) result has not been successfully reproduced - if self._relaunch > 1 and not self.target.closed and not any(x.expected for x in results): + if ( + self._relaunch > 1 + and not self.target.closed + and not any(x.expected for x in results) + ): LOG.info("Perhaps try with --relaunch=1") # active reports have been moved to results # clear reports to avoid cleanup of active reports @@ -467,8 +533,10 @@ def time_limits(cls, time_limit, timeout, tests): timeout = time_limit + TIMEOUT_DELAY if timeout < time_limit: raise ConfigError( - "Timeout (%d) cannot be less than time limit (%d)" % (timeout, time_limit), - Session.EXIT_ARGS) + "Timeout (%d) cannot be less than time limit (%d)" + % (timeout, time_limit), + Session.EXIT_ARGS, + ) return time_limit, timeout @classmethod @@ -496,9 +564,8 @@ def main(cls, args): try: testcases = cls.load_testcases( - args.input, - args.prefs is None, - subset=args.test_index) + args.input, args.prefs is None, subset=args.test_index + ) except TestCaseLoadFailure as exc: LOG.error("Error: %s", str(exc)) return Session.EXIT_ERROR @@ -510,20 +577,24 @@ def main(cls, args): if args.no_harness and len(testcases) > 1: LOG.error( "'--no-harness' cannot be used with multiple testcases. " - "Perhaps '--test-index' can help.") + "Perhaps '--test-index' can help." 
+ ) return Session.EXIT_ARGS # check if hangs are expected expect_hang = cls.expect_hang(args.ignore, signature, testcases) # check test time limit and timeout time_limit, timeout = cls.time_limits( - args.time_limit, - args.timeout, - testcases) + args.time_limit, args.timeout, testcases + ) LOG.info("Using time limit: %ds, timeout: %ds", time_limit, timeout) repeat = max(args.min_crashes, args.repeat) relaunch = min(args.relaunch, repeat) - LOG.info("Repeat: %d, Minimum crashes: %d, Relaunch %d", - repeat, args.min_crashes, relaunch) + LOG.info( + "Repeat: %d, Minimum crashes: %d, Relaunch %d", + repeat, + args.min_crashes, + relaunch, + ) LOG.debug("initializing the Target") target = load_target(args.platform)( args.binary, @@ -533,7 +604,8 @@ def main(cls, args): args.memory, rr=args.rr, valgrind=args.valgrind, - xvfb=args.xvfb) + xvfb=args.xvfb, + ) # prioritize specified prefs.js file over included file if args.prefs is not None: for testcase in testcases: @@ -560,7 +632,7 @@ def main(cls, args): any_crash=args.any_crash, relaunch=relaunch, signature=signature, - use_harness=not args.no_harness + use_harness=not args.no_harness, ) as replay: results = replay.run( testcases, @@ -569,7 +641,8 @@ def main(cls, args): idle_delay=args.idle_delay, idle_threshold=args.idle_threshold, min_results=args.min_crashes, - repeat=repeat) + repeat=repeat, + ) # handle results success = any(x.expected for x in results) if success: @@ -578,9 +651,8 @@ def main(cls, args): LOG.info("Failed to reproduce results") if args.logs and results: cls.report_to_filesystem( - args.logs, - results, - testcases if args.include_test else None) + args.logs, results, testcases if args.include_test else None + ) # TODO: add fuzzmanager reporting return Session.EXIT_SUCCESS if success else Session.EXIT_FAILURE diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 1e9c36c6..a5770f8e 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -39,7 +39,9 @@ def test_args_01(capsys, tmp_path): (inp / "prefs.js").touch() ReplayArgs().parse_args([str(exe), str(inp)]) # test case file - ReplayArgs().parse_args([str(exe), str(inp / "somefile"), "--prefs", str(inp / "prefs.js")]) + ReplayArgs().parse_args( + [str(exe), str(inp / "somefile"), "--prefs", str(inp / "prefs.js")] + ) # test negative min-crashes value with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--min-crashes", "-1"]) @@ -55,14 +57,19 @@ def test_args_01(capsys, tmp_path): # test any crash and signature with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--any-crash", "--sig", "x"]) - assert "error: signature is ignored when running with '--any-crash'" in capsys.readouterr()[-1] + assert ( + "error: signature is ignored when running with '--any-crash'" + in capsys.readouterr()[-1] + ) # test multiple debuggers with raises(SystemExit): ReplayArgs().parse_args([str(exe), str(inp), "--rr", "--valgrind"]) assert "'--rr' and '--valgrind' cannot be used together" in capsys.readouterr()[-1] # test idle args with raises(SystemExit): - ReplayArgs().parse_args([str(exe), str(inp), "--idle-threshold", "1", "--idle-delay", "0"]) + ReplayArgs().parse_args( + [str(exe), str(inp), "--idle-threshold", "1", "--idle-delay", "0"] + ) assert "'--idle-delay' value must be positive" in capsys.readouterr()[-1] # force relaunch == 1 with --no-harness args = ReplayArgs().parse_args([str(exe), str(inp), "--no-harness"]) @@ -76,22 +83,33 @@ def test_main_01(mocker, tmp_path): # and the forth attempt should be 
skipped. # mock Sapphire.serve_path only mocker.patch("grizzly.common.runner.sleep", autospec=True) - serve_path = mocker.patch("grizzly.replay.replay.Sapphire.serve_path", autospec=True) - serve_path.return_value = (SERVED_ALL, ["test.html"]) # passed to mocked Target.detect_failure + serve_path = mocker.patch( + "grizzly.replay.replay.Sapphire.serve_path", autospec=True + ) + serve_path.return_value = ( + SERVED_ALL, + ["test.html"], + ) # passed to mocked Target.detect_failure # setup Target load_target = mocker.patch("grizzly.replay.replay.load_target") target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_IGNORED = Target.RESULT_IGNORED target.RESULT_NONE = Target.RESULT_NONE - target.detect_failure.side_effect = (Target.RESULT_FAILURE, Target.RESULT_NONE, Target.RESULT_FAILURE) + target.detect_failure.side_effect = ( + Target.RESULT_FAILURE, + Target.RESULT_NONE, + Target.RESULT_FAILURE, + ) target.save_logs = _fake_save_logs load_target.return_value.return_value = target # setup args - log_path = (tmp_path / "logs") + log_path = tmp_path / "logs" (tmp_path / "test.html").touch() (tmp_path / "prefs.js").touch() - (tmp_path / "sig.json").write_bytes(b"{\"symptoms\": [{\"type\": \"crashAddress\", \"address\": \"0\"}]}") + (tmp_path / "sig.json").write_bytes( + b'{"symptoms": [{"type": "crashAddress", "address": "0"}]}' + ) args = mocker.Mock( fuzzmanager=False, idle_delay=0, @@ -109,7 +127,8 @@ def test_main_01(mocker, tmp_path): test_index=None, time_limit=10, timeout=None, - valgrind=False) + valgrind=False, + ) assert ReplayManager.main(args) == Session.EXIT_SUCCESS assert target.reverse.call_count == 1 assert target.launch.call_count == 3 @@ -123,12 +142,18 @@ def test_main_01(mocker, tmp_path): assert any(log_path.glob("reports/*/log_stderr.txt")) assert any(log_path.glob("reports/*/log_stdout.txt")) + def test_main_02(mocker, tmp_path): """test ReplayManager.main() - no repro""" # mock Sapphire.serve_path only mocker.patch("grizzly.common.runner.sleep", autospec=True) - serve_path = mocker.patch("grizzly.replay.replay.Sapphire.serve_path", autospec=True) - serve_path.return_value = (SERVED_ALL, ["test.html"]) # passed to mocked Target.detect_failure + serve_path = mocker.patch( + "grizzly.replay.replay.Sapphire.serve_path", autospec=True + ) + serve_path.return_value = ( + SERVED_ALL, + ["test.html"], + ) # passed to mocked Target.detect_failure # setup Target load_target = mocker.patch("grizzly.replay.replay.load_target") target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) @@ -154,12 +179,14 @@ def test_main_02(mocker, tmp_path): test_index=None, time_limit=10, timeout=None, - valgrind=False) + valgrind=False, + ) assert ReplayManager.main(args) == Session.EXIT_FAILURE assert target.detect_failure.call_count == 1 assert target.close.call_count == 2 assert target.cleanup.call_count == 1 + def test_main_03(mocker): """test ReplayManager.main() error cases""" fake_sig = mocker.patch("grizzly.replay.replay.CrashSignature", autospec=True) @@ -179,7 +206,8 @@ def test_main_03(mocker): sig=None, test_index=None, time_limit=10, - timeout=None) + timeout=None, + ) # user abort fake_load_target.side_effect = KeyboardInterrupt # coverage @@ -220,6 +248,7 @@ def test_main_03(mocker): assert fake_load_target.call_count == 0 fake_load_target.reset_mock() + def test_main_04(mocker, tmp_path): """test ReplayManager.main() target exceptions""" mocker.patch("grizzly.replay.replay.FuzzManagerReporter", 
autospec=True) @@ -228,9 +257,11 @@ def test_main_04(mocker, tmp_path): target = mocker.Mock(spec=Target, launch_timeout=30) load_target = mocker.patch("grizzly.replay.replay.load_target", autospec=True) load_target.return_value.return_value = target - fake_tmp = (tmp_path / "grz_tmp") + fake_tmp = tmp_path / "grz_tmp" fake_tmp.mkdir() - mocker.patch("grizzly.replay.replay.grz_tmp", autospec=True, return_value=str(fake_tmp)) + mocker.patch( + "grizzly.replay.replay.grz_tmp", autospec=True, return_value=str(fake_tmp) + ) # setup args args = mocker.Mock( ignore=list(), @@ -243,23 +274,35 @@ def test_main_04(mocker, tmp_path): sig=None, test_index=None, time_limit=10, - timeout=None) + timeout=None, + ) # target launch error - fake_logs = (tmp_path / "fake_report") + fake_logs = tmp_path / "fake_report" fake_logs.mkdir() report = mocker.Mock(spec=Report, prefix="fake_report", path=str(fake_logs)) - mocker.patch("grizzly.replay.replay.ReplayManager.run", side_effect=TargetLaunchError("", report)) + mocker.patch( + "grizzly.replay.replay.ReplayManager.run", + side_effect=TargetLaunchError("", report), + ) assert ReplayManager.main(args) == Session.EXIT_LAUNCH_FAILURE assert not fake_logs.is_dir() assert "fake_report_logs" in (x.name for x in fake_tmp.iterdir()) # target launch timeout - mocker.patch("grizzly.replay.replay.ReplayManager.run", side_effect=TargetLaunchTimeout) + mocker.patch( + "grizzly.replay.replay.ReplayManager.run", side_effect=TargetLaunchTimeout + ) assert ReplayManager.main(args) == Session.EXIT_LAUNCH_FAILURE + def test_main_05(mocker, tmp_path): """test ReplayManager.main() loading/generating prefs.js""" - serve_path = mocker.patch("grizzly.replay.replay.Sapphire.serve_path", autospec=True) - serve_path.return_value = (None, ["test.html"]) # passed to mocked Target.detect_failure + serve_path = mocker.patch( + "grizzly.replay.replay.Sapphire.serve_path", autospec=True + ) + serve_path.return_value = ( + None, + ["test.html"], + ) # passed to mocked Target.detect_failure # setup Target target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE @@ -281,13 +324,14 @@ def test_main_05(mocker, tmp_path): sig=None, test_index=None, time_limit=1, - timeout=None) - log_path = (tmp_path / "logs") + timeout=None, + ) + log_path = tmp_path / "logs" args.logs = str(log_path) - input_path = (tmp_path / "input") + input_path = tmp_path / "input" input_path.mkdir() # build a test case - entry_point = (input_path / "test.html") + entry_point = input_path / "test.html" entry_point.touch() with TestCase("test.html", None, "test-adapter") as src: src.add_from_file(str(entry_point)) @@ -301,7 +345,7 @@ def test_main_05(mocker, tmp_path): assert target.detect_failure.call_count == 1 assert serve_path.call_count == 1 assert log_path.is_dir() - assert not any(log_path.glob('**/prefs.js')) + assert not any(log_path.glob("**/prefs.js")) target.reset_mock() serve_path.reset_mock() @@ -314,7 +358,7 @@ def test_main_05(mocker, tmp_path): assert target.detect_failure.call_count == 1 assert serve_path.call_count == 1 assert log_path.is_dir() - prefs = next(log_path.glob('**/prefs.js')) + prefs = next(log_path.glob("**/prefs.js")) assert prefs.read_bytes() == b"included" target.reset_mock() @@ -329,9 +373,10 @@ def test_main_05(mocker, tmp_path): assert target.detect_failure.call_count == 1 assert serve_path.call_count == 1 assert log_path.is_dir() - prefs = next(log_path.glob('**/prefs.js')) + prefs = next(log_path.glob("**/prefs.js")) assert 
prefs.read_bytes() == b"specified" + @mark.parametrize( "arg_timelimit, arg_timeout, test_timelimit, result", [ @@ -347,14 +392,19 @@ def test_main_05(mocker, tmp_path): (10, 11, None, Session.EXIT_FAILURE), # set test time limit greater than timeout (11, 10, None, Session.EXIT_ARGS), - ] + ], ) def test_main_06(mocker, tmp_path, arg_timelimit, arg_timeout, test_timelimit, result): """test ReplayManager.main() - test time limit and timeout""" # mock Sapphire.serve_path only mocker.patch("grizzly.common.runner.sleep", autospec=True) - serve_path = mocker.patch("grizzly.replay.replay.Sapphire.serve_path", autospec=True) - serve_path.return_value = (SERVED_ALL, ["test.html"]) # passed to mocked Target.detect_failure + serve_path = mocker.patch( + "grizzly.replay.replay.Sapphire.serve_path", autospec=True + ) + serve_path.return_value = ( + SERVED_ALL, + ["test.html"], + ) # passed to mocked Target.detect_failure # setup Target target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) target.RESULT_NONE = Target.RESULT_NONE @@ -388,5 +438,6 @@ def test_main_06(mocker, tmp_path, arg_timelimit, arg_timeout, test_timelimit, r test_index=None, time_limit=arg_timelimit, timeout=arg_timeout, - valgrind=False) + valgrind=False, + ) assert ReplayManager.main(args) == result diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 1b1ded74..3a1ec9ce 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -18,7 +18,7 @@ from .replay import ReplayManager, ReplayResult -def _fake_save_logs(result_logs, meta=False): # pylint: disable=unused-argument +def _fake_save_logs(result_logs, _meta=False): """write fake log data to disk""" with open(pathjoin(result_logs, "log_stderr.txt"), "w") as log_fp: log_fp.write("STDERR log\n") @@ -30,13 +30,17 @@ def _fake_save_logs(result_logs, meta=False): # pylint: disable=unused-argument log_fp.write(" #0 0xbad000 in foo /file1.c:123:234\n") log_fp.write(" #1 0x1337dd in bar /file2.c:1806:19\n") + def test_replay_01(mocker): """test ReplayManager.cleanup()""" - replay = ReplayManager([], mocker.Mock(spec=Sapphire), mocker.Mock(spec=Target), [mocker.Mock()]) + replay = ReplayManager( + [], mocker.Mock(spec=Sapphire), mocker.Mock(spec=Target), [mocker.Mock()] + ) replay.status = mocker.Mock(spec=Status) replay.cleanup() assert replay.status.cleanup.call_count == 1 + def test_replay_02(mocker, tmp_path): """test ReplayManager.run() - no repro""" mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) @@ -57,6 +61,7 @@ def test_replay_02(mocker, tmp_path): assert target.close.call_count == 2 assert not any(tmp_path.iterdir()) + def test_replay_03(mocker): """test ReplayManager.run() - no repro - with repeats""" server = mocker.Mock(spec=Sapphire, port=0x1337) @@ -66,7 +71,9 @@ def test_replay_03(mocker): target.detect_failure.return_value = Target.RESULT_NONE target.monitor.is_healthy.return_value = False with TestCase("index.html", "redirect.html", "test-adapter") as testcase: - with ReplayManager([], server, target, use_harness=True, relaunch=100) as replay: + with ReplayManager( + [], server, target, use_harness=True, relaunch=100 + ) as replay: assert not replay.run([testcase], 10, repeat=10, min_results=1) assert replay.signature is None assert replay.status.ignored == 0 @@ -76,6 +83,7 @@ def test_replay_03(mocker): assert target.monitor.is_healthy.call_count == 1 assert target.close.call_count == 2 + def test_replay_04(mocker): """test ReplayManager.run() - exit - skip shutdown in runner""" # 
this will make runner appear to have just relaunched the target @@ -83,14 +91,17 @@ def test_replay_04(mocker): mocker.patch( "grizzly.common.runner.Runner._tests_run", new_callable=mocker.PropertyMock, - return_value=0) + return_value=0, + ) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_ALL, ["index.html"]) target = mocker.Mock(spec=Target, closed=False, launch_timeout=30) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE with TestCase("index.html", "redirect.html", "test-adapter") as testcase: - with ReplayManager([], server, target, use_harness=True, relaunch=100) as replay: + with ReplayManager( + [], server, target, use_harness=True, relaunch=100 + ) as replay: assert not replay.run([testcase], 10, repeat=10, min_results=1) assert replay.status.ignored == 0 assert replay.status.iteration == 10 @@ -99,6 +110,7 @@ def test_replay_04(mocker): assert target.monitor.is_healthy.call_count == 0 assert target.close.call_count == 1 + def test_replay_05(mocker, tmp_path): """test ReplayManager.run() - successful repro""" mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) @@ -131,6 +143,7 @@ def test_replay_05(mocker, tmp_path): results[0].report.cleanup() assert not any(tmp_path.iterdir()) + def test_replay_06(mocker, tmp_path): """test ReplayManager.run() - error - landing page not requested""" mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) @@ -138,7 +151,9 @@ def test_replay_06(mocker, tmp_path): target = mocker.Mock(spec=Target, binary="bin", closed=True, launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_NONE = Target.RESULT_NONE - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]) + ] # test target unresponsive target.detect_failure.return_value = Target.RESULT_NONE server.serve_path.return_value = (SERVED_NONE, []) @@ -165,8 +180,10 @@ def test_replay_06(mocker, tmp_path): assert results[0].count == 1 assert not results[0].expected + def test_replay_07(mocker, tmp_path): - """test ReplayManager.run() - delayed failure - following test landing page not requested""" + """test ReplayManager.run() + delayed failure - following test landing page not requested""" mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) server = mocker.Mock(spec=Sapphire, port=0x1337, timeout=10) target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) @@ -176,8 +193,13 @@ def test_replay_07(mocker, tmp_path): target.detect_failure.side_effect = (Target.RESULT_NONE, Target.RESULT_FAILURE) target.monitor.is_healthy.return_value = False target.save_logs = _fake_save_logs - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - server.serve_path.side_effect = ((SERVED_ALL, ["index.html"]), (SERVED_REQUEST, ["x"])) + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]) + ] + server.serve_path.side_effect = ( + (SERVED_ALL, ["index.html"]), + (SERVED_REQUEST, ["x"]), + ) with ReplayManager([], server, target, use_harness=True, relaunch=10) as replay: assert replay.run(testcases, 10, repeat=2) assert replay.status.ignored == 0 @@ -186,6 +208,7 @@ def test_replay_07(mocker, tmp_path): # target.close() called once in runner and once by ReplayManager.run() assert target.close.call_count == 2 + def 
test_replay_08(mocker, tmp_path): """test ReplayManager.run() - ignored""" mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) @@ -195,7 +218,9 @@ def test_replay_08(mocker, tmp_path): target.RESULT_IGNORED = Target.RESULT_IGNORED target.detect_failure.return_value = Target.RESULT_IGNORED target.monitor.is_healthy.return_value = False - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]) + ] with ReplayManager([], server, target, use_harness=False) as replay: assert not replay.run(testcases, 10) assert target.monitor.is_healthy.call_count == 1 @@ -205,6 +230,7 @@ def test_replay_08(mocker, tmp_path): assert replay.status.results == 0 assert not any(tmp_path.iterdir()) + def test_replay_09(mocker, tmp_path): """test ReplayManager.run() - early exit""" mocker.patch("grizzly.common.runner.sleep", autospec=True) @@ -216,9 +242,15 @@ def test_replay_09(mocker, tmp_path): target.RESULT_IGNORED = Target.RESULT_IGNORED target.RESULT_NONE = Target.RESULT_NONE target.save_logs = _fake_save_logs - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]) + ] # early failure - target.detect_failure.side_effect = (Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_NONE) + target.detect_failure.side_effect = ( + Target.RESULT_FAILURE, + Target.RESULT_IGNORED, + Target.RESULT_NONE, + ) target.monitor.is_healthy.side_effect = (False, False, True, False) with ReplayManager([], server, target, use_harness=False) as replay: assert not replay.run(testcases, 10, repeat=4, min_results=3) @@ -228,7 +260,11 @@ def test_replay_09(mocker, tmp_path): assert replay.status.ignored == 1 # early success target.reset_mock() - target.detect_failure.side_effect = (Target.RESULT_FAILURE, Target.RESULT_IGNORED, Target.RESULT_FAILURE) + target.detect_failure.side_effect = ( + Target.RESULT_FAILURE, + Target.RESULT_IGNORED, + Target.RESULT_FAILURE, + ) target.monitor.is_healthy.side_effect = (False, False, False) with ReplayManager([], server, target, use_harness=False) as replay: results = replay.run(testcases, 10, repeat=4, min_results=2) @@ -263,17 +299,26 @@ def test_replay_09(mocker, tmp_path): assert len(results) == 1 assert sum(x.count for x in results) == 4 + def test_replay_10(mocker, tmp_path): """test ReplayManager.run() - test signatures - fail to meet minimum""" mocker.patch("grizzly.common.runner.sleep", autospec=True) - mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) + mocker.patch( + "grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path) + ) report_0 = mocker.Mock(spec=Report) report_0.crash_info.createShortSignature.return_value = "No crash detected" - report_1 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999") + report_1 = mocker.Mock( + spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999" + ) report_1.crash_info.createShortSignature.return_value = "[@ test1]" - report_2 = mocker.Mock(spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876") + report_2 = mocker.Mock( + spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876" + ) report_2.crash_info.createShortSignature.return_value = "[@ test2]" - report_3 = mocker.Mock(spec=Report, crash_hash="hash2", major="0123abcd", 
minor="abcd9876") + report_3 = mocker.Mock( + spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876" + ) report_3.crash_info.createShortSignature.return_value = "[@ test2]" fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) fake_report.side_effect = (report_0, report_1, report_2, report_3) @@ -285,8 +330,12 @@ def test_replay_10(mocker, tmp_path): target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, signature=signature, use_harness=False) as replay: + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]) + ] + with ReplayManager( + [], server, target, signature=signature, use_harness=False + ) as replay: results = replay.run(testcases, 10, repeat=4, min_results=2) assert target.close.call_count == 5 assert replay.signature == signature @@ -303,12 +352,19 @@ def test_replay_10(mocker, tmp_path): assert report_3.cleanup.call_count == 1 assert signature.matches.call_count == 3 + def test_replay_11(mocker, tmp_path): """test ReplayManager.run() - test signatures - multiple matches""" - mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) - report_0 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999") + mocker.patch( + "grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path) + ) + report_0 = mocker.Mock( + spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999" + ) report_0.crash_info.createShortSignature.return_value = "[@ test1]" - report_1 = mocker.Mock(spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876") + report_1 = mocker.Mock( + spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876" + ) report_1.crash_info.createShortSignature.return_value = "[@ test2]" fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) fake_report.side_effect = (report_0, report_1) @@ -321,8 +377,12 @@ def test_replay_11(mocker, tmp_path): target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.monitor.is_healthy.return_value = False - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="a.html", optional=[])] - with ReplayManager([], server, target, signature=signature, use_harness=False) as replay: + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="a.html", optional=[]) + ] + with ReplayManager( + [], server, target, signature=signature, use_harness=False + ) as replay: results = replay.run(testcases, 10, repeat=2, min_results=2) assert target.close.call_count == 3 assert replay.signature == signature @@ -337,14 +397,21 @@ def test_replay_11(mocker, tmp_path): assert report_1.cleanup.call_count == 1 assert signature.matches.call_count == 2 + def test_replay_12(mocker, tmp_path): """test ReplayManager.run() - any crash - success""" - mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) + mocker.patch( + "grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path) + ) report_0 = mocker.Mock(spec=Report) report_0.crash_info.createShortSignature.return_value = "No crash detected" - report_1 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999") + report_1 = mocker.Mock( + 
spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999" + ) report_1.crash_info.createShortSignature.return_value = "[@ test1]" - report_2 = mocker.Mock(spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876") + report_2 = mocker.Mock( + spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876" + ) report_2.crash_info.createShortSignature.return_value = "[@ test2]" fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) fake_report.side_effect = (report_0, report_1, report_2) @@ -354,7 +421,9 @@ def test_replay_12(mocker, tmp_path): target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.monitor.is_healthy.return_value = False - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]) + ] with ReplayManager([], server, target, any_crash=True, use_harness=False) as replay: results = replay.run(testcases, 10, repeat=3, min_results=2) assert target.close.call_count == 4 @@ -370,14 +439,21 @@ def test_replay_12(mocker, tmp_path): assert report_1.cleanup.call_count == 0 assert report_2.cleanup.call_count == 0 + def test_replay_13(mocker, tmp_path): """test ReplayManager.run() - any crash - fail to meet minimum""" - mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) + mocker.patch( + "grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path) + ) report_0 = mocker.Mock(spec=Report) report_0.crash_info.createShortSignature.return_value = "No crash detected" - report_1 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999") + report_1 = mocker.Mock( + spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999" + ) report_1.crash_info.createShortSignature.return_value = "[@ test1]" - report_2 = mocker.Mock(spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876") + report_2 = mocker.Mock( + spec=Report, crash_hash="hash2", major="0123abcd", minor="abcd9876" + ) report_2.crash_info.createShortSignature.return_value = "[@ test2]" report_3 = mocker.Mock(spec=Report) report_3.crash_info.createShortSignature.return_value = "No crash detected" @@ -389,7 +465,9 @@ def test_replay_13(mocker, tmp_path): target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.monitor.is_healthy.return_value = False - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]) + ] with ReplayManager([], server, target, any_crash=True) as replay: assert not replay.run(testcases, 10, repeat=4, min_results=3) assert target.close.call_count == 5 @@ -403,9 +481,12 @@ def test_replay_13(mocker, tmp_path): assert report_2.cleanup.call_count == 1 assert report_3.cleanup.call_count == 1 + def test_replay_14(mocker, tmp_path): """test ReplayManager.run() - no signature - use first crash""" - mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) + mocker.patch( + "grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path) + ) report_1 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123", minor="9999") report_1.crash_info.createShortSignature.return_value = "[@ test1]" auto_sig = mocker.Mock() @@ -423,7 +504,9 @@ def test_replay_14(mocker, tmp_path): 
target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE target.monitor.is_healthy.return_value = False - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]) + ] with ReplayManager([], server, target, use_harness=False) as replay: results = replay.run(testcases, 10, repeat=3, min_results=2) assert target.close.call_count == 4 @@ -439,10 +522,15 @@ def test_replay_14(mocker, tmp_path): assert report_2.cleanup.call_count == 0 assert report_3.cleanup.call_count == 1 + def test_replay_15(mocker, tmp_path): """test ReplayManager.run() - unexpected exception""" - mocker.patch("grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path)) - report_0 = mocker.Mock(spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999") + mocker.patch( + "grizzly.replay.replay.mkdtemp", autospec=True, return_value=str(tmp_path) + ) + report_0 = mocker.Mock( + spec=Report, crash_hash="hash1", major="0123abcd", minor="01239999" + ) report_0.crash_info.createShortSignature.return_value = "[@ test1]" fake_report = mocker.patch("grizzly.replay.replay.Report", autospec=True) fake_report.side_effect = (report_0,) @@ -451,8 +539,12 @@ def test_replay_15(mocker, tmp_path): target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.detect_failure.return_value = Target.RESULT_FAILURE - testcases = [mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] - with ReplayManager([], server, target, any_crash=True, use_harness=True, relaunch=2) as replay: + testcases = [ + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]) + ] + with ReplayManager( + [], server, target, any_crash=True, use_harness=True, relaunch=2 + ) as replay: with raises(KeyboardInterrupt): replay.run(testcases, 10, repeat=3, min_results=2) assert replay.signature is None @@ -464,6 +556,7 @@ def test_replay_15(mocker, tmp_path): assert fake_report.call_count == 1 assert report_0.cleanup.call_count == 1 + def test_replay_16(mocker): """test ReplayManager.run() - multiple TestCases - no repro""" mocker.patch("grizzly.common.runner.sleep", autospec=True) @@ -475,7 +568,8 @@ def test_replay_16(mocker): testcases = [ mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), + ] with ReplayManager([], server, target, use_harness=True) as replay: assert not replay.run(testcases, 10) assert replay.status.ignored == 0 @@ -484,6 +578,7 @@ def test_replay_16(mocker): assert target.close.call_count == 2 assert all(x.dump.call_count == 1 for x in testcases) + def test_replay_17(mocker): """test ReplayManager.run() - multiple TestCases - no repro - with repeats""" server = mocker.Mock(spec=Sapphire, port=0x1337, timeout=10) @@ -497,7 +592,8 @@ def test_replay_17(mocker): testcases = [ mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[]), - mocker.Mock(spec=TestCase, env_vars=[], landing_page="index.html", optional=[])] + mocker.Mock(spec=TestCase, env_vars=[], 
landing_page="index.html", optional=[]), + ] with ReplayManager([], server, target, use_harness=True, relaunch=2) as replay: assert not replay.run(testcases, 10, repeat=10) assert server.serve_path.call_count == 30 @@ -509,6 +605,7 @@ def test_replay_17(mocker): assert target.monitor.is_healthy.call_count == 5 assert all(x.dump.call_count == 1 for x in testcases) + def test_replay_18(mocker, tmp_path): """test ReplayManager.run() - multiple TestCases - successful repro""" mocker.patch("grizzly.replay.replay.grz_tmp", return_value=str(tmp_path)) @@ -516,20 +613,23 @@ def test_replay_18(mocker, tmp_path): server.serve_path.side_effect = ( (SERVED_ALL, ["a.html"]), (SERVED_ALL, ["b.html"]), - (SERVED_ALL, ["c.html"])) + (SERVED_ALL, ["c.html"]), + ) target = mocker.Mock(spec=Target, binary="fake_bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.side_effect = ( Target.RESULT_NONE, Target.RESULT_NONE, - Target.RESULT_FAILURE) + Target.RESULT_FAILURE, + ) target.monitor.is_healthy.return_value = False target.save_logs = _fake_save_logs testcases = [ mocker.Mock(spec=TestCase, env_vars=[], landing_page="a.html", optional=[]), mocker.Mock(spec=TestCase, env_vars=[], landing_page="b.html", optional=[]), - mocker.Mock(spec=TestCase, env_vars=[], landing_page="c.html", optional=[])] + mocker.Mock(spec=TestCase, env_vars=[], landing_page="c.html", optional=[]), + ] with ReplayManager([], server, target, use_harness=True) as replay: results = replay.run(testcases, 30) assert target.close.call_count == 2 @@ -544,6 +644,7 @@ def test_replay_18(mocker, tmp_path): assert len(results[0].durations) == len(testcases) assert all(x.dump.call_count == 1 for x in testcases) + def test_replay_19(mocker, tmp_path): """test ReplayManager.run() - multiple calls""" mocker.patch("grizzly.common.runner.sleep", autospec=True) @@ -563,6 +664,7 @@ def test_replay_19(mocker, tmp_path): assert replay.status.iteration == 1 assert server.serve_path.call_count == 3 + def test_replay_20(mocker, tmp_path): """test ReplayManager.report_to_filesystem()""" # no reports @@ -570,26 +672,31 @@ def test_replay_20(mocker, tmp_path): assert not any(tmp_path.iterdir()) # with reports and tests (tmp_path / "report_expected").mkdir() - result0 = mocker.Mock(ReplayResult, count=1, durations=[1], expected=True, served=[]) + result0 = mocker.Mock( + ReplayResult, count=1, durations=[1], expected=True, served=[] + ) result0.report = mocker.Mock( - spec=Report, - path=str(tmp_path / "report_expected"), - prefix="expected") + spec=Report, path=str(tmp_path / "report_expected"), prefix="expected" + ) (tmp_path / "report_other1").mkdir() - result1 = mocker.Mock(ReplayResult, count=1, durations=[1], expected=False, served=None) + result1 = mocker.Mock( + ReplayResult, count=1, durations=[1], expected=False, served=None + ) result1.report = mocker.Mock( - spec=Report, - path=str(tmp_path / "report_other1"), - prefix="other1") + spec=Report, path=str(tmp_path / "report_other1"), prefix="other1" + ) (tmp_path / "report_other2").mkdir() - result2 = mocker.Mock(ReplayResult, count=1, durations=[1], expected=False, served=None) + result2 = mocker.Mock( + ReplayResult, count=1, durations=[1], expected=False, served=None + ) result2.report = mocker.Mock( - spec=Report, - path=str(tmp_path / "report_other2"), - prefix="other2") + spec=Report, path=str(tmp_path / "report_other2"), prefix="other2" + ) test = mocker.Mock(spec=TestCase) path = tmp_path / "dest" - 
ReplayManager.report_to_filesystem(str(path), [result0, result1, result2], tests=[test]) + ReplayManager.report_to_filesystem( + str(path), [result0, result1, result2], tests=[test] + ) assert test.dump.call_count == 3 # called once per report assert not (tmp_path / "report_expected").is_dir() assert not (tmp_path / "report_other1").is_dir() @@ -609,6 +716,7 @@ def test_replay_20(mocker, tmp_path): assert path.is_dir() assert (path / "reports" / "expected_logs").is_dir() + def test_replay_21(mocker, tmp_path): """test ReplayManager.load_testcases()""" fake_load = mocker.patch("grizzly.replay.replay.TestCase.load") @@ -643,6 +751,7 @@ def test_replay_21(mocker, tmp_path): assert test1.cleanup.call_count == 1 assert test2.cleanup.call_count == 0 + @mark.parametrize( "is_hang, use_sig, match_sig, ignored, results", [ @@ -656,7 +765,7 @@ def test_replay_21(mocker, tmp_path): (False, True, False, 1, 0), # unexpected hang (no signature) (False, False, False, 1, 0), - ] + ], ) def test_replay_22(mocker, tmp_path, is_hang, use_sig, match_sig, ignored, results): """test ReplayManager.run() - detect hangs""" @@ -677,7 +786,9 @@ def test_replay_22(mocker, tmp_path, is_hang, use_sig, match_sig, ignored, resul target.save_logs = _fake_save_logs with TestCase("index.html", "redirect.html", "test-adapter") as testcase: testcase.hang = is_hang - with ReplayManager([], server, target, signature=signature, relaunch=10) as replay: + with ReplayManager( + [], server, target, signature=signature, relaunch=10 + ) as replay: found = replay.run([testcase], 10, expect_hang=is_hang) assert replay.status.iteration == 1 assert replay.status.ignored == ignored diff --git a/grizzly/session.py b/grizzly/session.py index ed9f6be3..15df2ec8 100644 --- a/grizzly/session.py +++ b/grizzly/session.py @@ -21,7 +21,14 @@ class SessionError(Exception): class LogOutputLimiter: - __slots__ = ("_delay", "_iterations", "_launches", "_multiplier", "_time", "_verbose") + __slots__ = ( + "_delay", + "_iterations", + "_launches", + "_multiplier", + "_time", + "_verbose", + ) def __init__(self, delay=300, delta_multiplier=2, verbose=False): self._delay = delay # maximum time delay between output @@ -59,7 +66,8 @@ class Session: EXIT_LAUNCH_FAILURE = 4 # unrelated Target failure (browser startup crash, etc) EXIT_FAILURE = 5 # expected results not reproduced (opposite of EXIT_SUCCESS) - TARGET_LOG_SIZE_WARN = 0x1900000 # display warning when target log files exceed limit (25MB) + # display warning when target log files exceed limit (25MB) + TARGET_LOG_SIZE_WARN = 0x1900000 __slots__ = ( "_coverage", @@ -70,7 +78,7 @@ class Session: "reporter", "server", "status", - "target" + "target", ) def __init__( @@ -82,7 +90,7 @@ def __init__( coverage=False, enable_profiling=False, relaunch=1, - report_size=1 + report_size=1, ): assert relaunch > 0 assert report_size > 0 @@ -116,7 +124,8 @@ def display_status(self, log_limiter): self.status.iteration, self.adapter.remaining, self.status.results, - self.status.test_name) + self.status.test_name, + ) elif log_limiter.ready(self.status.iteration, self.target.monitor.launches): LOG.info("I%04d-R%02d ", self.status.iteration, self.status.results) @@ -138,7 +147,7 @@ def run( time_limit, input_path=None, iteration_limit=0, - display_mode=DISPLAY_NORMAL + display_mode=DISPLAY_NORMAL, ): assert time_limit > 0 assert iteration_limit >= 0 @@ -174,16 +183,19 @@ def run( "/grz_harness", self.server.port, close_after=relaunch, - time_limit=time_limit) + time_limit=time_limit, + ) try: with 
self.status.measure("launch"): runner.launch(location, max_retries=3, retry_delay=0) except TargetLaunchError as exc: short_sig = exc.report.crash_info.createShortSignature() - LOG.info("Result: %s (%s:%s)", + LOG.info( + "Result: %s (%s:%s)", short_sig, exc.report.major[:8], - exc.report.minor[:8]) + exc.report.minor[:8], + ) self.reporter.submit([], exc.report) exc.report.cleanup() self.status.count_result(short_sig) @@ -200,7 +212,8 @@ def run( ignore, self.iomanager.server_map, current_test, - coverage=self._coverage) + coverage=self._coverage, + ) current_test.duration = result.duration # adapter callbacks if result.timeout: @@ -239,10 +252,9 @@ def run( short_sig = "Potential hang detected" else: short_sig = report.crash_info.createShortSignature() - LOG.info("Result: %s (%s:%s)", - short_sig, - report.major[:8], - report.minor[:8]) + LOG.info( + "Result: %s (%s:%s)", short_sig, report.major[:8], report.minor[:8] + ) self.reporter.submit(self.iomanager.tests, report) report.cleanup() self.status.count_result(short_sig) @@ -265,4 +277,6 @@ def run( # warn about large browser logs self.status.log_size = self.target.log_size() if self.status.log_size > self.TARGET_LOG_SIZE_WARN: - LOG.warning("Large browser logs: %dMBs", (self.status.log_size / 0x100000)) + LOG.warning( + "Large browser logs: %dMBs", (self.status.log_size / 0x100000) + ) diff --git a/grizzly/target/__init__.py b/grizzly/target/__init__.py index adf315bd..1b65654f 100644 --- a/grizzly/target/__init__.py +++ b/grizzly/target/__init__.py @@ -16,8 +16,15 @@ sanitizer_opts, ) -__all__ = ("Target", "TargetError", "TargetLaunchError", "TargetLaunchTimeout", - "available", "load", "sanitizer_opts") +__all__ = ( + "Target", + "TargetError", + "TargetLaunchError", + "TargetLaunchTimeout", + "available", + "load", + "sanitizer_opts", +) __author__ = "Tyson Smith" __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] @@ -28,21 +35,32 @@ def _load_targets(): global TARGETS # pylint: disable=global-statement TARGETS = {} - for entry_point in iter_entry_points('grizzly_targets'): + for entry_point in iter_entry_points("grizzly_targets"): LOG.debug("scanning target %r", entry_point.name) try: target = entry_point.load() except Exception: # pylint: disable=broad-except exc_type, exc_obj, exc_tb = exc_info() tbinfo = extract_tb(exc_tb)[-1] - LOG.warning("Target %r raised an exception %s: %s (%s:%d)", entry_point.name, exc_type.__name__, - exc_obj, tbinfo[0], tbinfo[1]) + LOG.warning( + "Target %r raised an exception %s: %s (%s:%d)", + entry_point.name, + exc_type.__name__, + exc_obj, + tbinfo[0], + tbinfo[1], + ) continue if not issubclass(target, Target): - LOG.warning("Target %r doesn't inherit from grizzly.target.Target, skipping.", entry_point.name) + LOG.warning( + "Target %r doesn't inherit from grizzly.target.Target, skipping.", + entry_point.name, + ) elif entry_point.name in TARGETS: - raise RuntimeError("Target %r already exists as %r. (duplicate: %r)" % - (entry_point.name, TARGETS[entry_point.name], target)) + raise RuntimeError( + "Target %r already exists as %r. 
(duplicate: %r)" + % (entry_point.name, TARGETS[entry_point.name], target) + ) else: TARGETS[entry_point.name] = target diff --git a/grizzly/target/puppet_target.py b/grizzly/target/puppet_target.py index 894a8cb8..c9ed4e06 100644 --- a/grizzly/target/puppet_target.py +++ b/grizzly/target/puppet_target.py @@ -34,7 +34,9 @@ class PuppetTarget(Target): __slots__ = ("use_rr", "use_valgrind", "_puppet", "_remove_prefs") - def __init__(self, binary, extension, launch_timeout, log_limit, memory_limit, **kwds): + def __init__( + self, binary, extension, launch_timeout, log_limit, memory_limit, **kwds + ): super().__init__(binary, extension, launch_timeout, log_limit, memory_limit) self.use_rr = kwds.pop("rr", False) self.use_valgrind = kwds.pop("valgrind", False) @@ -43,9 +45,12 @@ def __init__(self, binary, extension, launch_timeout, log_limit, memory_limit, * self._puppet = FFPuppet( use_rr=self.use_rr, use_valgrind=self.use_valgrind, - use_xvfb=kwds.pop("xvfb", False)) + use_xvfb=kwds.pop("xvfb", False), + ) if kwds: - LOG.warning("PuppetTarget ignoring unsupported arguments: %s", ", ".join(kwds)) + LOG.warning( + "PuppetTarget ignoring unsupported arguments: %s", ", ".join(kwds) + ) def add_abort_token(self, token): self._puppet.add_abort_token(token) @@ -80,19 +85,25 @@ def create_report(self, is_hang=False): @property def monitor(self): if self._monitor is None: + class _PuppetMonitor(TargetMonitor): # pylint: disable=no-self-argument,protected-access def clone_log(_, log_id, offset=0): return self._puppet.clone_log(log_id, offset=offset) + def is_running(_): return self._puppet.is_running() + def is_healthy(_): return self._puppet.is_healthy() + @property def launches(_): return self._puppet.launches + def log_length(_, log_id): return self._puppet.log_length(log_id) + self._monitor = _PuppetMonitor() return self._monitor @@ -198,7 +209,9 @@ def dump_coverage(self, timeout=15): break if elapsed >= timeout: # timeout failure - LOG.warning("gcda file open by pid %d after %0.2fs", gcda_open, elapsed) + LOG.warning( + "gcda file open by pid %d after %0.2fs", gcda_open, elapsed + ) try: kill(gcda_open, SIGABRT) except OSError: @@ -233,7 +246,8 @@ def launch(self, location, env_mod=None): memory_limit=self.memory_limit, prefs_js=self.prefs, extension=self.extension, - env_mod=env_mod) + env_mod=env_mod, + ) except LaunchError as exc: LOG.error("FFPuppet LaunchError: %s", str(exc)) self.close() @@ -251,7 +265,9 @@ def prefs(self): for prefs_template in PrefPicker.templates(): if prefs_template.endswith("browser-fuzzing.yml"): LOG.debug("using prefpicker template %r", prefs_template) - tmp_fd, self._prefs = mkstemp(prefix="prefs_", suffix=".js", dir=grz_tmp()) + tmp_fd, self._prefs = mkstemp( + prefix="prefs_", suffix=".js", dir=grz_tmp() + ) close(tmp_fd) PrefPicker.load_template(prefs_template).create_prefsjs(self._prefs) LOG.debug("generated prefs.js %r", self._prefs) diff --git a/grizzly/target/target.py b/grizzly/target/target.py index 4ff5a044..50ec0576 100644 --- a/grizzly/target/target.py +++ b/grizzly/target/target.py @@ -41,6 +41,7 @@ class TargetError(Exception): class TargetLaunchError(TargetError): """Raised if a failure during launch occurs""" + def __init__(self, message, report): super().__init__(message) self.report = report @@ -56,8 +57,15 @@ class Target(metaclass=ABCMeta): RESULT_IGNORED = 2 __slots__ = ( - "_lock", "_monitor", "_prefs", "binary", "extension", "launch_timeout", - "log_limit", "memory_limit") + "_lock", + "_monitor", + "_prefs", + "binary", + "extension", + 
"launch_timeout", + "log_limit", + "memory_limit", + ) def __init__(self, binary, extension, launch_timeout, log_limit, memory_limit): assert log_limit >= 0 diff --git a/grizzly/target/test_puppet_target.py b/grizzly/target/test_puppet_target.py index 5101dc22..b12957c1 100644 --- a/grizzly/target/test_puppet_target.py +++ b/grizzly/target/test_puppet_target.py @@ -41,6 +41,7 @@ def test_puppet_target_01(mocker, tmp_path): with PuppetTarget(str(fake_file), None, 1, 1, 1, rr=True, fake=1) as target: pass + def test_puppet_target_02(mocker, tmp_path): """test PuppetTarget.launch()""" fake_ffp = mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) @@ -65,12 +66,17 @@ def test_puppet_target_02(mocker, tmp_path): fake_ffp.reset_mock() (tmp_path / "log_stderr.txt").write_text("fake log") (tmp_path / "log_stdout.txt").write_text("fake log") - mocker.patch("grizzly.target.puppet_target.mkdtemp", autospec=True, return_value=str(tmp_path)) + mocker.patch( + "grizzly.target.puppet_target.mkdtemp", + autospec=True, + return_value=str(tmp_path), + ) fake_ffp.return_value.launch.side_effect = BrowserTerminatedError("fail") with raises(TargetLaunchError, match="fail"): target.launch("launch_target_page") assert fake_ffp.return_value.save_logs.call_count == 1 + @mark.parametrize( "healthy, reason, ignore, result, closes", [ @@ -88,7 +94,7 @@ def test_puppet_target_02(mocker, tmp_path): (False, FFPuppet.RC_WORKER, ["memory"], Target.RESULT_IGNORED, 1), # ffpuppet check ignored (log-limit) (False, FFPuppet.RC_WORKER, ["log-limit"], Target.RESULT_IGNORED, 1), - ] + ], ) def test_puppet_target_03(mocker, tmp_path, healthy, reason, ignore, result, closes): """test PuppetTarget.detect_failure()""" @@ -109,6 +115,7 @@ def test_puppet_target_03(mocker, tmp_path, healthy, reason, ignore, result, clo assert target.detect_failure(ignore) == result assert fake_ffp.return_value.close.call_count == closes + @mark.parametrize( "healthy, usage, os_name, killed", [ @@ -120,12 +127,13 @@ def test_puppet_target_03(mocker, tmp_path, healthy, reason, ignore, result, clo (True, [(234, 10), (236, 75), (238, 60)], "Linux", 1), # ignore idle timeout (close don't abort) (True, [(234, 10)], "Linux", 0), - - ] + ], ) def test_puppet_target_04(mocker, tmp_path, healthy, usage, os_name, killed): """test PuppetTarget.handle_hang()""" - mocker.patch("grizzly.target.puppet_target.system", autospec=True, return_value=os_name) + mocker.patch( + "grizzly.target.puppet_target.system", autospec=True, return_value=os_name + ) fake_ffp = mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) fake_kill = mocker.patch("grizzly.target.puppet_target.kill", autospec=True) # raise OSError for code coverage @@ -141,13 +149,16 @@ def test_puppet_target_04(mocker, tmp_path, healthy, usage, os_name, killed): assert fake_ffp.return_value.cpu_usage.call_count == (1 if usage else 0) assert fake_kill.call_count == fake_ffp.return_value.wait.call_count == killed + @mark.skipif(system() == "Windows", reason="Unsupported on Windows") def test_puppet_target_05(mocker, tmp_path): """test PuppetTarget.dump_coverage()""" fake_ffp = mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) fake_proc = mocker.patch("grizzly.target.puppet_target.Process", autospec=True) fake_proc.return_value.children.return_value = (mocker.Mock(pid=101),) - fake_proc_iter = mocker.patch("grizzly.target.puppet_target.process_iter", autospec=True) + fake_proc_iter = mocker.patch( + "grizzly.target.puppet_target.process_iter", autospec=True + ) 
mocker.patch("grizzly.target.puppet_target.sleep", autospec=True) fake_time = mocker.patch("grizzly.target.puppet_target.time", autospec=True) fake_file = tmp_path / "fake" @@ -187,7 +198,9 @@ def test_puppet_target_05(mocker, tmp_path): fake_ffp.return_value.is_healthy.side_effect = None fake_ffp.return_value.get_pid.return_value = 100 fake_proc_iter.return_value = ( - mocker.Mock(info={"pid": 100, "ppid": 0, "open_files": (mocker.Mock(path="a.gcda"),)}), + mocker.Mock( + info={"pid": 100, "ppid": 0, "open_files": (mocker.Mock(path="a.gcda"),)} + ), ) fake_time.side_effect = (0, 1, 20, 20) target.dump_coverage(timeout=15) @@ -203,17 +216,40 @@ def test_puppet_target_05(mocker, tmp_path): fake_time.return_value = 1.0 fake_proc_iter.side_effect = ( ( - mocker.Mock(info={"pid": 100, "ppid": 0, "open_files": (mocker.Mock(path="a.bin"), mocker.Mock(path="/a/s/d"))}), + mocker.Mock( + info={ + "pid": 100, + "ppid": 0, + "open_files": ( + mocker.Mock(path="a.bin"), + mocker.Mock(path="/a/s/d"), + ), + } + ), mocker.Mock(info={"pid": 101, "ppid": 100, "open_files": None}), - mocker.Mock(info={"pid": 999, "ppid": 0, "open_files": None}) + mocker.Mock(info={"pid": 999, "ppid": 0, "open_files": None}), ), ( - mocker.Mock(info={"pid": 100, "ppid": 0, "open_files": (mocker.Mock(path="a.gcda"),)}), + mocker.Mock( + info={ + "pid": 100, + "ppid": 0, + "open_files": (mocker.Mock(path="a.gcda"),), + } + ), ), ( - mocker.Mock(info={"pid": 100, "ppid": 0, "open_files": (mocker.Mock(path="a.bin"),)}), - mocker.Mock(info={"pid": 999, "ppid": 0, "open_files": (mocker.Mock(path="ignore.gcda"),)}) - ) + mocker.Mock( + info={"pid": 100, "ppid": 0, "open_files": (mocker.Mock(path="a.bin"),)} + ), + mocker.Mock( + info={ + "pid": 999, + "ppid": 0, + "open_files": (mocker.Mock(path="ignore.gcda"),), + } + ), + ), ) target.dump_coverage() assert fake_proc_iter.call_count == 3 @@ -233,6 +269,7 @@ def test_puppet_target_05(mocker, tmp_path): fake_kill.reset_mock() fake_proc_iter.reset_mock() + def test_puppet_target_06(mocker, tmp_path): """test PuppetTarget.is_idle()""" fake_ffp = mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) @@ -244,6 +281,7 @@ def test_puppet_target_06(mocker, tmp_path): assert not target.is_idle(25) assert target.is_idle(50) + def test_puppet_target_07(mocker, tmp_path): """test PuppetTarget.monitor""" fake_ffp = mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) @@ -266,6 +304,7 @@ def test_puppet_target_07(mocker, tmp_path): target.monitor.clone_log("somelog") assert fake_ffp.return_value.clone_log.call_count == 1 + def test_puppet_target_08(mocker, tmp_path): """test PuppetTarget.prefs""" mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) diff --git a/grizzly/target/test_target.py b/grizzly/target/test_target.py index 6e11b557..bfd39b22 100644 --- a/grizzly/target/test_target.py +++ b/grizzly/target/test_target.py @@ -9,28 +9,38 @@ class SimpleTarget(Target): def cleanup(self): pass + def close(self): pass + @property def closed(self): pass + def create_report(self, is_hang=False): pass + def detect_failure(self, ignored): pass + def handle_hang(self, ignore_idle=True): pass + def launch(self): pass + @property def monitor(self): return self._monitor + @property def prefs(self): pass + def save_logs(self, *_args, **_kwargs): pass + def test_target_01(tmp_path): """test creating a simple Target""" fake_file = tmp_path / "fake" @@ -49,6 +59,7 @@ def test_target_01(tmp_path): target.dump_coverage() target.reverse(1, 2) + def 
test_sanitizer_opts_01(tmp_path): """test sanitizer_opts()""" # test empty string @@ -69,7 +80,7 @@ def test_sanitizer_opts_01(tmp_path): assert opts["p2"] == "'x:\\a.1'" assert opts["p3"] == "'/test/path/'" assert opts["p4"] == "''" - assert opts["p5"] == "\"x:/a.a\"" + assert opts["p5"] == '"x:/a.a"' # platform specific parsing fake_file = tmp_path / "fake.log" opts = sanitizer_opts("bar=1:file='%s':foo=2" % (str(fake_file),)) diff --git a/grizzly/target/test_target_loader.py b/grizzly/target/test_target_loader.py index 833e5c08..2d82663a 100644 --- a/grizzly/target/test_target_loader.py +++ b/grizzly/target/test_target_loader.py @@ -20,100 +20,112 @@ class _FakeTarget2(Target): # pylint: disable=abstract-method def test_target_load_01(mocker): - '''If no targets are available, available() should return nothing.''' - mocker.patch('grizzly.target.TARGETS', None) - mocker.patch('grizzly.target.iter_entry_points', lambda _: []) + """If no targets are available, available() should return nothing.""" + mocker.patch("grizzly.target.TARGETS", None) + mocker.patch("grizzly.target.iter_entry_points", lambda _: []) assert not available() def test_target_load_02(mocker): - '''Loading targets works.''' - mocker.patch('grizzly.target.TARGETS', None) + """Loading targets works.""" + mocker.patch("grizzly.target.TARGETS", None) class _FakeEntryPoint1: - name = 'test1' + name = "test1" @staticmethod def load(): return _FakeTarget1 class _FakeEntryPoint2: - name = 'test2' + name = "test2" @staticmethod def load(): return _FakeTarget2 - mocker.patch('grizzly.target.iter_entry_points', lambda _: [_FakeEntryPoint1, _FakeEntryPoint2]) - assert set(available()) == {'test1', 'test2'} - assert load('test1') is _FakeTarget1 - assert load('test2') is _FakeTarget2 + mocker.patch( + "grizzly.target.iter_entry_points", + lambda _: [_FakeEntryPoint1, _FakeEntryPoint2], + ) + assert set(available()) == {"test1", "test2"} + assert load("test1") is _FakeTarget1 + assert load("test2") is _FakeTarget2 def test_target_load_03(mocker): - '''Non-Target will be skipped.''' - mocker.patch('grizzly.target.TARGETS', None) + """Non-Target will be skipped.""" + mocker.patch("grizzly.target.TARGETS", None) class _FakeEntryPoint1: - name = 'test1' + name = "test1" @staticmethod def load(): return Target class _FakeEntryPoint2: - name = 'test2' + name = "test2" @staticmethod def load(): return object - mocker.patch('grizzly.target.iter_entry_points', lambda _: [_FakeEntryPoint1, _FakeEntryPoint2]) - assert set(available()) == {'test1'} - assert load('test1') is Target + mocker.patch( + "grizzly.target.iter_entry_points", + lambda _: [_FakeEntryPoint1, _FakeEntryPoint2], + ) + assert set(available()) == {"test1"} + assert load("test1") is Target def test_target_load_04(mocker): - '''test load() with name collision''' - mocker.patch('grizzly.target.TARGETS', None) + """test load() with name collision""" + mocker.patch("grizzly.target.TARGETS", None) class _FakeEntryPoint1: - name = 'test' + name = "test" @staticmethod def load(): return _FakeTarget1 class _FakeEntryPoint2: - name = 'test' + name = "test" @staticmethod def load(): return _FakeTarget2 - mocker.patch('grizzly.target.iter_entry_points', lambda _: [_FakeEntryPoint1, _FakeEntryPoint2]) + mocker.patch( + "grizzly.target.iter_entry_points", + lambda _: [_FakeEntryPoint1, _FakeEntryPoint2], + ) with pytest.raises(RuntimeError, match=r"Target (.)test\1 already exists"): available() def test_target_load_05(mocker): - '''test load() with broken Target''' - 
mocker.patch('grizzly.target.TARGETS', None) + """test load() with broken Target""" + mocker.patch("grizzly.target.TARGETS", None) class _FakeEntryPoint1: - name = 'test1' + name = "test1" @staticmethod def load(): return Target class _FakeEntryPoint2: - name = 'test2' + name = "test2" @staticmethod def load(): raise Exception("boo!") - mocker.patch('grizzly.target.iter_entry_points', lambda _: [_FakeEntryPoint1, _FakeEntryPoint2]) - assert set(available()) == {'test1'} - assert load('test1') is Target + mocker.patch( + "grizzly.target.iter_entry_points", + lambda _: [_FakeEntryPoint1, _FakeEntryPoint2], + ) + assert set(available()) == {"test1"} + assert load("test1") is Target diff --git a/grizzly/target/test_target_monitor.py b/grizzly/target/test_target_monitor.py index 53a8fd30..3d255b0d 100644 --- a/grizzly/target/test_target_monitor.py +++ b/grizzly/target/test_target_monitor.py @@ -8,21 +8,27 @@ def test_target_monitor_01(tmp_path): """test a basic TargetMonitor""" + class _BasicMonitor(TargetMonitor): # pylint: disable=no-self-argument def clone_log(_, log_id, offset=0): log_file = tmp_path / "test_log.txt" log_file.write_bytes(b"test") return str(log_file) + def is_healthy(_): return True + def is_running(_): return True + @property def launches(_): return 1 + def log_length(_, log_id): return 100 + mon = _BasicMonitor() test_log = mon.clone_log("test_log", offset=0) assert os.path.isfile(test_log) diff --git a/grizzly/test_args.py b/grizzly/test_args.py index 9caa1b61..e91ce4ec 100644 --- a/grizzly/test_args.py +++ b/grizzly/test_args.py @@ -16,12 +16,13 @@ def test_grizzly_args_01(capsys, tmp_path): out, _ = capsys.readouterr() assert "For addition help check out the wiki" in out # test success - fake_bin = (tmp_path / "fake.bin") + fake_bin = tmp_path / "fake.bin" fake_bin.touch() argp = GrizzlyArgs() argp._adapters = ["test_adapter"] assert argp.parse_args(argv=[str(fake_bin), "test_adapter"]) + def test_grizzly_args_03(capsys): """test GrizzlyArgs.parse_args() handling binary""" # test missing required args @@ -35,9 +36,10 @@ def test_grizzly_args_03(capsys): _, err = capsys.readouterr() assert "error: file not found: 'missing_bin'" in err + def test_grizzly_args_04(capsys, tmp_path): """test GrizzlyArgs.parse_args() handling adapter""" - fake_bin = (tmp_path / "fake.bin") + fake_bin = tmp_path / "fake.bin" fake_bin.touch() # no adapters with raises(SystemExit): @@ -52,4 +54,5 @@ def test_grizzly_args_04(capsys, tmp_path): _, err = capsys.readouterr() assert "error: Adapter 'missing' does not exist. 
Available adapters: a1, b2" in err + # TODO: Add CommonArgs tests diff --git a/grizzly/test_main.py b/grizzly/test_main.py index 6a950327..198136e8 100644 --- a/grizzly/test_main.py +++ b/grizzly/test_main.py @@ -41,15 +41,19 @@ def __init__(self): self.verbose = False self.xvfb = False + # TODO: these could use call_count checks + def test_main_01(mocker): """test main()""" fake_adapter = mocker.Mock(spec=Adapter) fake_adapter.NAME = "fake" fake_adapter.TIME_LIMIT = 10 mocker.patch("grizzly.main.get_adapter", return_value=lambda: fake_adapter) - mocker.patch.dict("grizzly.target.TARGETS", values={"fake-target": mocker.Mock(spec=Target)}) + mocker.patch.dict( + "grizzly.target.TARGETS", values={"fake-target": mocker.Mock(spec=Target)} + ) fake_session = mocker.patch("grizzly.main.Session", autospec=True) fake_session.return_value.server = mocker.Mock(spec=Sapphire) fake_session.EXIT_SUCCESS = Session.EXIT_SUCCESS @@ -95,13 +99,16 @@ def test_main_01(mocker): args.s3_fuzzmanager = True assert main(args) == Session.EXIT_SUCCESS + def test_main_02(mocker): """test main() exit codes""" fake_adapter = mocker.Mock(spec=Adapter) fake_adapter.TIME_LIMIT = 10 fake_adapter.RELAUNCH = 0 mocker.patch("grizzly.main.get_adapter", return_value=lambda: fake_adapter) - mocker.patch.dict("grizzly.target.TARGETS", values={"fake-target": mocker.Mock(spec=Target)}) + mocker.patch.dict( + "grizzly.target.TARGETS", values={"fake-target": mocker.Mock(spec=Target)} + ) fake_session = mocker.patch("grizzly.main.Session", autospec=True) fake_session.EXIT_SUCCESS = Session.EXIT_SUCCESS fake_session.EXIT_ABORT = Session.EXIT_ABORT @@ -117,6 +124,7 @@ def test_main_02(mocker): fake_session.return_value.run.side_effect = TargetLaunchError("test", None) assert main(args) == Session.EXIT_LAUNCH_FAILURE + @mark.parametrize( "arg_testlimit, arg_timeout, result", [ @@ -130,7 +138,7 @@ def test_main_02(mocker): (10, 11, Session.EXIT_SUCCESS), # set test time limit greater than timeout (11, 10, Session.EXIT_ARGS), - ] + ], ) def test_main_03(mocker, arg_testlimit, arg_timeout, result): """test main() time-limit and timeout""" @@ -139,7 +147,9 @@ def test_main_03(mocker, arg_testlimit, arg_timeout, result): fake_adapter.RELAUNCH = 1 fake_adapter.TIME_LIMIT = 10 mocker.patch("grizzly.main.get_adapter", return_value=lambda: fake_adapter) - mocker.patch.dict("grizzly.target.TARGETS", values={"fake-target": mocker.Mock(spec=Target)}) + mocker.patch.dict( + "grizzly.target.TARGETS", values={"fake-target": mocker.Mock(spec=Target)} + ) fake_session = mocker.patch("grizzly.main.Session", autospec=True) fake_session.return_value.server = mocker.Mock(spec=Sapphire) fake_session.EXIT_ARGS = Session.EXIT_ARGS diff --git a/grizzly/test_session.py b/grizzly/test_session.py index 9cbc81f7..6ab65908 100644 --- a/grizzly/test_session.py +++ b/grizzly/test_session.py @@ -17,14 +17,17 @@ class SimpleAdapter(Adapter): NAME = "simple" + def __init__(self, use_harness, remaining=None): super().__init__() self.remaining = remaining self._use_harness = use_harness + def setup(self, input_path, server_map): if self._use_harness: self.enable_harness() self.fuzz["input"] = input_path + def generate(self, testcase, server_map): assert testcase.adapter_name == self.NAME testcase.input_fname = self.fuzz["input"] @@ -53,7 +56,7 @@ def generate(self, testcase, server_map): (True, True, False, 10, 10), # test Session.dump_coverage() (True, True, True, 2, 2), - ] + ], ) def test_session_01(mocker, tmp_path, harness, profiling, coverage, relaunch, iters): 
"""test Session with typical fuzzer Adapter""" @@ -81,7 +84,7 @@ def test_session_01(mocker, tmp_path, harness, profiling, coverage, relaunch, it ) as session: server.serve_path = lambda *a, **kv: ( SERVED_ALL, - [session.iomanager.page_name(offset=-1)] + [session.iomanager.page_name(offset=-1)], ) session.run([], 10, input_path="file.bin", iteration_limit=iters) assert session.status.iteration == iters @@ -98,6 +101,7 @@ def test_session_01(mocker, tmp_path, harness, profiling, coverage, relaunch, it else: assert target.dump_coverage.call_count == 0 + @mark.parametrize( "harness, relaunch, remaining", [ @@ -111,7 +115,7 @@ def test_session_01(mocker, tmp_path, harness, profiling, coverage, relaunch, it (True, 2, 10), # harness, 10 iterations (True, 10, 10), - ] + ], ) def test_session_02(tmp_path, mocker, harness, relaunch, remaining): """test Session with playback Adapter""" @@ -131,11 +135,11 @@ def test_session_02(tmp_path, mocker, harness, relaunch, remaining): None, server, target, - relaunch=relaunch + relaunch=relaunch, ) as session: server.serve_path = lambda *a, **kv: ( SERVED_ALL, - [session.iomanager.page_name(offset=-1)] + [session.iomanager.page_name(offset=-1)], ) session.run([], 10) assert session.status.iteration == remaining @@ -143,6 +147,7 @@ def test_session_02(tmp_path, mocker, harness, relaunch, remaining): assert target.detect_failure.call_count == remaining assert target.handle_hang.call_count == 0 + @mark.parametrize( "harness, report_size, relaunch, iters", [ @@ -189,19 +194,23 @@ def test_session_03(mocker, tmp_path, harness, report_size, relaunch, iters): ) as session: server.serve_path = lambda *a, **kv: ( SERVED_ALL, - [session.iomanager.page_name(offset=-1)] + [session.iomanager.page_name(offset=-1)], ) session.run([], 10, input_path="file.bin", iteration_limit=iters) assert reporter.submit.call_count == 1 assert len(reporter.submit.call_args[0][0]) == min(report_size, relaunch) assert reporter.submit.call_args[0][1].major == "major123" + def test_session_04(mocker, tmp_path): """test Adapter creating invalid test case""" + class FuzzAdapter(Adapter): NAME = "fuzz" + def generate(self, testcase, server_map): pass + Status.PATH = str(tmp_path) server = mocker.Mock(spec=Sapphire, port=0x1337) server.serve_path.return_value = (SERVED_NONE, []) @@ -211,6 +220,7 @@ def generate(self, testcase, server_map): with raises(SessionError, match="Test case is missing landing page"): session.run([], 10) + def test_session_05(mocker, tmp_path): """test Target not requesting landing page""" Status.PATH = str(tmp_path) @@ -222,6 +232,7 @@ def test_session_05(mocker, tmp_path): with raises(SessionError, match="Please check Adapter and Target"): session.run([], 10) + @mark.parametrize( "harness, report_size", [ @@ -254,7 +265,7 @@ def test_session_06(mocker, tmp_path, harness, report_size): server, target, relaunch=2, - report_size=report_size + report_size=report_size, ) as session: server.serve_path.side_effect = ( (SERVED_ALL, [session.iomanager.page_name()]), @@ -265,6 +276,7 @@ def test_session_06(mocker, tmp_path, harness, report_size): assert len(reporter.submit.call_args[0][0]) == 1 assert reporter.submit.call_args[0][1].major == "major123" + @mark.parametrize( "srv_results, target_result, ignored, results", [ @@ -298,6 +310,7 @@ def test_session_07(mocker, tmp_path, srv_results, target_result, ignored, resul assert target.detect_failure.call_count == results assert target.handle_hang.call_count == ignored + def test_session_08(tmp_path, mocker): """test Session.run() 
ignoring failures""" Status.PATH = str(tmp_path) @@ -321,6 +334,7 @@ def test_session_08(tmp_path, mocker): assert session.status.results == 0 assert session.status.ignored == 1 + def test_session_09(tmp_path, mocker): """test Session.run() handle TargetLaunchError""" Status.PATH = str(tmp_path) @@ -339,6 +353,7 @@ def test_session_09(tmp_path, mocker): assert session.status.results == 1 assert session.status.ignored == 0 + def test_session_10(tmp_path, mocker): """test Session.run() report hang""" Status.PATH = str(tmp_path) @@ -365,6 +380,7 @@ def test_session_10(tmp_path, mocker): assert session.status.results == 1 assert session.status.ignored == 0 + def test_log_output_limiter_01(mocker): """test LogOutputLimiter.ready() not ready""" fake_time = mocker.patch("grizzly.session.time", autospec=True) @@ -384,6 +400,7 @@ def test_log_output_limiter_01(mocker): lol._verbose = True assert lol.ready(0, 0) + def test_log_output_limiter_02(mocker): """test LogOutputLimiter.ready() due to iterations""" fake_time = mocker.patch("grizzly.session.time", autospec=True) @@ -396,6 +413,7 @@ def test_log_output_limiter_02(mocker): assert lol._launches == 2 assert lol._time == 1.1 + def test_log_output_limiter_03(mocker): """test LogOutputLimiter.ready() due to launches""" fake_time = mocker.patch("grizzly.session.time", autospec=True) @@ -407,6 +425,7 @@ def test_log_output_limiter_03(mocker): assert lol._iterations == 4 assert lol._time == 1.0 + def test_log_output_limiter_04(mocker): """test LogOutputLimiter.ready() due to time""" fake_time = mocker.patch("grizzly.session.time", autospec=True) diff --git a/loki/args.py b/loki/args.py index 4252936e..dd5fd7e6 100644 --- a/loki/args.py +++ b/loki/args.py @@ -11,25 +11,41 @@ def parse_args(argv=None): parser = ArgumentParser(description="Loki fuzzing library") + parser.add_argument("input", help="Output will be generated based on this file") parser.add_argument( - "input", - help="Output will be generated based on this file") + "-a", + "--aggression", + default=0.001, + type=float, + help="Maximum fuzz rate. 1.0 == 100%% (default: %(default)s)", + ) parser.add_argument( - "-a", "--aggression", default=0.001, type=float, - help="Maximum fuzz rate. 1.0 == 100%% (default: %(default)s)") + "-b", + "--byte-order", + default=None, + help="Byte order to use when mutating multiple bytes at once. " + "Use '>' for big-endian or '<' for little-endian (default: random)", + ) parser.add_argument( - "-b", "--byte-order", default=None, - help="Byte order to use when mutating multiple bytes at once. 
" \ - "Use '>' for big-endian or '<' for little-endian (default: random)") + "-c", + "--count", + default=1, + type=int, + help="Number test cases to generate, minimum 1 (default: %(default)s)", + ) parser.add_argument( - "-c", "--count", default=1, type=int, - help="Number test cases to generate, minimum 1 (default: %(default)s)") + "-q", + "--quiet", + default=False, + action="store_true", + help="Display limited output (default: %(default)s)", + ) parser.add_argument( - "-q", "--quiet", default=False, action="store_true", - help="Display limited output (default: %(default)s)") - parser.add_argument( - "-o", "--output", default=None, - help="Output directory for fuzzed test cases (default: '.')") + "-o", + "--output", + default=None, + help="Output directory for fuzzed test cases (default: '.')", + ) args = parser.parse_args(argv) if args.byte_order and args.byte_order not in Loki.BYTE_ORDERS: diff --git a/loki/loki.py b/loki/loki.py index d4764457..1b224d63 100644 --- a/loki/loki.py +++ b/loki/loki.py @@ -74,9 +74,7 @@ def _fuzz(self, tgt_fp): max_mutations = max(int(round(length * self.aggr)), 1) mutations = randint(1, max_mutations) LOG.debug( - "%d of a possible %d mutations will be performed", - mutations, - max_mutations + "%d of a possible %d mutations will be performed", mutations, max_mutations ) if self.byte_order is not None: assert self.byte_order in ("<", ">", "@", "!", "=") @@ -144,10 +142,7 @@ def main(cls, args): LOG.info("Output directory is %r", abspath(out_dir)) count = max(args.count, 1) LOG.info("Generating %d fuzzed test cases...", count) - loki = Loki( - aggression=args.aggression, - byte_order=args.byte_order - ) + loki = Loki(aggression=args.aggression, byte_order=args.byte_order) try: start_time = time() success = loki.fuzz_file(args.input, count, out_dir) diff --git a/loki/test_loki.py b/loki/test_loki.py index 4dbea7d1..4da23619 100644 --- a/loki/test_loki.py +++ b/loki/test_loki.py @@ -22,7 +22,7 @@ (4, 0.1, None), (5, 0.5, None), (100, 0.2, None), - ] + ], ) def test_loki_fuzz_file(tmp_path, in_size, aggression, byte_order): """test Loki.fuzz_file() with different file sizes""" @@ -45,6 +45,7 @@ def test_loki_fuzz_file(tmp_path, in_size, aggression, byte_order): else: raise AssertionError("failed to fuzz data") + def test_loki_01(tmp_path): """test Loki.fuzz_file() error cases""" fuzzer = Loki(aggression=0.1) @@ -59,6 +60,7 @@ def test_loki_01(tmp_path): assert not fuzzer.fuzz_file(str(tmp_fn), 1, str(out_path)) assert not list(out_path.iterdir()) + def test_loki_02(): """test Loki.fuzz_data()""" in_data = b"This is test DATA!" 
@@ -73,6 +75,7 @@ def test_loki_02(): else: raise AssertionError("failed to fuzz data") + def test_loki_fuzz_01(mocker): """test Loki._fuzz()""" loki = Loki(aggression=1) @@ -101,6 +104,7 @@ def test_loki_fuzz_01(mocker): tmp_fp.write(b"1") loki._fuzz(tmp_fp) + def test_loki_fuzz_02(mocker): """test Loki._fuzz_data() paths""" fake_randint = mocker.patch("loki.loki.randint", autospec=True) @@ -128,6 +132,7 @@ def test_loki_fuzz_02(mocker): with raises(AssertionError, match=r"Unsupported data size:"): Loki._fuzz_data(b"", ">") + def test_loki_stress_01(): """test Loki._fuzz_data() with random input""" orders = ("<", ">") @@ -142,28 +147,24 @@ def test_loki_stress_01(): in_data = pack("I", getrandbits(32)) assert len(Loki._fuzz_data(in_data, choice(orders))) == size + def test_main_01(mocker, tmp_path): """test main()""" out_path = tmp_path / "out" out_path.mkdir() # no output path provided fake_mkdtemp = mocker.patch( - "loki.loki.mkdtemp", - autospec=True, - return_value=str(out_path) + "loki.loki.mkdtemp", autospec=True, return_value=str(out_path) ) sample = tmp_path / "file.bin" sample.write_bytes(b"test!") args = mocker.Mock( - aggression=0.1, - byte_order=None, - count=15, - input=str(sample), - output=None + aggression=0.1, byte_order=None, count=15, input=str(sample), output=None ) assert Loki.main(args) == 0 assert fake_mkdtemp.call_count == 1 + def test_args_01(capsys): """test parse_args()""" assert parse_args(argv=["sample"]) diff --git a/sapphire/__init__.py b/sapphire/__init__.py index 49c6cb9f..7103f0ab 100644 --- a/sapphire/__init__.py +++ b/sapphire/__init__.py @@ -10,6 +10,13 @@ from .job import SERVED_ALL, SERVED_NONE, SERVED_REQUEST, SERVED_TIMEOUT from .server_map import ServerMap -__all__ = ("Sapphire", "SERVED_ALL", "SERVED_NONE", "SERVED_REQUEST", "SERVED_TIMEOUT", "ServerMap") +__all__ = ( + "Sapphire", + "SERVED_ALL", + "SERVED_NONE", + "SERVED_REQUEST", + "SERVED_TIMEOUT", + "ServerMap", +) __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] diff --git a/sapphire/__main__.py b/sapphire/__main__.py index a1c6b2ca..b0a1436f 100644 --- a/sapphire/__main__.py +++ b/sapphire/__main__.py @@ -18,26 +18,31 @@ def configure_logging(log_level): log_fmt = "[%(asctime)s] %(message)s" basicConfig(format=log_fmt, datefmt=date_fmt, level=log_level) + def parse_args(argv=None): # log levels for console logging level_map = {"DEBUG": DEBUG, "INFO": INFO} parser = ArgumentParser() + parser.add_argument("path", help="Specify a directory to act as wwwroot") parser.add_argument( - "path", - help="Specify a directory to act as wwwroot") - parser.add_argument( - "--log-level", default="INFO", - help="Configure console logging. Options: %s (default: %%(default)s)" % - ", ".join(k for k, v in sorted(level_map.items(), key=lambda x: x[1]))) + "--log-level", + default="INFO", + help="Configure console logging. Options: %s (default: %%(default)s)" + % ", ".join(k for k, v in sorted(level_map.items(), key=lambda x: x[1])), + ) parser.add_argument( - "--port", type=int, - help="Specify a port to bind to (default: random)") + "--port", type=int, help="Specify a port to bind to (default: random)" + ) parser.add_argument( - "--remote", action="store_true", - help="Allow connections from addresses other than 127.0.0.1") + "--remote", + action="store_true", + help="Allow connections from addresses other than 127.0.0.1", + ) parser.add_argument( - "--timeout", type=int, - help="Duration in seconds to serve before exiting. 
Default run forever.") + "--timeout", + type=int, + help="Duration in seconds to serve before exiting. Default run forever.", + ) args = parser.parse_args(argv) # sanity check if not isdir(args.path): @@ -50,6 +55,7 @@ def parse_args(argv=None): args.log_level = log_level return args + ARGS = parse_args() configure_logging(ARGS.log_level) Sapphire.main(ARGS) diff --git a/sapphire/conftest.py b/sapphire/conftest.py index d42ef5fa..a30993a4 100644 --- a/sapphire/conftest.py +++ b/sapphire/conftest.py @@ -42,8 +42,17 @@ def close(self): self.thread = None self._idle.set() - def launch(self, addr, port, files_to_serve, delay=0, in_order=False, indicate_failure=False, - skip_served=True, throttle=0): + def launch( + self, + addr, + port, + files_to_serve, + delay=0, + in_order=False, + indicate_failure=False, + skip_served=True, + throttle=0, + ): assert self._idle.is_set() assert self.thread is None self._idle.clear() @@ -55,12 +64,25 @@ def launch(self, addr, port, files_to_serve, delay=0, in_order=False, indicate_f "in_order": in_order, "indicate_failure": indicate_failure, "skip_served": skip_served, - "throttle": throttle}) + "throttle": throttle, + }, + ) self.thread.start() - def _handle_request(self, addr, port, files_to_request, delay=0, in_order=False, - indicate_failure=False, skip_served=True, throttle=0): - assert isinstance(files_to_request, list), "files_to_request should be a list" + def _handle_request( + self, + addr, + port, + files_to_request, + delay=0, + in_order=False, + indicate_failure=False, + skip_served=True, + throttle=0, + ): + assert isinstance( + files_to_request, list + ), "files_to_request should be a list" if delay: time.sleep(delay) indexes = list(range(len(files_to_request))) @@ -72,14 +94,16 @@ def _handle_request(self, addr, port, files_to_request, delay=0, in_order=False, # check if the file has been served if skip_served and t_file.code is not None: continue - # if t_file.md5_org is set to anything but None the test client will calculate - # the md5 hash + # if t_file.md5_org is set to anything but None the test client + # will calculate the md5 hash data_hash = hashlib.md5() if t_file.md5_org is not None else None target_url = quote(t_file.url) cli = None try: if t_file.custom_request is None: - cli = urlopen("http://%s:%d/%s" % (addr, port, target_url), timeout=10) + cli = urlopen( + "http://%s:%d/%s" % (addr, port, target_url), timeout=10 + ) resp_code = cli.getcode() content_type = cli.info().get("Content-Type") if resp_code == 200: @@ -91,7 +115,8 @@ def _handle_request(self, addr, port, files_to_request, delay=0, in_order=False, data_hash.update(data) if len(data) < self.rx_size: break - if throttle > 0: # try to simulate a slow connection + if throttle > 0: + # try to simulate a slow connection time.sleep(throttle) if data_hash is not None: data_hash = data_hash.hexdigest() @@ -99,16 +124,24 @@ def _handle_request(self, addr, port, files_to_request, delay=0, in_order=False, sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.connect((addr, port)) - sock.settimeout(10) # safety, so test doesn't hang on failure + # safety, so test doesn't hang on failure + sock.settimeout(10) sock.sendall(t_file.custom_request) - data = sock.recv(self.rx_size) if t_file.custom_request else b"" + data = ( + sock.recv(self.rx_size) + if t_file.custom_request + else b"" + ) finally: sock.close() content_type = None data_length = len(data) try: - resp_code = int(re.match(r"HTTP/1\.\d\s(?P\d+)\s", - data.decode("ascii")).group("code")) + resp_code = int( + 
re.match( + r"HTTP/1\.\d\s(?P\d+)\s", data.decode("ascii") + ).group("code") + ) except AttributeError: # set code to zero to help testing resp_code = 0 if indicate_failure else None @@ -138,7 +171,8 @@ def _handle_request(self, addr, port, files_to_request, delay=0, in_order=False, exc_type.__name__, exc_obj, exc_tb.tb_lineno, - t_file.url) + t_file.url, + ) if indicate_failure: if not skip_served or t_file.code is None: t_file.code = 0 diff --git a/sapphire/connection_manager.py b/sapphire/connection_manager.py index edc7316a..f3ca796b 100644 --- a/sapphire/connection_manager.py +++ b/sapphire/connection_manager.py @@ -44,7 +44,8 @@ def close(self): exc_type, exc_obj, exc_tb = self._job.exceptions.get() LOG.error( "Unexpected exception:\n%s", - "".join(format_exception(exc_type, exc_obj, exc_tb))) + "".join(format_exception(exc_type, exc_obj, exc_tb)), + ) # re-raise exception from worker once all workers are closed raise exc_obj @@ -54,7 +55,8 @@ def start(self): listener = Thread( target=self.listener, args=(self._socket, self._job, self._workers), - kwargs={"shutdown_delay": self.SHUTDOWN_DELAY}) + kwargs={"shutdown_delay": self.SHUTDOWN_DELAY}, + ) # launch listener thread and handle thread errors for retry in reversed(range(10)): try: @@ -106,7 +108,9 @@ def listener(serv_sock, serv_job, max_workers, shutdown_delay=0): pool_size += 1 # manage worker pool if pool_size >= max_workers: - LOG.debug("pool size: %d, waiting for worker to finish...", pool_size) + LOG.debug( + "pool size: %d, waiting for worker to finish...", pool_size + ) serv_job.worker_complete.wait() serv_job.worker_complete.clear() # remove complete workers diff --git a/sapphire/core.py b/sapphire/core.py index 337e9a04..b2b7d7a8 100644 --- a/sapphire/core.py +++ b/sapphire/core.py @@ -27,7 +27,9 @@ class Sapphire: __slots__ = ("_auto_close", "_max_workers", "_socket", "_timeout") - def __init__(self, allow_remote=False, auto_close=-1, max_workers=10, port=None, timeout=60): + def __init__( + self, allow_remote=False, auto_close=-1, max_workers=10, port=None, timeout=60 + ): self._auto_close = auto_close # call 'window.close()' on 4xx error pages self._max_workers = max_workers # limit worker threads self._socket = Sapphire._create_listening_socket(allow_remote, port) @@ -126,7 +128,14 @@ def port(self): """ return self._socket.getsockname()[1] - def serve_path(self, path, continue_cb=None, forever=False, optional_files=None, server_map=None): + def serve_path( + self, + path, + continue_cb=None, + forever=False, + optional_files=None, + server_map=None, + ): """Serve files in path. On completion a list served files and a status code will be returned. 
The status codes include: @@ -153,7 +162,8 @@ def serve_path(self, path, continue_cb=None, forever=False, optional_files=None, auto_close=self._auto_close, forever=forever, optional_files=optional_files, - server_map=server_map) + server_map=server_map, + ) if not job.pending: job.finish() LOG.debug("nothing to serve") @@ -195,12 +205,15 @@ def timeout(self, value): @classmethod def main(cls, args): try: - with cls(allow_remote=args.remote, port=args.port, timeout=args.timeout) as serv: + with cls( + allow_remote=args.remote, port=args.port, timeout=args.timeout + ) as serv: LOG.info( "Serving %r @ http://%s:%d/", abspath(args.path), gethostname() if args.remote else "127.0.0.1", - serv.port) + serv.port, + ) status = serv.serve_path(args.path)[0] if status == SERVED_ALL: LOG.info("All test case content was served") diff --git a/sapphire/job.py b/sapphire/job.py index 9328cab4..84157167 100644 --- a/sapphire/job.py +++ b/sapphire/job.py @@ -24,8 +24,8 @@ # job status codes -SERVED_ALL = 0 # all expected requests for required files have been received -SERVED_NONE = 1 # no requests for required files have been received +SERVED_ALL = 0 # all expected requests for required files have been received +SERVED_NONE = 1 # no requests for required files have been received SERVED_REQUEST = 2 # some requests for required files have been received SERVED_TIMEOUT = 3 # timeout occurred @@ -41,14 +41,31 @@ class Job: ".bmp": "image/bmp", ".ico": "image/x-icon", ".wave": "audio/x-wav", - ".webp": "image/webp" + ".webp": "image/webp", } __slots__ = ( - "_complete", "_pending", "_served", "auto_close", "accepting", "base_path", - "exceptions", "forever", "initial_queue_size", "server_map", "worker_complete") - - def __init__(self, base_path, auto_close=-1, forever=False, optional_files=None, server_map=None): + "_complete", + "_pending", + "_served", + "auto_close", + "accepting", + "base_path", + "exceptions", + "forever", + "initial_queue_size", + "server_map", + "worker_complete", + ) + + def __init__( + self, + base_path, + auto_close=-1, + forever=False, + optional_files=None, + server_map=None, + ): self._complete = Event() self._pending = Tracker(files=set(), lock=Lock()) self._served = Tracker(files=defaultdict(int), lock=Lock()) @@ -76,13 +93,15 @@ def _build_queue(self, optional_files): continue file_path = abspath(file_path) if "?" in file_path: - LOG.warning("Cannot add files with '?' in path. Skipping %r", file_path) + LOG.warning( + "Cannot add files with '?' in path. 
Skipping %r", file_path + ) continue self._pending.files.add(file_path) LOG.debug("required: %r", location) # if nothing was found check if the path exists if not self._pending.files and not isdir(self.base_path): - raise OSError("%r does not exist" % (self.base_path),) + raise OSError("%r does not exist" % (self.base_path,)) if self.server_map: for redirect, resource in self.server_map.redirect.items(): if resource.required: @@ -91,7 +110,8 @@ def _build_queue(self, optional_files): "%s: %r -> %r", "required" if resource.required else "optional", redirect, - resource.target) + resource.target, + ) for dyn_resp, resource in self.server_map.dynamic.items(): if resource.required: self._pending.files.add(dyn_resp) @@ -122,7 +142,9 @@ def check_request(self, request): if request in self.server_map.dynamic: return self.server_map.dynamic[request] # collect possible include matches - includes = tuple(x for x in self.server_map.include if request.startswith(x)) + includes = tuple( + x for x in self.server_map.include if request.startswith(x) + ) if includes: LOG.debug("potential include matches %r", includes) # attempt to find match @@ -130,12 +152,14 @@ def check_request(self, request): while True: if url in includes: LOG.debug("found include match %r", url) - location = request.split(url, 1)[-1].lstrip("/") if url else request + location = ( + request.split(url, 1)[-1].lstrip("/") if url else request + ) # check location points to something if location: target = pathjoin( - self.server_map.include[url].target, - location) + self.server_map.include[url].target, location + ) # if the mapping url is empty check the file exists if url or isfile(target): mime = self.server_map.include[url].mime @@ -145,7 +169,8 @@ def check_request(self, request): Resource.URL_INCLUDE, normpath(target), mime=mime, - required=self.server_map.include[url].required) + required=self.server_map.include[url].required, + ) if "/" in url: url = url.rsplit("/", 1)[0] elif url: diff --git a/sapphire/server_map.py b/sapphire/server_map.py index 8e50886f..8a61b16a 100644 --- a/sapphire/server_map.py +++ b/sapphire/server_map.py @@ -53,7 +53,9 @@ def _check_url(url): raise InvalidURLError("Only alphanumeric characters accepted in URL.") return url - def set_dynamic_response(self, url, callback, mime_type="application/octet-stream", required=False): + def set_dynamic_response( + self, url, callback, mime_type="application/octet-stream", required=False + ): url = self._check_url(url) if not callable(callback): raise TypeError("callback must be callable") @@ -63,10 +65,8 @@ def set_dynamic_response(self, url, callback, mime_type="application/octet-strea raise MapCollisionError("URL collision on %r" % (url,)) LOG.debug("mapping dynamic response %r -> %r (%r)", url, callback, mime_type) self.dynamic[url] = Resource( - Resource.URL_DYNAMIC, - callback, - mime=mime_type, - required=required) + Resource.URL_DYNAMIC, callback, mime=mime_type, required=required + ) def set_include(self, url, target_path): url = self._check_url(url) @@ -85,14 +85,16 @@ def set_include(self, url, target_path): continue if not relpath(target_path, resource.target).startswith(".."): LOG.error("%r mapping includes path %r", existing_url, target_path) - raise MapCollisionError("%r and %r include %r" % (url, existing_url, target_path)) + raise MapCollisionError( + "%r and %r include %r" % (url, existing_url, target_path) + ) if not relpath(resource.target, target_path).startswith(".."): LOG.error("%r mapping includes path %r", url, resource.target) - raise 
MapCollisionError("%r and %r include %r" % (url, existing_url, resource.target)) + raise MapCollisionError( + "%r and %r include %r" % (url, existing_url, resource.target) + ) LOG.debug("mapping include %r -> %r", url, target_path) - self.include[url] = Resource( - Resource.URL_INCLUDE, - target_path) + self.include[url] = Resource(Resource.URL_INCLUDE, target_path) def set_redirect(self, url, target, required=True): url = self._check_url(url) @@ -102,7 +104,4 @@ def set_redirect(self, url, target, required=True): raise TypeError("target must not be an empty string") if url in self.dynamic or url in self.include: raise MapCollisionError("URL collision on %r" % (url,)) - self.redirect[url] = Resource( - Resource.URL_REDIRECT, - target, - required=required) + self.redirect[url] = Resource(Resource.URL_REDIRECT, target, required=required) diff --git a/sapphire/test_connection_manager.py b/sapphire/test_connection_manager.py index aca1b6bf..f99d7397 100644 --- a/sapphire/test_connection_manager.py +++ b/sapphire/test_connection_manager.py @@ -30,6 +30,7 @@ def test_connection_manager_01(mocker, tmp_path): assert not job.accepting.is_set() assert job.exceptions.empty() + def test_connection_manager_02(mocker): """test ConnectionManager.start() failure""" mocker.patch("sapphire.connection_manager.sleep", autospec=True) @@ -43,6 +44,7 @@ def test_connection_manager_02(mocker): loadmgr.close() assert job.is_complete() + def test_connection_manager_03(mocker, tmp_path): """test ConnectionManager multiple files and requests""" (tmp_path / "test1").touch() @@ -58,7 +60,8 @@ def test_connection_manager_03(mocker, tmp_path): b"GET /test2 HTTP/1.1", b"GET /test1 HTTP/1.1", b"GET /test1 HTTP/1.1", - b"GET /test3 HTTP/1.1") + b"GET /test3 HTTP/1.1", + ) serv_sock = mocker.Mock(spec=socket) serv_sock.accept.return_value = (clnt_sock, None) assert not job.is_complete() @@ -67,6 +70,7 @@ def test_connection_manager_03(mocker, tmp_path): assert clnt_sock.close.call_count == 8 assert job.is_complete() + def test_connection_manager_04(mocker, tmp_path): """test ConnectionManager.wait()""" (tmp_path / "test1").touch() @@ -88,6 +92,7 @@ def test_connection_manager_04(mocker, tmp_path): with ConnectionManager(job, serv_sock, max_workers=10) as loadmgr: assert not loadmgr.wait(1, continue_cb=lambda: False, poll=0.01) + def test_connection_manager_05(mocker, tmp_path): """test ConnectionManager re-raise worker exceptions""" (tmp_path / "test1").touch() @@ -103,6 +108,7 @@ def test_connection_manager_05(mocker, tmp_path): assert job.is_complete() assert job.exceptions.empty() + def test_connection_manager_06(mocker, tmp_path): """test ConnectionManager re-raise launcher exceptions""" (tmp_path / "test1").touch() diff --git a/sapphire/test_job.py b/sapphire/test_job.py index d60ec9ea..29e28682 100644 --- a/sapphire/test_job.py +++ b/sapphire/test_job.py @@ -30,6 +30,7 @@ def test_job_01(tmp_path): assert not any(job.served) assert job.is_complete() + def test_job_02(tmp_path): """test Job proper handling of required and optional files""" opt1_path = tmp_path / "opt_file_1.txt" @@ -42,8 +43,7 @@ def test_job_02(tmp_path): req2_path = tmp_path / "nested" / "req_file_2.txt" req2_path.write_bytes(b"d") job = Job( - str(tmp_path), - optional_files=[opt1_path.name, "nested/%s" % (opt2_path.name,)] + str(tmp_path), optional_files=[opt1_path.name, "nested/%s" % (opt2_path.name,)] ) assert job.status == SERVED_NONE assert not job.is_complete() @@ -74,6 +74,7 @@ def test_job_02(tmp_path): job.finish() assert job.is_complete() + 
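As a side note for readers of this hunk, the required/optional split exercised above is driven entirely through the Job constructor. The following is only an illustrative sketch (the module path, directory, and file names are assumptions, not taken from the patch), not part of the test suite:

    from sapphire.job import Job, SERVED_NONE

    # files under the base path are required unless listed in optional_files,
    # which holds paths relative to the base path (e.g. "nested/opt_file_2.txt")
    job = Job("/path/to/testcase", optional_files=["opt_file_1.txt"])
    assert job.status == SERVED_NONE           # nothing requested yet
    print("required files left:", job.pending)
    resource = job.check_request("test.html")  # map a URL path to a Resource
    job.remove_pending(resource.target)        # mark the required file as handled
    job.finish()
    assert job.is_complete()
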
def test_job_03(tmp_path): """test Job redirects""" smap = ServerMap() @@ -90,6 +91,7 @@ def test_job_03(tmp_path): assert job.remove_pending("two") assert job.pending == 0 + def test_job_04(mocker, tmp_path): """test Job includes""" srv_root = tmp_path / "root" @@ -142,6 +144,7 @@ def test_job_04(mocker, tmp_path): assert not job.is_forbidden(str(srv_root / ".." / "test" / "test_file.txt")) assert not job.is_forbidden(str(srv_include / ".." / "root" / "req_file.txt")) + def test_job_05(tmp_path): """test Job.check_request() with tricky includes""" srv_root = tmp_path / "root" @@ -176,17 +179,18 @@ def test_job_05(tmp_path): assert resource.target == str(file_a) # inc and inc subdir collision # TODO: This can fail. How do we detect or support it? - #smap.include.clear() - #(inc_dir / "c").mkdir() - #inc_c_d = (inc_dir / "c" / "d.bin") - #inc_c_d.write_bytes(b"a") - #inc_d = (inc_dir / "d.bin") - #inc_d.write_bytes(b"a") - #smap.include["c"] = Resource(Resource.URL_INCLUDE, str(inc_dir)) - #smap.include[""] = Resource(Resource.URL_INCLUDE, str(inc_dir / "c")) - #resource = job.check_request("c/d.bin") - #assert resource.type == Resource.URL_INCLUDE - #assert resource.target == str(inc_c_d) + # smap.include.clear() + # (inc_dir / "c").mkdir() + # inc_c_d = (inc_dir / "c" / "d.bin") + # inc_c_d.write_bytes(b"a") + # inc_d = (inc_dir / "d.bin") + # inc_d.write_bytes(b"a") + # smap.include["c"] = Resource(Resource.URL_INCLUDE, str(inc_dir)) + # smap.include[""] = Resource(Resource.URL_INCLUDE, str(inc_dir / "c")) + # resource = job.check_request("c/d.bin") + # assert resource.type == Resource.URL_INCLUDE + # assert resource.target == str(inc_c_d) + def test_job_06(tmp_path): """test Job dynamic""" @@ -206,6 +210,7 @@ def test_job_06(tmp_path): assert callable(resource.target) assert isinstance(resource.mime, str) + def test_job_07(tmp_path): """test accessing forbidden files""" srv_root = tmp_path / "root" @@ -223,8 +228,8 @@ def test_job_07(tmp_path): assert not job.is_forbidden(str(test_1)) assert job.is_forbidden(str(srv_root / "../no_access.txt")) -@pytest.mark.skipif(platform.system() == "Windows", - reason="Unsupported on Windows") + +@pytest.mark.skipif(platform.system() == "Windows", reason="Unsupported on Windows") def test_job_08(tmp_path): """test Job with file names containing invalid characters""" test_file = tmp_path / "test.txt" @@ -235,11 +240,13 @@ def test_job_08(tmp_path): assert job.pending == 1 assert job.check_request("test.txt").target == str(test_file) + def test_job_09(): """test Job with missing directory""" with pytest.raises(OSError): Job("missing") + def test_job_10(tmp_path): """test Job.increment_served() and Job.served""" job = Job(str(tmp_path)) @@ -249,6 +256,7 @@ def test_job_10(tmp_path): job.increment_served("/some/include/path/inc.bin") assert "/some/include/path/inc.bin" in job.served + def test_job_11(): """test Job.lookup_mime()""" assert Job.lookup_mime("unknown") == "application/octet-stream" diff --git a/sapphire/test_sapphire.py b/sapphire/test_sapphire.py index b6931bf9..4a155188 100644 --- a/sapphire/test_sapphire.py +++ b/sapphire/test_sapphire.py @@ -56,11 +56,16 @@ def test_sapphire_00(client, tmp_path): assert test.code == 200 assert test.len_srv == test.len_org + def test_sapphire_01(client, tmp_path): """test requesting multiple files (test cleanup code)""" to_serve = list() for i in range(100): - to_serve.append(_create_test("test_%03d.html" % i, tmp_path, data=os.urandom(5), calc_hash=True)) + to_serve.append( + _create_test( + 
"test_%03d.html" % i, tmp_path, data=os.urandom(5), calc_hash=True + ) + ) with Sapphire(timeout=30) as serv: client.launch("127.0.0.1", serv.port, to_serve) status, files_served = serv.serve_path(str(tmp_path)) @@ -72,6 +77,7 @@ def test_sapphire_01(client, tmp_path): assert t_file.len_srv == t_file.len_org assert t_file.md5_srv == t_file.md5_org + def test_sapphire_02(client, tmp_path): """test serving optional file""" files_to_serve = list() @@ -88,6 +94,7 @@ def test_sapphire_02(client, tmp_path): assert t_file.code == 200 assert t_file.len_srv == t_file.len_org + def test_sapphire_03(client, tmp_path): """test skipping optional file""" files_to_serve = list() @@ -106,6 +113,7 @@ def test_sapphire_03(client, tmp_path): assert t_file.code == 200 assert t_file.len_srv == t_file.len_org + def test_sapphire_04(client, tmp_path): """test requesting invalid file (404)""" files_to_serve = list() @@ -119,6 +127,7 @@ def test_sapphire_04(client, tmp_path): assert files_to_serve[0].code == 404 assert files_to_serve[1].code == 200 + def test_sapphire_05(client, tmp_path): """test requesting a file outside of the server root (403)""" files_to_serve = list() @@ -127,7 +136,9 @@ def test_sapphire_05(client, tmp_path): # add invalid file files_to_serve.append(_TestFile(os.path.abspath(__file__))) # add file in parent of root_dir - files_to_serve.append(_create_test("no_access.html", tmp_path, data=b"no_access", url_prefix="../")) + files_to_serve.append( + _create_test("no_access.html", tmp_path, data=b"no_access", url_prefix="../") + ) assert (tmp_path / "no_access.html").is_file() # add valid test files_to_serve.append(_create_test("test_case.html", root_dir)) @@ -143,6 +154,7 @@ def test_sapphire_05(client, tmp_path): assert files_to_serve[1].code == 403 assert files_to_serve[2].code == 200 + def test_sapphire_06(client, tmp_path): """test serving no files... 
this should never happen but...""" with Sapphire(timeout=10) as serv: @@ -151,6 +163,7 @@ def test_sapphire_06(client, tmp_path): assert status == SERVED_NONE assert not files_served + def test_sapphire_07(tmp_path): """test timeout of the server""" with Sapphire(timeout=60) as serv: @@ -167,6 +180,7 @@ def test_sapphire_07(tmp_path): assert status == SERVED_TIMEOUT assert not files_served + def test_sapphire_08(client, tmp_path): """test only serving some files (SERVED_REQUEST)""" cb_status = {"count": 0} @@ -184,6 +198,7 @@ def is_running(): assert status == SERVED_REQUEST assert len(files_served) < len(files_to_serve) + def test_sapphire_09(client, tmp_path): """test serving interesting sized files""" tests = [ @@ -196,7 +211,9 @@ def test_sapphire_09(client, tmp_path): ] for test in tests: test["file"] = _TestFile(test["name"]) - t_data = "".join(random.choice("ABCD1234") for _ in range(test["size"])).encode("ascii") + t_data = "".join(random.choice("ABCD1234") for _ in range(test["size"])).encode( + "ascii" + ) (tmp_path / test["file"].url).write_bytes(t_data) test["file"].md5_org = hashlib.md5(t_data).hexdigest() with Sapphire(timeout=10) as serv: @@ -210,6 +227,7 @@ def test_sapphire_09(client, tmp_path): assert test["file"].len_srv == test["size"] assert test["file"].md5_srv == test["file"].md5_org + def test_sapphire_10(client, tmp_path): """test serving a large (100MB) file""" t_file = _TestFile("test_case.html") @@ -229,9 +247,12 @@ def test_sapphire_10(client, tmp_path): assert t_file.len_srv == (100 * 1024 * 1024) assert t_file.md5_srv == t_file.md5_org + def test_sapphire_11(client, tmp_path): """test serving a binary file""" - t_file = _create_test("test_case.html", tmp_path, data=os.urandom(512), calc_hash=True) + t_file = _create_test( + "test_case.html", tmp_path, data=os.urandom(512), calc_hash=True + ) with Sapphire(timeout=10) as serv: client.launch("127.0.0.1", serv.port, [t_file]) assert serv.serve_path(str(tmp_path))[0] == SERVED_ALL @@ -240,12 +261,14 @@ def test_sapphire_11(client, tmp_path): assert t_file.len_srv == t_file.len_org assert t_file.md5_srv == t_file.md5_org + def test_sapphire_12(): """test requested port is used""" test_port = 0x1337 with Sapphire(port=test_port, timeout=1) as serv: assert test_port == serv.port + def test_sapphire_13(client, tmp_path): """test serving multiple content types""" files_to_serve = list() @@ -266,6 +289,7 @@ def test_sapphire_13(client, tmp_path): assert test.content_type == content_type assert len(content_types) == 2 + def test_sapphire_14(tmp_path): """test callback""" cb_status = {"count": 0} @@ -277,9 +301,12 @@ def _test_callback(): with Sapphire(timeout=10) as serv: _create_test("test_case.html", tmp_path) - assert serv.serve_path(str(tmp_path), continue_cb=_test_callback)[0] == SERVED_NONE + assert ( + serv.serve_path(str(tmp_path), continue_cb=_test_callback)[0] == SERVED_NONE + ) assert cb_status["count"] == 2 + def test_sapphire_15(client, tmp_path): """test calling serve_path multiple times""" with Sapphire(timeout=10) as serv: @@ -293,6 +320,7 @@ def test_sapphire_15(client, tmp_path): assert test.len_srv == test.len_org (tmp_path / test.url).unlink() + def test_sapphire_16(client, tmp_path): """test non required mapped redirects""" smap = ServerMap() @@ -305,13 +333,16 @@ def test_sapphire_16(client, tmp_path): assert test.code == 200 assert test.len_srv == test.len_org + def test_sapphire_17(client, tmp_path): """test required mapped redirects""" smap = ServerMap() with Sapphire(timeout=10) as serv: 
files_to_serve = list() # redir_target will be requested indirectly via the redirect - redir_target = _create_test("redir_test_case.html", tmp_path, data=b"Redirect DATA!") + redir_target = _create_test( + "redir_test_case.html", tmp_path, data=b"Redirect DATA!" + ) redir_test = _TestFile("redirect_test") smap.set_redirect(redir_test.url, redir_target.url, required=True) files_to_serve.append(redir_test) @@ -327,6 +358,7 @@ def test_sapphire_17(client, tmp_path): assert redir_test.code == 200 assert redir_test.len_srv == redir_target.len_org + def test_sapphire_18(client, tmp_path): """test include directories and permissions""" inc1_path = tmp_path / "inc1" @@ -345,21 +377,27 @@ def test_sapphire_18(client, tmp_path): nest_path = inc1_path / "nested" nest_path.mkdir() # add file in a nested dir in inc1 - nest = _create_test("nested_file.html", nest_path, data=b"blah... .nested", url_prefix="nested/") + nest = _create_test( + "nested_file.html", nest_path, data=b"blah... .nested", url_prefix="nested/" + ) assert nest_path / "nested_file.html" files_to_serve.append(nest) # test 404 in nested dir in inc1 nest_404 = _TestFile("nested/nested_file_404.html") files_to_serve.append(nest_404) # test path mounted somewhere other than / - inc2 = _create_test("included_file2.html", inc2_path, data=b"blah....2", url_prefix="inc_test/") + inc2 = _create_test( + "included_file2.html", inc2_path, data=b"blah....2", url_prefix="inc_test/" + ) files_to_serve.append(inc2) # test 404 in include dir inc404 = _TestFile("inc_test/included_file_404.html") assert not (nest_path / "included_file_404.html").is_file() files_to_serve.append(inc404) # test 403 - inc403 = _create_test("no_access.html", tmp_path, data=b"no_access", url_prefix="inc_test/../") + inc403 = _create_test( + "no_access.html", tmp_path, data=b"no_access", url_prefix="inc_test/../" + ) assert (tmp_path / "no_access.html").is_file() files_to_serve.append(inc403) # test file (used to keep sever job alive) @@ -384,6 +422,7 @@ def test_sapphire_18(client, tmp_path): assert inc404.code == 404 assert inc403.code == 403 + def test_sapphire_19(client, tmp_path): """test dynamic response - not required""" _data = b"dynamic response -- TEST DATA!" @@ -403,6 +442,7 @@ def test_sapphire_19(client, tmp_path): assert test_dr.len_srv == test_dr.len_org assert test_dr.md5_srv == test_dr.md5_org + def test_sapphire_20(client, tmp_path): """test dynamic response - required""" _data = b"dynamic response -- TEST DATA!" 
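For orientation between these two hunks: a dynamic response maps a URL to a callback instead of a file on disk, and the callback must return bytes (Worker.handle_request() raises TypeError otherwise). A minimal usage sketch, assuming the top-level sapphire package exports Sapphire and ServerMap and using a placeholder directory:

    from sapphire import Sapphire, ServerMap

    def _dyn_cb():
        # must return bytes, e.g. freshly generated test data
        return b"dynamic response -- TEST DATA!"

    smap = ServerMap()
    smap.set_dynamic_response("dyn_test", _dyn_cb, mime_type="text/plain", required=True)
    with Sapphire(timeout=10) as serv:
        # requests for "/dyn_test" are answered by _dyn_cb() instead of a file
        status, served = serv.serve_path("/path/to/files", server_map=smap)
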
@@ -410,7 +450,9 @@ def test_sapphire_20(client, tmp_path): test_dr.len_org = len(_data) test_dr.md5_org = hashlib.md5(_data).hexdigest() smap = ServerMap() - smap.set_dynamic_response("dyn_test", lambda: _data, mime_type="text/plain", required=True) + smap.set_dynamic_response( + "dyn_test", lambda: _data, mime_type="text/plain", required=True + ) with Sapphire(timeout=10) as serv: client.launch("127.0.0.1", serv.port, [test_dr], in_order=True) assert serv.serve_path(str(tmp_path), server_map=smap)[0] == SERVED_ALL @@ -419,6 +461,7 @@ def test_sapphire_20(client, tmp_path): assert test_dr.len_srv == test_dr.len_org assert test_dr.md5_srv == test_dr.md5_org + def test_sapphire_21(client_factory, tmp_path): """test pending_files == 0 in worker thread""" client_defer = client_factory(rx_size=2) @@ -429,7 +472,9 @@ def test_sapphire_21(client_factory, tmp_path): with Sapphire(timeout=10) as serv: # this test needs to wait just long enough to have the required file served # but not too long or the connection will be closed by the server - client_defer.launch("127.0.0.1", serv.port, [test_defer], delay=0.1, indicate_failure=True) + client_defer.launch( + "127.0.0.1", serv.port, [test_defer], delay=0.1, indicate_failure=True + ) client = client_factory(rx_size=2) client.launch("127.0.0.1", serv.port, [test], throttle=0.1) assert serv.serve_path(str(tmp_path), optional_files=optional)[0] == SERVED_ALL @@ -438,6 +483,7 @@ def test_sapphire_21(client_factory, tmp_path): assert test.code == 200 assert test_defer.code == 0 + def test_sapphire_22(client, tmp_path): """test handling an invalid request""" bad_test = _TestFile("bad.html") @@ -451,6 +497,7 @@ def test_sapphire_22(client, tmp_path): assert test.code == 200 assert bad_test.code == 400 + def test_sapphire_23(client, tmp_path): """test handling an empty request""" bad_test = _TestFile("bad.html") @@ -458,12 +505,19 @@ def test_sapphire_23(client, tmp_path): optional = [bad_test.url] test = _create_test("test_case.html", tmp_path) with Sapphire(timeout=10) as serv: - client.launch("127.0.0.1", serv.port, [bad_test, test], indicate_failure=True, in_order=True) + client.launch( + "127.0.0.1", + serv.port, + [bad_test, test], + indicate_failure=True, + in_order=True, + ) assert serv.serve_path(str(tmp_path), optional_files=optional)[0] == SERVED_ALL assert client.wait(timeout=10) assert test.code == 200 assert bad_test.code == 0 + def test_sapphire_24(client_factory, tmp_path): """test requesting multiple files via multiple connections""" to_serve = list() @@ -476,7 +530,9 @@ def test_sapphire_24(client_factory, tmp_path): for _ in range(max_workers): # number of clients to spawn clients.append(client_factory(rx_size=1)) for client in clients: - client.launch("127.0.0.1", serv.port, to_serve, in_order=True, throttle=0.05) + client.launch( + "127.0.0.1", serv.port, to_serve, in_order=True, throttle=0.05 + ) status, files_served = serv.serve_path(str(tmp_path)) # call serv.close() instead of waiting for the clients to timeout serv.close() @@ -490,8 +546,10 @@ def test_sapphire_24(client_factory, tmp_path): assert t_file.code == 200 assert t_file.len_srv == t_file.len_org + def test_sapphire_25(client_factory, tmp_path): """test all request types via multiple connections""" + def _dyn_test_cb(): return b"A" if random.getrandbits(1) else b"AA" @@ -500,7 +558,9 @@ def _dyn_test_cb(): to_serve = list() for i in range(50): # add required files - to_serve.append(_create_test("test_%03d.html" % i, tmp_path, data=b"A" * ((i % 2) + 1))) + to_serve.append( + 
_create_test("test_%03d.html" % i, tmp_path, data=b"A" * ((i % 2) + 1)) + ) # add a missing files to_serve.append(_TestFile("missing_%03d.html" % i)) # add optional files @@ -510,10 +570,14 @@ def _dyn_test_cb(): # add redirects redir_target = _create_test("redir_%03d.html" % i, tmp_path, data=b"AA") to_serve.append(_TestFile("redir_%03d" % i)) - smap.set_redirect(to_serve[-1].url, redir_target.url, required=random.getrandbits(1) > 0) + smap.set_redirect( + to_serve[-1].url, redir_target.url, required=random.getrandbits(1) > 0 + ) # add dynamic responses to_serve.append(_TestFile("dynm_%03d" % i)) - smap.set_dynamic_response(to_serve[-1].url, _dyn_test_cb, mime_type="text/plain") + smap.set_dynamic_response( + to_serve[-1].url, _dyn_test_cb, mime_type="text/plain" + ) clients = list() for _ in range(100): # number of clients to spawn clients.append(client_factory(rx_size=1)) @@ -521,6 +585,7 @@ def _dyn_test_cb(): clients[-1].launch("127.0.0.1", serv.port, to_serve, throttle=throttle) assert serv.serve_path(str(tmp_path), server_map=smap)[0] == SERVED_ALL + def test_sapphire_26(client, tmp_path): """test dynamic response with bad callbacks""" test_dr = _TestFile("dynm_test") @@ -532,10 +597,13 @@ def test_sapphire_26(client, tmp_path): with pytest.raises(TypeError): serv.serve_path(str(tmp_path), server_map=smap) + def test_sapphire_27(client, tmp_path): """test serving to a slow client""" t_data = "".join(random.choice("ABCD1234") for _ in range(0x19000)) # 100KB - t_file = _create_test("test_case.html", tmp_path, data=t_data.encode("ascii"), calc_hash=True) + t_file = _create_test( + "test_case.html", tmp_path, data=t_data.encode("ascii"), calc_hash=True + ) # rx_size 10KB and throttle to 0.25 sec, which will be ~50KB/s # also taking 2.5 seconds to complete will hopefully find problems # with any assumptions that were made @@ -548,19 +616,25 @@ def test_sapphire_27(client, tmp_path): assert t_file.len_srv == t_file.len_org assert t_file.md5_srv == t_file.md5_org + def test_sapphire_28(client, tmp_path): """test timeout while requesting multiple files""" files_to_serve = list() t_data = "".join(random.choice("ABCD1234") for _ in range(1024)).encode("ascii") for i in range(50): - files_to_serve.append(_create_test("test_case_%03d.html" % i, tmp_path, data=t_data)) + files_to_serve.append( + _create_test("test_case_%03d.html" % i, tmp_path, data=t_data) + ) client.rx_size = 512 with Sapphire(timeout=1) as serv: # minimum timeout is 1 second - client.launch("127.0.0.1", serv.port, files_to_serve, indicate_failure=True, throttle=0.1) + client.launch( + "127.0.0.1", serv.port, files_to_serve, indicate_failure=True, throttle=0.1 + ) status, files_served = serv.serve_path(str(tmp_path)) assert status == SERVED_TIMEOUT assert len(files_served) < len(files_to_serve) + def test_sapphire_29(client_factory, tmp_path): """test Sapphire.serve_path() with forever=True""" clients = list() @@ -571,10 +645,15 @@ def test_sapphire_29(client_factory, tmp_path): clients.append(client_factory()) for client in clients: client.launch("127.0.0.1", serv.port, [test], skip_served=False) + def _test_callback(): with test.lock: return test.requested < 3 - assert serv.serve_path(str(tmp_path), continue_cb=_test_callback, forever=True)[0] == SERVED_ALL + + assert ( + serv.serve_path(str(tmp_path), continue_cb=_test_callback, forever=True)[0] + == SERVED_ALL + ) for client in clients: assert client.wait(timeout=10) client.close() @@ -582,19 +661,22 @@ def _test_callback(): assert test.code == 200 assert test.len_srv == 
test.len_org + def test_sapphire_30(client, tmp_path): """test interesting file names""" to_serve = [ # space in file name _create_test("test case.html", tmp_path), # non-alphanumeric chars (valid characters to use on filesystem) - _create_test("!@#$%^&(_+-=[]),;'~`{}", tmp_path)] + _create_test("!@#$%^&(_+-=[]),;'~`{}", tmp_path), + ] with Sapphire(timeout=10) as serv: client.launch("127.0.0.1", serv.port, to_serve) assert serv.serve_path(str(tmp_path))[0] == SERVED_ALL assert client.wait(timeout=10) assert all(t_file.code == 200 for t_file in to_serve) + def test_sapphire_31(client, tmp_path): """test interesting path string""" all_bytes = "".join(chr(i) for i in range(256)) @@ -602,13 +684,17 @@ def test_sapphire_31(client, tmp_path): # should not trigger crash _TestFile(all_bytes), # used to keep server running - _create_test("a.html", tmp_path)] + _create_test("a.html", tmp_path), + ] with Sapphire(timeout=10) as serv: client.launch("127.0.0.1", serv.port, to_serve, in_order=True) - assert serv.serve_path(str(tmp_path), optional_files=[all_bytes])[0] == SERVED_ALL + assert ( + serv.serve_path(str(tmp_path), optional_files=[all_bytes])[0] == SERVED_ALL + ) assert client.wait(timeout=10) assert all(t_file.code is not None for t_file in to_serve) + def test_sapphire_32(mocker): """test Sapphire._create_listening_socket()""" fake_sleep = mocker.patch("sapphire.core.sleep", autospec=True) @@ -637,6 +723,7 @@ def test_sapphire_32(mocker): assert fake_sock.return_value.listen.call_count == 1 assert fake_sleep.call_count == 1 + def test_sapphire_33(mocker): """test Sapphire.clear_backlog()""" mocker.patch("sapphire.core.socket", autospec=True) @@ -650,13 +737,10 @@ def test_sapphire_33(mocker): assert serv._socket.settimeout.call_count == 2 assert pending.close.call_count == 1 + def test_main_01(mocker, tmp_path): """test Sapphire.main()""" - args = mocker.Mock( - path=str(tmp_path), - port=4536, - remote=False, - timeout=None) + args = mocker.Mock(path=str(tmp_path), port=4536, remote=False, timeout=None) fake_srv = mocker.patch("sapphire.core.Sapphire.serve_path", autospec=True) fake_srv.return_value = (SERVED_ALL, None) Sapphire.main(args) diff --git a/sapphire/test_server_map.py b/sapphire/test_server_map.py index a25c16c0..5bb5fa9b 100644 --- a/sapphire/test_server_map.py +++ b/sapphire/test_server_map.py @@ -16,10 +16,13 @@ def test_servermap_01(): assert not srv_map.include assert not srv_map.redirect + def test_servermap_02(tmp_path): """test ServerMap dynamic responses""" + def fake_cb(): pass + srv_map = ServerMap() srv_map.set_dynamic_response("url_01", fake_cb, mime_type="test/type") assert len(srv_map.dynamic) == 1 @@ -36,6 +39,7 @@ def fake_cb(): with pytest.raises(MapCollisionError): srv_map.set_redirect("url_01", "test_file") + def test_servermap_03(tmp_path): """test ServerMap includes""" srv_map = ServerMap() @@ -47,12 +51,12 @@ def test_servermap_03(tmp_path): assert "url_01" in srv_map.include assert srv_map.include["url_01"].target == str(tmp_path) # overwrite existing - inc1 = (tmp_path / "includes" / "a") + inc1 = tmp_path / "includes" / "a" inc1.mkdir(parents=True) srv_map.set_include("url_01", str(inc1)) assert srv_map.include["url_01"].target == str(inc1) # add another - inc2 = (tmp_path / "includes" / "b") + inc2 = tmp_path / "includes" / "b" inc2.mkdir() srv_map.set_include("url_02", str(inc2)) assert len(srv_map.include) == 2 @@ -65,11 +69,12 @@ def test_servermap_03(tmp_path): # test overlapping includes with pytest.raises(MapCollisionError, match=r"'url_01' and 
'\w+' include"): srv_map.set_include("url_01", str(tmp_path)) - inc3 = (tmp_path / "includes" / "b" / "c") + inc3 = tmp_path / "includes" / "b" / "c" inc3.mkdir() with pytest.raises(MapCollisionError, match=r"'url_01' and '\w+' include"): srv_map.set_include("url_01", str(inc3)) + def test_servermap_04(tmp_path): """test ServerMap redirects""" srv_map = ServerMap() @@ -88,6 +93,7 @@ def test_servermap_04(tmp_path): with pytest.raises(MapCollisionError): srv_map.set_dynamic_response("url_01", lambda: 0, mime_type="test/type") + def test_servermap_05(): """test ServerMap._check_url()""" assert ServerMap._check_url("test") == "test" diff --git a/sapphire/test_worker.py b/sapphire/test_worker.py index a910bd6c..feb6ae92 100644 --- a/sapphire/test_worker.py +++ b/sapphire/test_worker.py @@ -34,16 +34,16 @@ def test_worker_01(mocker): assert worker._thread is None assert worker.done + def test_worker_02(mocker): """test simple Worker fails to close""" - worker = Worker( - mocker.Mock(spec=socket.socket), - mocker.Mock(spec=threading.Thread)) + worker = Worker(mocker.Mock(spec=socket.socket), mocker.Mock(spec=threading.Thread)) # it is assumed that launch() has already been called at this point worker._thread.is_alive.return_value = True with pytest.raises(WorkerError, match="Worker thread failed to join!"): worker.close() + def test_worker_03(mocker): """test Worker.launch() fail cases""" serv_con = mocker.Mock(spec=socket.socket) @@ -63,6 +63,7 @@ def test_worker_03(mocker): assert serv_job.accepting.clear.call_count == 0 assert serv_job.accepting.set.call_count == 1 + def test_worker_04(mocker, tmp_path): """test Worker.launch()""" (tmp_path / "testfile").touch() @@ -81,6 +82,7 @@ def test_worker_04(mocker, tmp_path): assert serv_sock.accept.call_count == 1 assert clnt_sock.close.call_count == 2 + def test_worker_05(mocker): """test Worker.handle_request() socket errors""" serv_con = mocker.Mock(spec=socket.socket) @@ -91,17 +93,20 @@ def test_worker_05(mocker): assert serv_con.sendall.call_count == 0 assert serv_con.close.call_count == 1 + def test_response_data_01(): """test _200_header()""" output = Worker._200_header(10, "text/html") assert b"Content-Length: 10" in output assert b"Content-Type: text/html" in output + def test_response_data_02(): """test _307_redirect()""" output = Worker._307_redirect("http://some.test.url") assert b"Location: http://some.test.url" in output + def test_response_data_03(): """test _4xx_page() without close timeout""" output = Worker._4xx_page(400, "Bad Request") @@ -109,6 +114,7 @@ def test_response_data_03(): assert b"HTTP/1.1 400 Bad Request" in output assert b"400!" 
in output + def test_response_data_04(): """test _4xx_page() with close timeout""" output = Worker._4xx_page(404, "Not Found", close=10) diff --git a/sapphire/worker.py b/sapphire/worker.py index 268b481d..a930e144 100644 --- a/sapphire/worker.py +++ b/sapphire/worker.py @@ -42,18 +42,22 @@ def __init__(self, conn, thread): @staticmethod def _200_header(c_length, c_type, encoding="ascii"): assert c_type is not None - data = "HTTP/1.1 200 OK\r\n" \ - "Cache-Control: max-age=0, no-cache\r\n" \ - "Content-Length: %d\r\n" \ - "Content-Type: %s\r\n" \ - "Connection: close\r\n\r\n" % (c_length, c_type) + data = ( + "HTTP/1.1 200 OK\r\n" + "Cache-Control: max-age=0, no-cache\r\n" + "Content-Length: %d\r\n" + "Content-Type: %s\r\n" + "Connection: close\r\n\r\n" % (c_length, c_type) + ) return data.encode(encoding) @staticmethod def _307_redirect(redirct_to, encoding="ascii"): - data = "HTTP/1.1 307 Temporary Redirect\r\n" \ - "Location: %s\r\n" \ - "Connection: close\r\n\r\n" % (redirct_to,) + data = ( + "HTTP/1.1 307 Temporary Redirect\r\n" + "Location: %s\r\n" + "Connection: close\r\n\r\n" % (redirct_to,) + ) return data.encode(encoding) @staticmethod @@ -61,14 +65,18 @@ def _4xx_page(code, hdr_msg, close=-1, encoding="ascii"): if close < 0: content = "
<h3>%d!</h3>" % (code,)
         else:
-            content = "<script>window.setTimeout(window.close, %d)</script>\n" \
-                      "<body style=\"background-color:#ffffe0\">\n" \
-                      "<h3>%d! - Calling window.close() in %d seconds</h3>\n" \
-                      "</body>\n" % (close * 1000, code, close)
-        data = "HTTP/1.1 %d %s\r\n" \
-               "Content-Length: %d\r\n" \
-               "Content-Type: text/html\r\n" \
-               "Connection: close\r\n\r\n%s" % (code, hdr_msg, len(content), content)
+            content = (
+                "<script>window.setTimeout(window.close, %d)</script>\n"
+                '<body style="background-color:#ffffe0">\n'
+                "<h3>%d! - Calling window.close() in %d seconds</h3>
\n" + "\n" % (close * 1000, code, close) + ) + data = ( + "HTTP/1.1 %d %s\r\n" + "Content-Length: %d\r\n" + "Content-Type: text/html\r\n" + "Connection: close\r\n\r\n%s" % (code, hdr_msg, len(content), content) + ) return data.encode(encoding) def close(self): @@ -102,7 +110,11 @@ def handle_request(cls, conn, serv_job): if request is None: serv_job.accepting.set() conn.sendall(cls._4xx_page(400, "Bad Request", serv_job.auto_close)) - LOG.debug("400 request length %d (%d to go)", len(raw_request), serv_job.pending) + LOG.debug( + "400 request length %d (%d to go)", + len(raw_request), + serv_job.pending, + ) return request = unquote_plus(request.group("request").decode("ascii")) @@ -134,7 +146,9 @@ def handle_request(cls, conn, serv_job): if resource.type in (Resource.URL_FILE, Resource.URL_INCLUDE): LOG.debug("target %r", resource.target) # isfile() check for Resource.URL_FILE happens in serv_job.check_request() - if resource.type == Resource.URL_INCLUDE and not isfile(resource.target): + if resource.type == Resource.URL_INCLUDE and not isfile( + resource.target + ): conn.sendall(cls._4xx_page(404, "Not Found", serv_job.auto_close)) LOG.debug("404 %r (%d to go)", request, serv_job.pending) return @@ -147,7 +161,12 @@ def handle_request(cls, conn, serv_job): return elif resource.type == Resource.URL_REDIRECT: conn.sendall(cls._307_redirect(resource.target)) - LOG.debug("307 %r -> %r (%d to go)", request, resource.target, serv_job.pending) + LOG.debug( + "307 %r -> %r (%d to go)", + request, + resource.target, + serv_job.pending, + ) return elif resource.type == Resource.URL_DYNAMIC: data = resource.target() @@ -156,13 +175,17 @@ def handle_request(cls, conn, serv_job): raise TypeError("dynamic request callback must return 'bytes'") conn.sendall(cls._200_header(len(data), resource.mime)) conn.sendall(data) - LOG.debug("200 %r - dynamic request (%d to go)", request, serv_job.pending) + LOG.debug( + "200 %r - dynamic request (%d to go)", request, serv_job.pending + ) return # at this point we know "resource.target" maps to a file on disk # serve the file data_size = stat(resource.target).st_size - LOG.debug("sending: %s bytes, mime: %r", format(data_size, ","), resource.mime) + LOG.debug( + "sending: %s bytes, mime: %r", format(data_size, ","), resource.mime + ) with open(resource.target, "rb") as in_fp: conn.sendall(cls._200_header(data_size, resource.mime)) offset = 0 diff --git a/tox.ini b/tox.ini index 8c342b20..276e3a91 100644 --- a/tox.ini +++ b/tox.ini @@ -32,7 +32,7 @@ allowlist_externals = bash commands = isort {toxinidir} -# black {toxinidir} + black {toxinidir} # codespell trips over the regex in 'sapphire/worker.py' saying 'sHTTP ==> https' # https://github.com/codespell-project/codespell/issues/1774 # ignoring it is broken so we need to ignore the file From ea532d9042f19b4e525a5ca1542879840bd26ac9 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 18 Mar 2021 15:01:08 -0700 Subject: [PATCH 230/531] Enable flake8 --- grizzly/common/runner.py | 15 +++++++++------ grizzly/common/stack_hasher.py | 7 ++++++- grizzly/common/status_reporter.py | 14 ++++++++------ grizzly/common/test_reporter.py | 6 ++++-- grizzly/reduce/args.py | 4 ++-- grizzly/reduce/core.py | 4 ++-- sapphire/connection_manager.py | 4 ++-- sapphire/core.py | 2 +- sapphire/worker.py | 2 +- tox.ini | 2 +- 10 files changed, 36 insertions(+), 24 deletions(-) diff --git a/grizzly/common/runner.py b/grizzly/common/runner.py index 971c73e7..b8b77c75 100644 --- a/grizzly/common/runner.py +++ b/grizzly/common/runner.py @@ 
-18,13 +18,16 @@ LOG = getLogger(__name__) -# _IdleChecker is used to help determine if the target is hung (actively using CPU) -# or if it has not made expected the HTTP requests for other reasons (idle). -# This will allow the framework to move on without interrupting execution of -# long running test cases. -# This is not perfect! It is to be used AFTER the test case timeout -# (initial_delay) has elapsed. + class _IdleChecker: + """_IdleChecker is used to help determine if the target is hung (actively using CPU) + or if it has not made expected the HTTP requests for other reasons (idle). + This will allow the framework to move on without interrupting execution of long + running test cases. + This is not perfect! It is to be used AFTER the test case timeout (initial_delay) + has elapsed. + """ + __slots__ = ("_check_cb", "_init_delay", "_poll_delay", "_threshold", "_next_poll") def __init__(self, check_cb, threshold, initial_delay, poll_delay=1): diff --git a/grizzly/common/stack_hasher.py b/grizzly/common/stack_hasher.py index 1374ceac..b7857ae5 100644 --- a/grizzly/common/stack_hasher.py +++ b/grizzly/common/stack_hasher.py @@ -178,7 +178,12 @@ def _parse_rust(cls, input_line): # Don't bother with the file offset stuff atm # m = cls._re_rust_file.match(input_line) if frame is None else None # if m is not None: - # frame = {"function":None, "mode":cls.MODE_RUST, "offset":None, "stack_line":None} + # frame = { + # "function": None, + # "mode": cls.MODE_RUST, + # "offset": None, + # "stack_line": None, + # } # input_line = m.group("line").strip() # if ":" in input_line: # frame["location"], frame["offset"] = input_line.rsplit(":", 1) diff --git a/grizzly/common/status_reporter.py b/grizzly/common/status_reporter.py index 13ebff2b..b986eb71 100644 --- a/grizzly/common/status_reporter.py +++ b/grizzly/common/status_reporter.py @@ -72,13 +72,13 @@ def has_results(self): @classmethod def load(cls, tb_path=None): - """Read Grizzly status reports and create a StatusReporter object + """Read Grizzly status reports and create a StatusReporter object. Args: - tb_path (str): Directory to scan for files containing Python tracebacks + tb_path (str): Directory to scan for files containing Python tracebacks. Returns: - StatusReporter: Contains status reports and traceback reports that were found + StatusReporter: Contains available status reports and traceback reports. """ if tb_path is not None and not os.path.isdir(tb_path): raise OSError("%r is not a directory" % (tb_path,)) @@ -307,10 +307,11 @@ def _tracebacks(path, ignore_kbi=True, max_preceeding=5): Args: path (str): Directory containing log files. ignore_kbi (bool): Do not include KeyboardInterupts in results - max_preceeding (int): Maximum number of lines preceding traceback to include. + max_preceeding (int): Maximum number of lines preceding traceback to + include. Returns: - list: A list of TracebackReports + list: A list of TracebackReports. """ tracebacks = list() for screen_log in StatusReporter._scan(path, re.compile(r"screenlog\.\d+")): @@ -345,7 +346,8 @@ def from_file(cls, input_log, max_preceeding=5): Args: input_log (str): File to parse. - max_preceeding (int): Number of lines to collect leading up to the traceback. + max_preceeding (int): Number of lines to collect leading up to the + traceback. Returns: TracebackReport: Contains data from input_log. 
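The docstrings reworded above describe scanning screenlog files for Python tracebacks while keeping a bounded number of preceding lines. As a simplified illustration of that idea only (this is not the implementation in status_reporter.py):

    def find_traceback(log_lines, max_preceeding=5):
        """Return the lines from max_preceeding before a Python traceback to
        the end of the log, or None when no traceback marker is present."""
        for idx, line in enumerate(log_lines):
            if line.startswith("Traceback (most recent call last):"):
                return log_lines[max(idx - max_preceeding, 0):]
        return None
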
diff --git a/grizzly/common/test_reporter.py b/grizzly/common/test_reporter.py index b9400138..81da417c 100644 --- a/grizzly/common/test_reporter.py +++ b/grizzly/common/test_reporter.py @@ -163,10 +163,12 @@ def test_report_07(tmp_path): log_fp.write(b"GPU|||\n") log_fp.write(b"Crash|DUMP_REQUESTED|0x7f57ac9e2e14|0\n") log_fp.write( - b"0|0|foo.so|google_breakpad::ExceptionHandler::WriteMinidump|bar.cc:234|674|0xc\n" + b"0|0|foo.so|google_breakpad::ExceptionHandler::WriteMinidump|" + b"bar.cc:234|674|0xc\n" ) log_fp.write( - b"0|1|foo.so|google_breakpad::ExceptionHandler::WriteMinidump|bar.cc:4a2|645|0x8\n" + b"0|1|foo.so|google_breakpad::ExceptionHandler::WriteMinidump|" + b"bar.cc:4a2|645|0x8\n" ) with (tmp_path / "log_minidump_03.txt").open("wb") as log_fp: log_fp.write(b"GPU|||\n") diff --git a/grizzly/reduce/args.py b/grizzly/reduce/args.py index 15499906..d4b8608b 100644 --- a/grizzly/reduce/args.py +++ b/grizzly/reduce/args.py @@ -108,8 +108,8 @@ def __init__(self): # instead of a local testcase. # This is not possible with the public argparse API. # - # refs: https://stackoverflow.com/questions/32807319/disable-remove-argument-in-argparse - # https://bugs.python.org/issue19462 + # refs: stackoverflow.com/questions/32807319/disable-remove-argument-in-argparse + # bugs.python.org/issue19462 # look up the action for the positional `input` arg action = None diff --git a/grizzly/reduce/core.py b/grizzly/reduce/core.py index 633784e9..31dbeeb9 100644 --- a/grizzly/reduce/core.py +++ b/grizzly/reduce/core.py @@ -251,7 +251,7 @@ def run_reliability_analysis(self, stats): # Don't test without harness if harness found > 50% crashes continue if last_test_only and len(self.testcases) == 1: - # Only set `last_test_only` if we have more than one testcase to begin with + # Only set `last_test_only` if we initially have more than one testcase continue if not use_harness and (not last_test_only and len(self.testcases) > 1): # Can't run without harness if we have more than one testcase @@ -520,7 +520,7 @@ def run(self, repeat=1, min_results=1): and self._signature_desc is None ): self._signature_desc = ( - first_expected.report.crash_info.createShortSignature() + first_expected.report.crash_info.createShortSignature() # noqa: E501 ) served = None if success and not self._any_crash: diff --git a/sapphire/connection_manager.py b/sapphire/connection_manager.py index f3ca796b..daf6dbf1 100644 --- a/sapphire/connection_manager.py +++ b/sapphire/connection_manager.py @@ -115,8 +115,8 @@ def listener(serv_sock, serv_job, max_workers, shutdown_delay=0): serv_job.worker_complete.clear() # remove complete workers LOG.debug("trimming worker pool") - # sometimes the thread that triggered the event doesn't quite cleanup in time - # so add a retry (10x with 0.5 second sleep on failure) + # sometimes the thread that triggered the event doesn't quite + # cleanup in time, so retry (10x with 0.5 second sleep on failure) for _ in range(10): worker_pool = list(w for w in worker_pool if not w.done) pool_size = len(worker_pool) diff --git a/sapphire/core.py b/sapphire/core.py index b2b7d7a8..caf8cb20 100644 --- a/sapphire/core.py +++ b/sapphire/core.py @@ -65,7 +65,7 @@ def _create_listening_socket(cls, remote, port=None, retries=20): sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) sock.settimeout(cls.LISTEN_TIMEOUT) # find an unused port and avoid blocked ports - # see: dxr.mozilla.org/mozilla-central/source/netwerk/base/nsIOService.cpp + # see: searchfox.org/mozilla-central/source/netwerk/base/nsIOService.cpp 
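            # picking from 0x2000-0xFFFF keeps the random port above the
            # privileged range and most entries on that blocked-port list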
sock.bind((addr, port or randint(0x2000, 0xFFFF))) sock.listen(5) except OSError as soc_e: diff --git a/sapphire/worker.py b/sapphire/worker.py index a930e144..9fea2d9e 100644 --- a/sapphire/worker.py +++ b/sapphire/worker.py @@ -145,7 +145,7 @@ def handle_request(cls, conn, serv_job): return if resource.type in (Resource.URL_FILE, Resource.URL_INCLUDE): LOG.debug("target %r", resource.target) - # isfile() check for Resource.URL_FILE happens in serv_job.check_request() + # isfile() check for Resource.URL_FILE done by serv_job.check_request() if resource.type == Resource.URL_INCLUDE and not isfile( resource.target ): diff --git a/tox.ini b/tox.ini index 276e3a91..be7ea615 100644 --- a/tox.ini +++ b/tox.ini @@ -40,7 +40,7 @@ commands = # pylint {toxinidir}/grizzly # pylint {toxinidir}/loki # pylint {toxinidir}/sapphire -# flake8 {toxinidir} + flake8 {toxinidir} deps = black codespell From c76693e4d7f182b3a60644721aa88c95d1446e22 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 18 Mar 2021 17:05:27 -0700 Subject: [PATCH 231/531] Enable pylint --- grizzly/adapters/NoOpAdapter/__init__.py | 2 + grizzly/adapters/__init__.py | 2 - grizzly/args.py | 2 +- grizzly/common/reporter.py | 13 ++- grizzly/common/stack_hasher.py | 125 ++++++++++++----------- grizzly/common/storage.py | 8 +- grizzly/common/test_adapter.py | 2 +- grizzly/common/test_reporter.py | 4 +- grizzly/common/test_storage.py | 2 +- grizzly/reduce/core.py | 2 + grizzly/reduce/test_reduce.py | 4 - grizzly/target/target.py | 8 +- grizzly/target/test_target.py | 2 +- grizzly/test_main.py | 2 +- pyproject.toml | 8 +- sapphire/worker.py | 1 + tox.ini | 6 +- 17 files changed, 100 insertions(+), 93 deletions(-) diff --git a/grizzly/adapters/NoOpAdapter/__init__.py b/grizzly/adapters/NoOpAdapter/__init__.py index 271d49d1..21d9049a 100644 --- a/grizzly/adapters/NoOpAdapter/__init__.py +++ b/grizzly/adapters/NoOpAdapter/__init__.py @@ -1,6 +1,8 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# pylint: disable=invalid-name + from grizzly.common import Adapter __author__ = "Tyson Smith" diff --git a/grizzly/adapters/__init__.py b/grizzly/adapters/__init__.py index b71ae222..248f3226 100644 --- a/grizzly/adapters/__init__.py +++ b/grizzly/adapters/__init__.py @@ -66,8 +66,6 @@ def load(path=None, skip_failures=True): % (cls.NAME, __adapters__[cls.NAME].__name__, cls.__name__) ) __adapters__[cls.NAME] = cls - else: - LOG.debug("ignored %r", sub) LOG.debug("%d adapters loaded", len(__adapters__)) diff --git a/grizzly/args.py b/grizzly/args.py index cd7a635f..b53830a7 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -159,7 +159,7 @@ def parse_args(self, argv=None): def sanity_check(self, args): if hasattr(super(), "sanity_check"): - super().sanity_check(args) + super().sanity_check(args) # pylint: disable=no-member if "binary" not in self._sanity_skip and not isfile(args.binary): self.parser.error("file not found: %r" % args.binary) diff --git a/grizzly/common/reporter.py b/grizzly/common/reporter.py index 9159c931..e90fe91b 100644 --- a/grizzly/common/reporter.py +++ b/grizzly/common/reporter.py @@ -477,6 +477,7 @@ class FilesystemReporter(Reporter): def __init__(self, report_path, major_bucket=True): self.major_bucket = major_bucket + self.min_space = FilesystemReporter.DISK_SPACE_ABORT assert isinstance(report_path, str) and report_path self.report_path = report_path @@ -507,7 +508,7 @@ def _submit_report(self, report, test_cases): move(report.path, log_path) # avoid filling the disk free_space = disk_usage(log_path).free - if free_space < self.DISK_SPACE_ABORT: + if free_space < self.min_space: raise RuntimeError( "Running low on disk space (%0.1fMB)" % (free_space / 1048576.0,) ) @@ -533,6 +534,7 @@ class FuzzManagerReporter(Reporter): def __init__(self, tool=None): self._extra_metadata = {} self.force_report = False + self.max_reports = FuzzManagerReporter.MAX_REPORTS self.quality = self.QUAL_UNREDUCED self.tool = tool # optional tool name @@ -652,7 +654,7 @@ def _submit_report(self, report, test_cases): raise RuntimeError("Failed to create FM signature") # limit the number of times we report per cycle cache_metadata["_grizzly_seen_count"] += 1 - if cache_metadata["_grizzly_seen_count"] >= self.MAX_REPORTS: + if cache_metadata["_grizzly_seen_count"] >= self.max_reports: # we will still report this one, but no more cache_metadata["frequent"] = True metadata_file = cache_sig_file.replace(".signature", ".metadata") @@ -751,11 +753,12 @@ def _process_rr_trace(self, report): s3_bucket = getenv("GRZ_S3_BUCKET") assert s3_bucket is not None # check for existing minor hash in S3 - s3 = resource("s3") + s3_res = resource("s3") s3_key = "rr-%s.tar.bz2" % (report.minor,) s3_url = "http://%s.s3.amazonaws.com/%s" % (s3_bucket, s3_key) try: - s3.Object(s3_bucket, s3_key).load() # HEAD, doesn't fetch the whole object + # HEAD, doesn't fetch the whole object + s3_res.Object(s3_bucket, s3_key).load() except ClientError as exc: if exc.response["Error"]["Code"] == "404": # The object does not exist. 
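Between these hunks, for reference: the rename to s3_res keeps the usual boto3 existence probe that the surrounding code relies on. A standalone sketch with placeholder bucket and key names:

    from boto3 import resource
    from botocore.exceptions import ClientError

    s3_res = resource("s3")
    try:
        # load() issues a HEAD request, so the trace archive is never downloaded
        s3_res.Object("example-bucket", "rr-0123abcd.tar.bz2").load()
        already_uploaded = True
    except ClientError as exc:
        if exc.response["Error"]["Code"] != "404":
            raise
        already_uploaded = False
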
@@ -773,7 +776,7 @@ def _process_rr_trace(self, report): # Upload to S3 rr_arc = self.compress_rr_trace(trace_path, report.path) - s3.meta.client.upload_file( + s3_res.meta.client.upload_file( rr_arc, s3_bucket, s3_key, ExtraArgs={"ACL": "public-read"} ) unlink(rr_arc) diff --git a/grizzly/common/stack_hasher.py b/grizzly/common/stack_hasher.py index b7857ae5..93321a44 100644 --- a/grizzly/common/stack_hasher.py +++ b/grizzly/common/stack_hasher.py @@ -104,18 +104,18 @@ def from_line(cls, input_line, parse_mode=None): def _parse_gdb(cls, input_line): if "#" not in input_line: return None - m = cls._re_gdb.match(input_line) - if m is None: + match = cls._re_gdb.match(input_line) + if match is None: return None - input_line = m.group("line").strip() + input_line = match.group("line").strip() if not input_line: return None - sframe = cls(mode=cls.MODE_GDB, stack_line=m.group("num")) + sframe = cls(mode=cls.MODE_GDB, stack_line=match.group("num")) # sframe.offset = m.group("off") # ignore binary offset for now # find function/method name - m = cls._re_func_name.match(input_line) - if m is not None: - sframe.function = m.group("func") + match = cls._re_func_name.match(input_line) + if match is not None: + sframe.function = match.group("func") # find file name and line number if ") at " in input_line: input_line = input_line.split(") at ")[-1] @@ -163,28 +163,30 @@ def _parse_minidump(cls, input_line): def _parse_rr(cls, input_line): if "rr(" not in input_line: return None - m = cls._re_rr.match(input_line) - if m is None: + match = cls._re_rr.match(input_line) + if match is None: return None - return cls(location=m.group("loc"), mode=cls.MODE_RR, offset=m.group("off")) + return cls( + location=match.group("loc"), mode=cls.MODE_RR, offset=match.group("off") + ) @classmethod def _parse_rust(cls, input_line): - m = cls._re_rust_frame.match(input_line) - if m is None: + match = cls._re_rust_frame.match(input_line) + if match is None: return None - sframe = cls(mode=cls.MODE_RUST, stack_line=m.group("num")) - sframe.function = m.group("line").strip().rsplit("::h", 1)[0] + sframe = cls(mode=cls.MODE_RUST, stack_line=match.group("num")) + sframe.function = match.group("line").strip().rsplit("::h", 1)[0] # Don't bother with the file offset stuff atm - # m = cls._re_rust_file.match(input_line) if frame is None else None - # if m is not None: + # match = cls._re_rust_file.match(input_line) if frame is None else None + # if match is not None: # frame = { # "function": None, # "mode": cls.MODE_RUST, # "offset": None, # "stack_line": None, # } - # input_line = m.group("line").strip() + # input_line = match.group("line").strip() # if ":" in input_line: # frame["location"], frame["offset"] = input_line.rsplit(":", 1) # else: @@ -195,17 +197,17 @@ def _parse_rust(cls, input_line): def _parse_sanitizer(cls, input_line): if "#" not in input_line: return None - m = cls._re_sanitizer.match(input_line) - if m is None: + match = cls._re_sanitizer.match(input_line) + if match is None: return None - sframe = cls(mode=cls.MODE_SANITIZER, stack_line=m.group("num")) - input_line = m.group("line") + sframe = cls(mode=cls.MODE_SANITIZER, stack_line=match.group("num")) + input_line = match.group("line") # check if line is symbolized - if m.group("in"): + if match.group("in"): # find function/method name - m = cls._re_func_name.match(input_line) - if m is not None: - sframe.function = m.group("func") + match = cls._re_func_name.match(input_line) + if match is not None: + sframe.function = match.group("func") if 
input_line.startswith("("): input_line = input_line.strip("()") # find location (file name or module) and offset (line # or offset) @@ -221,11 +223,11 @@ def _parse_sanitizer(cls, input_line): def _parse_tsan(cls, input_line): if "#" not in input_line: return None - m = cls._re_tsan.match(input_line) - if m is None: + match = cls._re_tsan.match(input_line) + if match is None: return None - sframe = cls(mode=cls.MODE_TSAN, stack_line=m.group("num")) - input_line = m.group("line") + sframe = cls(mode=cls.MODE_TSAN, stack_line=match.group("num")) + input_line = match.group("line") location = basename(input_line) # try to parse file name and line number if location: @@ -236,13 +238,13 @@ def _parse_tsan(cls, input_line): sframe.offset = location.pop(0) # use module name if file name cannot be found if not sframe.location: - sframe.location = m.group("mod") + sframe.location = match.group("mod") # use module offset if line number cannot be found if not sframe.offset: - sframe.offset = m.group("off") - m = cls._re_func_name.match(input_line) - if m is not None: - function = m.group("func") + sframe.offset = match.group("off") + match = cls._re_func_name.match(input_line) + if match is not None: + function = match.group("func") if function and function != "": sframe.function = function return sframe @@ -251,15 +253,15 @@ def _parse_tsan(cls, input_line): def _parse_valgrind(cls, input_line): if "== " not in input_line: return None - m = cls._re_valgrind.match(input_line) - if m is None: + match = cls._re_valgrind.match(input_line) + if match is None: return None - input_line = m.group("line") + input_line = match.group("line") if input_line is None: # pragma: no cover # this should not happen LOG.warning("failure in _parse_valgrind()") return None - sframe = cls(function=m.group("func"), mode=cls.MODE_VALGRIND) + sframe = cls(function=match.group("func"), mode=cls.MODE_VALGRIND) try: location, sframe.offset = input_line.split(":") sframe.location = location.strip() @@ -291,7 +293,7 @@ def __str__(self): def _calculate_hash(self, major=False): if not self.frames or (major and self._major_depth < 1): return None - h = sha1() + shash = sha1() if self._height_limit is None: offset = 0 else: @@ -300,16 +302,16 @@ def _calculate_hash(self, major=False): if major and depth > self._major_depth: break if frame.location is not None: - h.update(frame.location.encode("utf-8", errors="ignore")) + shash.update(frame.location.encode("utf-8", errors="ignore")) if frame.function is not None: - h.update(frame.function.encode("utf-8", errors="ignore")) + shash.update(frame.function.encode("utf-8", errors="ignore")) if major and depth > 1: # only add the offset from the top frame when calculating # the major hash and skip the rest continue if frame.offset is not None: - h.update(frame.offset.encode("utf-8", errors="ignore")) - return h.hexdigest() + shash.update(frame.offset.encode("utf-8", errors="ignore")) + return shash.hexdigest() def from_file(self, file_name): # pragma: no cover raise NotImplementedError() # TODO @@ -406,24 +408,25 @@ def minor(self): from argparse import ArgumentParser from os import getenv # pylint: disable=ungrouped-imports - parser = ArgumentParser() - parser.add_argument("input", help="") - args = parser.parse_args() - # set output verbosity if getenv("DEBUG"): - log_level = DEBUG - log_fmt = "[%(levelname).1s] %(message)s" + basicConfig( + format="[%(levelname).1s] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + level=DEBUG, + ) else: - log_level = INFO - log_fmt = "%(message)s" - 
basicConfig(format=log_fmt, datefmt="%Y-%m-%d %H:%M:%S", level=log_level) - - with open(args.input, "rb") as fp: - stack = Stack.from_text(fp.read().decode("utf-8", errors="ignore")) - - for frame in stack.frames: - LOG.info(frame) - LOG.info("Minor: %s", stack.minor) - LOG.info("Major: %s", stack.major) - LOG.info("Frames: %d", len(stack.frames)) + basicConfig(format="%(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=INFO) + + def main(args): + with open(args.input, "rb") as in_fp: + stack = Stack.from_text(in_fp.read().decode("utf-8", errors="ignore")) + for frame in stack.frames: + LOG.info(frame) + LOG.info("Minor: %s", stack.minor) + LOG.info("Major: %s", stack.major) + LOG.info("Frames: %d", len(stack.frames)) + + parser = ArgumentParser() + parser.add_argument("input", help="File to scan for stack trace") + main(parser.parse_args()) diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index d924a453..8549bee7 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -562,8 +562,8 @@ def clone(self): cloned = type(self)(self._file_name) self._fp.seek(0) copyfileobj( - self._fp, cloned._fp, self.XFER_BUF - ) # pylint: disable=protected-access + self._fp, cloned._fp, self.XFER_BUF # pylint: disable=protected-access + ) return cloned def close(self): @@ -651,8 +651,8 @@ def from_file(cls, input_file, file_name=None): t_file = cls(file_name) with open(input_file, "rb") as src_fp: copyfileobj( - src_fp, t_file._fp, cls.XFER_BUF - ) # pylint: disable=protected-access + src_fp, t_file._fp, cls.XFER_BUF # pylint: disable=protected-access + ) return t_file @property diff --git a/grizzly/common/test_adapter.py b/grizzly/common/test_adapter.py index 3055f25c..46e78814 100644 --- a/grizzly/common/test_adapter.py +++ b/grizzly/common/test_adapter.py @@ -47,7 +47,7 @@ def test_adapter_03(tmp_path): harness_file = tmp_path / "harness.html" test_data = b"default_harness_data" harness_file.write_bytes(test_data) - adpt.HARNESS_FILE = str(harness_file) + adpt.HARNESS_FILE = str(harness_file) # pylint: disable=invalid-name adpt.enable_harness() assert adpt.get_harness() == test_data # external harness diff --git a/grizzly/common/test_reporter.py b/grizzly/common/test_reporter.py index 81da417c..58a4ad22 100644 --- a/grizzly/common/test_reporter.py +++ b/grizzly/common/test_reporter.py @@ -396,7 +396,7 @@ def test_filesystem_reporter_03(tmp_path): (log_path / "log_stderr.txt").write_bytes(b"STDERR log") (log_path / "log_stdout.txt").write_bytes(b"STDOUT log") reporter = FilesystemReporter(str(tmp_path / "reports")) - reporter.DISK_SPACE_ABORT = 2 ** 50 + reporter.min_space = 2 ** 50 with pytest.raises(RuntimeError, match="Running low on disk space"): reporter.submit([], Report(str(log_path), "fake_bin")) @@ -493,7 +493,7 @@ def test_fuzzmanager_reporter_03(mocker, tmp_path): (log_path / "log_stderr.txt").touch() (log_path / "log_stdout.txt").touch() reporter = FuzzManagerReporter("fake_bin") - reporter.MAX_REPORTS = 1 + reporter.max_reports = 1 reporter.submit([], Report(str(log_path), "fake_bin")) assert fake_collector.return_value.submit.call_count == 1 meta_data = (tmp_path / "fm_file.metadata").read_text() diff --git a/grizzly/common/test_storage.py b/grizzly/common/test_storage.py index 2fe40977..2868275d 100644 --- a/grizzly/common/test_storage.py +++ b/grizzly/common/test_storage.py @@ -259,7 +259,7 @@ def test_testcase_10(tmp_path): loaded = TestCase.load_single(str(tmp_path), False) try: for prop in TestCase.__slots__: - if prop.startswith("_") or "redirect_page": + 
if prop.startswith("_") or prop == "redirect_page": continue assert getattr(loaded, prop) == getattr(org, prop) assert org._existing_paths == loaded._existing_paths diff --git a/grizzly/reduce/core.py b/grizzly/reduce/core.py index 31dbeeb9..6e83da7d 100644 --- a/grizzly/reduce/core.py +++ b/grizzly/reduce/core.py @@ -520,6 +520,7 @@ def run(self, repeat=1, min_results=1): and self._signature_desc is None ): self._signature_desc = ( + # pylint: disable=line-too-long first_expected.report.crash_info.createShortSignature() # noqa: E501 ) served = None @@ -700,6 +701,7 @@ def main(cls, args): Returns: int: 0 for success. non-0 indicates a problem. """ + # pylint: disable=too-many-return-statements configure_logging(args.log_level) setlocale(LC_ALL, "") if args.fuzzmanager: diff --git a/grizzly/reduce/test_reduce.py b/grizzly/reduce/test_reduce.py index 4da90b0c..dd9928eb 100644 --- a/grizzly/reduce/test_reduce.py +++ b/grizzly/reduce/test_reduce.py @@ -914,10 +914,6 @@ def replay_run(_testcases, _time_limit, **_): idle_delay=idle_input, static_timeout=static_timeout, ) - mgr.IDLE_DELAY_MIN = 10 - mgr.IDLE_DELAY_DURATION_MULTIPLIER = 1.5 - mgr.ITER_TIMEOUT_MIN = 10 - mgr.ITER_TIMEOUT_DURATION_MULTIPLIER = 2 if isinstance(result, type) and issubclass(result, BaseException): with raises(result): mgr.run() diff --git a/grizzly/target/target.py b/grizzly/target/target.py index 50ec0576..e1d20107 100644 --- a/grizzly/target/target.py +++ b/grizzly/target/target.py @@ -86,7 +86,7 @@ def __enter__(self): def __exit__(self, *exc): self.cleanup() - def add_abort_token(self, token): # pylint: disable=no-self-use,unused-argument + def add_abort_token(self, _token): # pylint: disable=no-self-use LOG.warning("add_abort_token() not implemented!") @abstractmethod @@ -109,7 +109,7 @@ def create_report(self, is_hang=False): def detect_failure(self, ignored): pass - def dump_coverage(self): # pylint: disable=no-self-use + def dump_coverage(self, _timeout=0): # pylint: disable=no-self-use LOG.warning("dump_coverage() is not supported!") @abstractmethod @@ -117,12 +117,12 @@ def handle_hang(self, ignore_idle=True): pass # TODO: move to monitor? - def is_idle(self, threshold): # pylint: disable=no-self-use,unused-argument + def is_idle(self, _threshold): # pylint: disable=no-self-use LOG.debug("Target.is_idle() not implemented! 
returning False") return False @abstractmethod - def launch(self): + def launch(self, _location, _env_mod=None): pass def log_size(self): # pylint: disable=no-self-use diff --git a/grizzly/target/test_target.py b/grizzly/target/test_target.py index bfd39b22..572b78da 100644 --- a/grizzly/target/test_target.py +++ b/grizzly/target/test_target.py @@ -26,7 +26,7 @@ def detect_failure(self, ignored): def handle_hang(self, ignore_idle=True): pass - def launch(self): + def launch(self, _location, _env_mod=None): pass @property diff --git a/grizzly/test_main.py b/grizzly/test_main.py index 198136e8..70de6665 100644 --- a/grizzly/test_main.py +++ b/grizzly/test_main.py @@ -31,7 +31,7 @@ def __init__(self): self.memory = 0 self.platform = "fake-target" self.prefs = None - self.rr = False + self.rr = False # pylint: disable=invalid-name self.relaunch = 1000 self.s3_fuzzmanager = False self.time_limit = None diff --git a/pyproject.toml b/pyproject.toml index 6a2166d2..e8c92190 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,11 +28,13 @@ max-line-length = 88 [tool.pylint.messages_control] disable = [ - "C0330", - "C0326", - "bad-continuation", + "duplicate-code", "fixme", "import-error", + # need to finish adding docs... + "missing-class-docstring", + "missing-function-docstring", + "missing-module-docstring", "subprocess-run-check", "too-few-public-methods", "too-many-arguments", diff --git a/sapphire/worker.py b/sapphire/worker.py index 9fea2d9e..d221bf57 100644 --- a/sapphire/worker.py +++ b/sapphire/worker.py @@ -97,6 +97,7 @@ def done(self): @classmethod def handle_request(cls, conn, serv_job): + # pylint: disable=too-many-return-statements finish_job = False # call finish() on return try: # receive all the incoming data diff --git a/tox.ini b/tox.ini index be7ea615..3557d1f7 100644 --- a/tox.ini +++ b/tox.ini @@ -37,9 +37,9 @@ commands = # https://github.com/codespell-project/codespell/issues/1774 # ignoring it is broken so we need to ignore the file codespell --skip=".git,.tox,htmlcov,results,./sapphire/worker.py" {toxinidir} -# pylint {toxinidir}/grizzly -# pylint {toxinidir}/loki -# pylint {toxinidir}/sapphire + pylint {toxinidir}/grizzly + pylint {toxinidir}/loki + pylint {toxinidir}/sapphire flake8 {toxinidir} deps = black From 548860af251099666ce706e5790d880f3557ef1f Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 18 Mar 2021 17:18:03 -0700 Subject: [PATCH 232/531] Add pre-commit config --- .pre-commit-config.yaml | 52 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..2489c387 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,52 @@ +repos: + - repo: https://github.com/pycqa/isort + rev: 5.7.0 + hooks: + - id: isort + - repo: https://github.com/asottile/yesqa + rev: v1.2.2 + hooks: + - id: yesqa + - repo: https://github.com/ambv/black + rev: 20.8b1 + hooks: + - id: black + - repo: https://github.com/pycqa/pylint + rev: pylint-2.7.2 + hooks: + - id: pylint + - repo: https://gitlab.com/pycqa/flake8 + rev: 3.9.0 + hooks: + - id: flake8 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.4.0 + hooks: + - id: check-ast + - id: check-docstring-first + - id: check-executables-have-shebangs + - id: check-merge-conflict + - id: check-symlinks + - id: debug-statements + - id: trailing-whitespace + - id: check-yaml + - id: mixed-line-ending + - id: name-tests-test + args: ['--django'] + - id: check-json 
+ - id: requirements-txt-fixer + - repo: https://github.com/codespell-project/codespell + rev: v2.0.0 + hooks: + - id: codespell + exclude_types: [json] + - repo: https://github.com/marco-c/taskcluster_yml_validator + rev: v0.0.7 + hooks: + - id: taskcluster_yml + - repo: meta + hooks: + - id: check-useless-excludes + +default_language_version: + python: python3 From 827f36ed0c622b24f96bec476ad3078030d3cd60 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Fri, 19 Mar 2021 13:17:09 -0700 Subject: [PATCH 233/531] [ci] Switch to TaskCluster --- .taskcluster.yml | 174 +++++++++++++++++++++++++++++++++++++++++++++++ .travis.yml | 37 ---------- README.md | 2 +- 3 files changed, 175 insertions(+), 38 deletions(-) create mode 100644 .taskcluster.yml delete mode 100644 .travis.yml diff --git a/.taskcluster.yml b/.taskcluster.yml new file mode 100644 index 00000000..aa953a56 --- /dev/null +++ b/.taskcluster.yml @@ -0,0 +1,174 @@ +version: 1 +policy: + pullRequests: collaborators +tasks: + $let: + user: ${event.sender.login} + + fetch_rev: + $if: 'tasks_for == "github-pull-request"' + then: ${event.pull_request.head.sha} + else: + $if: 'tasks_for == "github-push"' + then: ${event.after} + else: ${event.release.tag_name} + + fetch_ref: + $if: 'tasks_for == "github-pull-request"' + then: ${event.pull_request.head.sha} + else: + $if: 'tasks_for == "github-push"' + then: ${event.after} + else: "refs/tags/${event.release.tag_name}:refs/tags/${event.release.tag_name}" + + http_repo: + $if: 'tasks_for == "github-pull-request"' + then: ${event.pull_request.base.repo.clone_url} + else: ${event.repository.clone_url} + + codecov_secret: + codecov-grizzly + + pypi_secret: + pypi-grizzly + + project_name: + Grizzly + + in: + $if: 'tasks_for in ["github-push", "github-release"] || (tasks_for == "github-pull-request" && event["action"] in ["opened", "reopened", "synchronize"])' + then: + $flatten: + - $map: [] + #- {msys: 'NmOU83KwRJGjk-btMbOOPA', toxenv: 'py38', name: 'tests python 3.8'} + each(build): + taskId: {$eval: as_slugid(build.toxenv + '-win')} + provisionerId: proj-fuzzing + workerType: ci-windows + created: {$fromNow: ''} + deadline: {$fromNow: '1 hour'} + scopes: + - secrets:get:project/fuzzing/${codecov_secret} + dependencies: + - ${build.msys} + payload: + env: + MSYSTEM: MINGW64 + TOXENV: ${build.toxenv} + CODECOV_SECRET: ${codecov_secret} + FETCH_REF: ${fetch_ref} + FETCH_REV: ${fetch_rev} + CLONE_REPO: ${http_repo} + mounts: + - format: tar.bz2 + content: + taskId: ${build.msys} + artifact: public/msys2.tar.bz2 + directory: . + command: + - "set HOME=%CD%" + - "set ARTIFACTS=%CD%" + - "set PATH=%CD%\\msys64\\MINGW64\\bin;%PATH%" + - "set PATH=%CD%\\msys64\\usr\\bin;%PATH%" + - >- + bash -x -e -c " + . 
py-ci.sh; + clone; + tox; + tox_codecov;" + features: + taskclusterProxy: true + maxRunTime: 900 + metadata: + name: ${project_name} ${build.name} (windows) + description: ${project_name} ${build.name} (windows) + owner: '${user}@users.noreply.github.com' + source: ${http_repo}/raw/${fetch_rev}/.taskcluster.yml + - $map: + - {image: ci-py-36, toxenv: 'py36', name: 'tests python 3.6'} + - {image: ci-py-37, toxenv: 'py37', name: 'tests python 3.7'} + - {image: ci-py-38, toxenv: 'py38', name: 'tests python 3.8'} + - {image: ci-py-39, toxenv: 'py39', name: 'tests python 3.9'} + - {image: ci-py-39, toxenv: 'lint', name: 'lint'} + each(build): + taskId: {$eval: as_slugid(build.toxenv)} + provisionerId: proj-fuzzing + workerType: ci + created: {$fromNow: ''} + deadline: {$fromNow: '1 hour'} + scopes: + - secrets:get:project/fuzzing/${codecov_secret} + payload: + maxRunTime: 900 + image: + type: indexed-image + path: public/${build.image}.tar.zst + namespace: project.fuzzing.orion.${build.image}.master + env: + TOXENV: ${build.toxenv} + CODECOV_SECRET: ${codecov_secret} + FETCH_REF: ${fetch_ref} + FETCH_REV: ${fetch_rev} + CLONE_REPO: ${http_repo} + features: + taskclusterProxy: true + command: + - /bin/bash + - '--login' + - '-x' + - '-e' + - '-c' + - >- + . py-ci.sh; + clone; + tox; + if [[ "${build.toxenv}" != "lint" ]]; then tox_codecov; fi; + metadata: + name: ${project_name} ${build.name} + description: ${project_name} ${build.name} + owner: '${user}@users.noreply.github.com' + source: ${http_repo}/raw/${fetch_rev}/.taskcluster.yml + - $if: 'tasks_for in ["github-release"]' + then: + - provisionerId: proj-fuzzing + workerType: ci + dependencies: + - {$eval: as_slugid("py36")} + - {$eval: as_slugid("py37")} + - {$eval: as_slugid("py38")} + #- {$eval: as_slugid("py38-win")} + - {$eval: as_slugid("py39")} + - {$eval: as_slugid("lint")} + created: {$fromNow: ''} + deadline: {$fromNow: '1 hour'} + scopes: + - secrets:get:project/fuzzing/${pypi_secret} + payload: + maxRunTime: 900 + image: + type: indexed-image + path: public/ci-py-38.tar.zst + namespace: project.fuzzing.orion.ci-py-38.master + features: + taskclusterProxy: true + env: + TOXENV: pypi + FETCH_REF: ${fetch_ref} + FETCH_REV: ${fetch_rev} + CLONE_REPO: ${http_repo} + PYPI_SECRET: ${pypi_secret} + command: + - /bin/bash + - '--login' + - '-x' + - '-e' + - '-c' + - >- + . 
py-ci.sh; + clone; + tox_pypi; + metadata: + name: ${project_name} PyPI upload + description: ${project_name} PyPI upload + owner: '${user}@users.noreply.github.com' + source: ${http_repo}/raw/${fetch_rev}/.taskcluster.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 0980405f..00000000 --- a/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -dist: bionic -cache: pip -language: python -os: linux -python: - - 3.6 - - 3.7 - - 3.8 - - 3.9 -jobs: - include: - - os: windows - language: shell - before_install: - - choco install python --version 3.9 - env: PATH=/c/Python39:/c/Python39/Scripts:$PATH -before_install: - - pip3 install --upgrade setuptools pip -install: - - pip3 install -e .[all] -before_script: - - pip3 install --upgrade pytest pytest-mock pytest-pylint pytest-cov codecov -script: - - pytest -after_success: - - codecov -deploy: - provider: pypi - distributions: sdist bdist_wheel - username: mozillasecurity - password: - secure: V3f+OpShnYZdxHSB8i6OwUMbFeqQ8NnfmYzm8s/YH4awSbn5TqjL1wQn7cqWR7trIUgUzp+RGafHy5agk7pTUM6Pj2tLb3w336c6KJmeub0vfifRFZJySjXTFN09DX64J2FD4KeiTIyOUemgQSU0jxCpwJXQrgdInHC3EEolE0loAydVdKWwYy6W5u+BPkY64zTHsEconDmc6TMqpKTI5UN4Iy/FPKTtJ4ifHUTv0uowI5JxbjgxYpzXmqD9XgtHo8k5jwPsu0QS7LuPNnDUN8sCFQ5WPJtnkE9SLfrNkCYnSzFrxJIfKbJBDZRjNp9v2Uz2jwCvvTyGKLQeVmVacLB4IwmT0ENXiDk7wtqVEO9lPXlvRVVt0w3xV5tOV8jfNcW8dookuZSzGtZ2sxWYwcro7uzNc36le6suhmJTq6w0uoj9ubcHnrbY2A181roAWPEtyCBf25lpM+C8jGmCgXEci9LhCZH79jrSgVds4jiXd+rXMo/qU86ye/6j/o7l5fnJmw/Or7dA3JsHIvYgBy9Ryh52uhCeggIcgFohgmhcRyLD5zkdLfzqNl+kY0nI12qeSAmceP8JCiUc0LX9/3e6B4X/knJ4s/ArbF5X/V4GzorE6tNxcpfwTMh4QnzraQgSAt/JhJz1QeYMMKJ0SVzAkGePNqDCWXHFGFoS+FE= - on: - branch: master - tags: true - os: linux - python: 3.8 diff --git a/README.md b/README.md index 241482a8..c6c74dbe 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ Grizzly ======= -[![Build Status](https://travis-ci.com/MozillaSecurity/grizzly.svg?branch=master)](https://travis-ci.com/MozillaSecurity/grizzly) +[![Task Status](https://community-tc.services.mozilla.com/api/github/v1/repository/MozillaSecurity/grizzly/master/badge.svg)](https://community-tc.services.mozilla.com/api/github/v1/repository/MozillaSecurity/grizzly/master/latest) [![codecov](https://codecov.io/gh/MozillaSecurity/grizzly/branch/master/graph/badge.svg)](https://codecov.io/gh/MozillaSecurity/grizzly) [![Matrix](https://img.shields.io/badge/dynamic/json?color=green&label=chat&query=%24.chunk[%3F(%40.canonical_alias%3D%3D%22%23fuzzing%3Amozilla.org%22)].num_joined_members&suffix=%20users&url=https%3A%2F%2Fmozilla.modular.im%2F_matrix%2Fclient%2Fr0%2FpublicRooms&style=flat&logo=matrix)](https://riot.im/app/#/room/#fuzzing:mozilla.org) [![PyPI](https://img.shields.io/pypi/v/grizzly-framework)](https://pypi.org/project/grizzly-framework) From 495e5cd525faf84890cf3bd4705e1fa29edf2e18 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Fri, 19 Mar 2021 13:59:30 -0700 Subject: [PATCH 234/531] [ci] Add missing deps in tox --- setup.cfg | 5 ++--- tox.ini | 2 ++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index a0eb7d35..9a61d72b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -20,9 +20,11 @@ url = https://github.com/MozillaSecurity/grizzly [options] include_package_data = True install_requires = + cssbeautifier fasteners ffpuppet FuzzManager + jsbeautifier lithium-reducer >= 0.5 prefpicker psutil >= 4.4.0 @@ -60,8 +62,5 @@ grizzly_reduce_strategies = dev = pre-commit tox -reduce = - cssbeautifier - jsbeautifier s3 = boto3 diff --git a/tox.ini b/tox.ini index 
3557d1f7..fa4cb42e 100644 --- a/tox.ini +++ b/tox.ini @@ -9,6 +9,8 @@ deps = pytest pytest-cov pytest-mock +extras = + s3 passenv = BUILD_CACHE CI From 901966d1f4e62e5d5ac1af92b5cb89e24c37df62 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Mon, 22 Mar 2021 14:38:14 -0700 Subject: [PATCH 235/531] [tests] Add ServerMap test coverage --- sapphire/test_server_map.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/sapphire/test_server_map.py b/sapphire/test_server_map.py index 5bb5fa9b..f7c9e517 100644 --- a/sapphire/test_server_map.py +++ b/sapphire/test_server_map.py @@ -2,7 +2,6 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -# pylint: disable=protected-access import pytest @@ -19,21 +18,22 @@ def test_servermap_01(): def test_servermap_02(tmp_path): """test ServerMap dynamic responses""" - - def fake_cb(): - pass - srv_map = ServerMap() - srv_map.set_dynamic_response("url_01", fake_cb, mime_type="test/type") + srv_map.set_dynamic_response("url_01", lambda: 0, mime_type="test/type") assert len(srv_map.dynamic) == 1 assert "url_01" in srv_map.dynamic assert srv_map.dynamic["url_01"].mime == "test/type" assert callable(srv_map.dynamic["url_01"].target) assert srv_map.dynamic["url_01"].type == Resource.URL_DYNAMIC - srv_map.set_dynamic_response("url_02", fake_cb, mime_type="foo") + srv_map.set_dynamic_response("url_02", lambda: 0, mime_type="foo") assert len(srv_map.dynamic) == 2 assert not srv_map.include assert not srv_map.redirect + with pytest.raises(TypeError, match="callback must be callable"): + srv_map.set_dynamic_response("x", None) + with pytest.raises(TypeError, match="mime_type must be of type 'str'"): + srv_map.set_dynamic_response("x", lambda: 0, None) + # test detecting collisions with pytest.raises(MapCollisionError): srv_map.set_include("url_01", str(tmp_path)) with pytest.raises(MapCollisionError): @@ -62,6 +62,7 @@ def test_servermap_03(tmp_path): assert len(srv_map.include) == 2 assert not srv_map.dynamic assert not srv_map.redirect + # test detecting collisions with pytest.raises(MapCollisionError, match="URL collision on 'url_01'"): srv_map.set_redirect("url_01", "test_file") with pytest.raises(MapCollisionError): @@ -88,6 +89,11 @@ def test_servermap_04(tmp_path): assert not srv_map.redirect["url_02"].required assert not srv_map.dynamic assert not srv_map.include + with pytest.raises(TypeError, match="target must not be an empty string"): + srv_map.set_redirect("x", "") + with pytest.raises(TypeError, match="target must be of type 'str'"): + srv_map.set_redirect("x", None) + # test detecting collisions with pytest.raises(MapCollisionError): srv_map.set_include("url_01", str(tmp_path)) with pytest.raises(MapCollisionError): @@ -96,6 +102,7 @@ def test_servermap_04(tmp_path): def test_servermap_05(): """test ServerMap._check_url()""" + # pylint: disable=protected-access assert ServerMap._check_url("test") == "test" assert ServerMap._check_url("") == "" # only alphanumeric is allowed From ee41c0127a66961c2936f80658c923d011d17cac Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Sun, 28 Mar 2021 13:34:27 -0700 Subject: [PATCH 236/531] Use os.scandir() in status_reporter.py --- grizzly/common/status_reporter.py | 13 ++++++------- grizzly/common/test_status_reporter.py | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/grizzly/common/status_reporter.py 
b/grizzly/common/status_reporter.py index b986eb71..76f5128f 100644 --- a/grizzly/common/status_reporter.py +++ b/grizzly/common/status_reporter.py @@ -111,15 +111,13 @@ def _results(self, max_len=85): @staticmethod def _scan(path, fname_pattern): - abs_path = os.path.abspath(path) - for fname in os.listdir(abs_path): - if fname_pattern.match(fname) is None: + for entry in os.scandir(path): + if fname_pattern.match(entry.name) is None: continue - full_path = os.path.join(abs_path, fname) - if not os.path.isfile(full_path): + if not entry.is_file(): continue - if os.path.getsize(full_path) > 0: - yield full_path + if entry.stat().st_size: + yield entry.path def _specific(self): """Merged and generate formatted output of status reports. @@ -283,6 +281,7 @@ def _sys_info(): try: txt.append(" %s\n" % (str(os.getloadavg()),)) except AttributeError: + # os.getloadavg() is not available on all platforms txt.append("\n") mem_usage = psutil.virtual_memory() txt.append(" Memory : ") diff --git a/grizzly/common/test_status_reporter.py b/grizzly/common/test_status_reporter.py index 3bd082ff..47e03259 100644 --- a/grizzly/common/test_status_reporter.py +++ b/grizzly/common/test_status_reporter.py @@ -94,7 +94,7 @@ def test_status_reporter_04(tmp_path): test_path.touch() assert not any(StatusReporter._scan(str(tmp_path), re_filter)) test_path.write_bytes(b"test") - assert tuple(StatusReporter._scan(str(tmp_path), re_filter)) + assert any(StatusReporter._scan(str(tmp_path), re_filter)) def test_status_reporter_05(tmp_path): From 3ae9b31e7ca6c27d2e0067c377c2ec3f4f7258ea Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Sun, 28 Mar 2021 17:45:27 -0700 Subject: [PATCH 237/531] Status reporter updates * Update imports * Increase test coverage * Fix nits --- grizzly/common/status_reporter.py | 73 ++++++++++++---------- grizzly/common/test_status_reporter.py | 84 ++++++++++++++++++-------- 2 files changed, 98 insertions(+), 59 deletions(-) diff --git a/grizzly/common/status_reporter.py b/grizzly/common/status_reporter.py index 76f5128f..aec54429 100644 --- a/grizzly/common/status_reporter.py +++ b/grizzly/common/status_reporter.py @@ -4,16 +4,23 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. """Manage Grizzly status reports.""" -import argparse -import logging -import os -import re +from argparse import ArgumentParser from collections import defaultdict from datetime import timedelta from functools import partial -from time import gmtime, localtime, strftime, time +from logging import DEBUG, INFO, basicConfig -import psutil +try: + from os import getloadavg +except ImportError: # pragma: no cover + # os.getloadavg() is not available on all platforms + getloadavg = None +from os import SEEK_CUR, getenv, scandir +from os.path import isdir +from re import match +from time import gmtime, strftime, time + +from psutil import cpu_count, cpu_percent, disk_usage, virtual_memory from .status import Status @@ -80,8 +87,6 @@ def load(cls, tb_path=None): Returns: StatusReporter: Contains available status reports and traceback reports. 
""" - if tb_path is not None and not os.path.isdir(tb_path): - raise OSError("%r is not a directory" % (tb_path,)) tracebacks = None if tb_path is None else cls._tracebacks(tb_path) return cls(list(Status.loadall()), tracebacks=tracebacks) @@ -111,8 +116,8 @@ def _results(self, max_len=85): @staticmethod def _scan(path, fname_pattern): - for entry in os.scandir(path): - if fname_pattern.match(entry.name) is None: + for entry in scandir(path): + if match(fname_pattern, entry.name) is None: continue if not entry.is_file(): continue @@ -274,29 +279,28 @@ def _sys_info(): txt.append( "CPU & Load : %d @ %0.1f%%" % ( - psutil.cpu_count(), - psutil.cpu_percent(interval=StatusReporter.CPU_POLL_INTERVAL), + cpu_count(), + cpu_percent(interval=StatusReporter.CPU_POLL_INTERVAL), ) ) - try: - txt.append(" %s\n" % (str(os.getloadavg()),)) - except AttributeError: - # os.getloadavg() is not available on all platforms + if getloadavg is not None: + txt.append(" %s\n" % (str(getloadavg()),)) + else: txt.append("\n") - mem_usage = psutil.virtual_memory() + mem_usage = virtual_memory() txt.append(" Memory : ") if mem_usage.available < 1073741824: # < 1GB txt.append("%dMB" % (mem_usage.available / 1048576,)) else: txt.append("%0.1fGB" % (mem_usage.available / 1073741824.0,)) txt.append(" of %0.1fGB free\n" % (mem_usage.total / 1073741824.0,)) - disk_usage = psutil.disk_usage("/") + usage = disk_usage("/") txt.append(" Disk : ") - if disk_usage.free < 1073741824: # < 1GB - txt.append("%dMB" % (disk_usage.free / 1048576,)) + if usage.free < 1073741824: # < 1GB + txt.append("%dMB" % (usage.free / 1048576,)) else: - txt.append("%0.1fGB" % (disk_usage.free / 1073741824.0,)) - txt.append(" of %0.1fGB free" % (disk_usage.total / 1073741824.0,)) + txt.append("%0.1fGB" % (usage.free / 1073741824.0,)) + txt.append(" of %0.1fGB free" % (usage.total / 1073741824.0,)) return "".join(txt) @staticmethod @@ -313,7 +317,7 @@ def _tracebacks(path, ignore_kbi=True, max_preceeding=5): list: A list of TracebackReports. 
""" tracebacks = list() - for screen_log in StatusReporter._scan(path, re.compile(r"screenlog\.\d+")): + for screen_log in StatusReporter._scan(path, r"screenlog\.\d+"): tbr = TracebackReport.from_file(screen_log, max_preceeding=max_preceeding) if tbr is None: continue @@ -363,7 +367,7 @@ def from_file(cls, input_log, max_preceeding=5): break if len(chunk) == cls.READ_LIMIT: # seek back to avoid missing beginning of token - in_fp.seek(len(token) * -1, os.SEEK_CUR) + in_fp.seek(len(token) * -1, SEEK_CUR) else: # no traceback here, move along return None @@ -390,7 +394,7 @@ def from_file(cls, input_log, max_preceeding=5): # stop at first empty line tb_end = min(line_num, line_count) break - if re.match(r"^\w+(\.\w+)*\:\s|^\w+(Interrupt|Error)$", log_line): + if match(r"^\w+(\.\w+)*\:\s|^\w+(Interrupt|Error)$", log_line): is_kbi = log_line.startswith("KeyboardInterrupt") # stop after error message tb_end = min(line_num + 1, line_count) @@ -430,15 +434,16 @@ def main(args=None): Returns: None """ - log_level = logging.INFO - log_fmt = "[%(asctime)s] %(message)s" - if bool(os.getenv("DEBUG")): # pragma: no cover - log_level = logging.DEBUG + if bool(getenv("DEBUG")): # pragma: no cover + log_level = DEBUG log_fmt = "%(levelname).1s %(name)s [%(asctime)s] %(message)s" - logging.basicConfig(format=log_fmt, datefmt="%Y-%m-%d %H:%M:%S", level=log_level) + else: + log_level = INFO + log_fmt = "[%(asctime)s] %(message)s" + basicConfig(format=log_fmt, datefmt="%Y-%m-%d %H:%M:%S", level=log_level) modes = ("status",) - parser = argparse.ArgumentParser(description="Grizzly status report generator") + parser = ArgumentParser(description="Grizzly status report generator") parser.add_argument("--dump", help="File to write report to") parser.add_argument( "--mode", @@ -456,9 +461,11 @@ def main(args=None): help="Scan path for Python tracebacks found in screenlog.# files", ) args = parser.parse_args(args) - if args.mode not in modes: parser.error("Invalid mode %r" % args.mode) + if args.tracebacks and not isdir(args.tracebacks): + parser.error("--tracebacks must be a directory") + reporter = StatusReporter.load(tb_path=args.tracebacks) if args.dump: reporter.dump_summary(args.dump) @@ -468,7 +475,7 @@ def main(args=None): return 0 print( "Grizzly Status - %s - Instance report frequency: %ds\n" - % (strftime("%Y/%m/%d %X", localtime()), Status.REPORT_FREQ) + % (strftime("%Y/%m/%d %X"), Status.REPORT_FREQ) ) print("[Reports]") reporter.print_specific() diff --git a/grizzly/common/test_status_reporter.py b/grizzly/common/test_status_reporter.py index 47e03259..62e985cd 100644 --- a/grizzly/common/test_status_reporter.py +++ b/grizzly/common/test_status_reporter.py @@ -5,13 +5,16 @@ """test Grizzly status reporter""" # pylint: disable=protected-access -import re from itertools import count +from re import match +from unittest.mock import Mock -import pytest +from pytest import mark, raises from .status_reporter import Status, StatusReporter, TracebackReport, main +GBYTES = 1_073_741_824 + def _fake_sys_info(): return ( @@ -44,10 +47,6 @@ def test_status_reporter_02(tmp_path): # missing reports path st_rpt = StatusReporter.load() assert not st_rpt.reports - # missing tb path - Status.PATH = str(tmp_path) - with pytest.raises(OSError): - StatusReporter.load(tb_path="no_dir") # empty reports and tb paths st_rpt = StatusReporter.load(tb_path=str(tmp_path)) assert isinstance(st_rpt.reports, list) @@ -56,25 +55,57 @@ def test_status_reporter_02(tmp_path): assert not st_rpt.tracebacks -def 
test_status_reporter_03(mocker): +@mark.parametrize( + "disk, memory, getloadavg", + [ + (Mock(free=12, total=GBYTES), Mock(available=12, total=GBYTES), None), + ( + Mock(free=10.23 * GBYTES, total=100 * GBYTES), + Mock(available=1.1 * GBYTES, total=2 * GBYTES), + None, + ), + ( + Mock(free=12, total=GBYTES), + Mock(available=12, total=GBYTES), + lambda: "(0.12, 0.34, 0.56)", + ), + ], +) +def test_status_reporter_03(mocker, disk, memory, getloadavg): """test StatusReporter._sys_info()""" - gbs = 1024 * 1024 * 1024 - fake_psutil = mocker.patch("grizzly.common.status_reporter.psutil", autospec=True) - fake_psutil.cpu_count.return_value = 4 - fake_psutil.cpu_percent.return_value = 10.0 - fake_psutil.virtual_memory.return_value = mocker.Mock(available=12, total=gbs) - fake_psutil.disk_usage.return_value = mocker.Mock(free=12, total=gbs) - sysinfo = StatusReporter._sys_info() - assert "MB" in sysinfo - fake_psutil.virtual_memory.return_value = mocker.Mock( - available=1.1 * gbs, total=2 * gbs + mocker.patch( + "grizzly.common.status_reporter.cpu_count", autospec=True, return_value=4 + ) + mocker.patch( + "grizzly.common.status_reporter.cpu_percent", autospec=True, return_value=10 + ) + mocker.patch( + "grizzly.common.status_reporter.disk_usage", autospec=True, return_value=disk + ) + mocker.patch( + "grizzly.common.status_reporter.virtual_memory", + autospec=True, + return_value=memory, ) - fake_psutil.disk_usage.return_value = mocker.Mock(free=10.23 * gbs, total=100 * gbs) + if getloadavg is None: + # simulate platform that does not have os.getloadavg() + mocker.patch("grizzly.common.status_reporter.getloadavg", None) + else: + mocker.patch( + "grizzly.common.status_reporter.getloadavg", + autospec=True, + side_effect=getloadavg, + ) sysinfo = StatusReporter._sys_info() - assert "MB" not in sysinfo + if disk.free < GBYTES or memory.available < GBYTES: + assert "MB" in sysinfo + else: + assert "MB" not in sysinfo lines = sysinfo.split("\n") assert len(lines) == 3 assert "CPU & Load : " in lines[0] + if getloadavg is not None: + assert lines[0].endswith("(0.12, 0.34, 0.56)") assert "Memory : " in lines[1] assert "Disk : " in lines[2] # verify alignment @@ -85,16 +116,15 @@ def test_status_reporter_03(mocker): def test_status_reporter_04(tmp_path): """test StatusReporter._scan()""" - re_filter = re.compile("TEST_FILE") (tmp_path / "somefile.txt").touch() test_path = tmp_path / "TEST_FILE" test_path.mkdir() - assert not any(StatusReporter._scan(str(tmp_path), re_filter)) + assert not any(StatusReporter._scan(str(tmp_path), "TEST_FILE")) test_path.rmdir() test_path.touch() - assert not any(StatusReporter._scan(str(tmp_path), re_filter)) + assert not any(StatusReporter._scan(str(tmp_path), "TEST_FILE")) test_path.write_bytes(b"test") - assert any(StatusReporter._scan(str(tmp_path), re_filter)) + assert any(StatusReporter._scan(str(tmp_path), "TEST_FILE")) def test_status_reporter_05(tmp_path): @@ -142,7 +172,7 @@ def test_status_reporter_05(tmp_path): # verify alignment position = len(lines[0].split(":")[0]) for line in lines: - assert re.match(r"\S\s:\s\S", line[position - 2 :]) + assert match(r"\S\s:\s\S", line[position - 2 :]) def test_status_reporter_06(mocker, tmp_path): @@ -536,6 +566,8 @@ def test_main_03(tmp_path): def test_main_04(): - """test main() with invalid mode""" - with pytest.raises(SystemExit): + """test main() with invalid args""" + with raises(SystemExit): main(["--mode", "invalid"]) + with raises(SystemExit): + main(["--tracebacks", "missing"]) From 
034e98f5028fbc6fe14e940eaa453ca4fcd8e219 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Mon, 29 Mar 2021 14:11:50 -0700 Subject: [PATCH 238/531] [tests] pre-commit deps --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2489c387..56370125 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,7 +12,7 @@ repos: hooks: - id: black - repo: https://github.com/pycqa/pylint - rev: pylint-2.7.2 + rev: pylint-2.7.3 hooks: - id: pylint - repo: https://gitlab.com/pycqa/flake8 From f1a8936383d7cad86916b085ad7e2d1ade461913 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Mon, 29 Mar 2021 14:12:05 -0700 Subject: [PATCH 239/531] [tests] Update coverage exclusion list --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index e8c92190..d467d9d6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,9 @@ omit = [ [tool.coverage.report] exclude_lines = [ + "@(abc.)?abstract*", + "except ImportError:", + "if __name__ == .__main__.:", "pragma: no cover", ] From 2ad2fb708b90a5e568dd6a4e060a7cd7950a85de Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 30 Mar 2021 13:25:15 -0700 Subject: [PATCH 240/531] Make Adapters plug-ins * Use plug-in code for both Adapters and Targets * Update 'no-op' Adapter --- grizzly/__main__.py | 14 -- grizzly/adapter/__init__.py | 10 + grizzly/{common => adapter}/adapter.py | 15 +- .../no_op_adapter}/__init__.py | 3 +- grizzly/{common => adapter}/test_adapter.py | 29 +-- grizzly/adapters/__init__.py | 73 -------- grizzly/args.py | 26 +-- grizzly/common/__init__.py | 3 - grizzly/common/plugins.py | 57 ++++++ grizzly/common/test_plugins.py | 81 ++++++++ grizzly/common/test_utils.py | 2 +- grizzly/main.py | 15 +- grizzly/reduce/core.py | 8 +- grizzly/reduce/test_main.py | 10 +- grizzly/replay/replay.py | 6 +- grizzly/replay/test_main.py | 12 +- grizzly/session.py | 2 +- grizzly/target/__init__.py | 56 ------ grizzly/target/test_target_loader.py | 131 ------------- grizzly/test_args.py | 35 ++-- grizzly/test_main.py | 177 +++++++++++------- grizzly/test_session.py | 19 +- setup.cfg | 5 +- 23 files changed, 349 insertions(+), 440 deletions(-) create mode 100644 grizzly/adapter/__init__.py rename grizzly/{common => adapter}/adapter.py (93%) rename grizzly/{adapters/NoOpAdapter => adapter/no_op_adapter}/__init__.py (96%) rename grizzly/{common => adapter}/test_adapter.py (78%) delete mode 100644 grizzly/adapters/__init__.py create mode 100644 grizzly/common/plugins.py create mode 100644 grizzly/common/test_plugins.py delete mode 100644 grizzly/target/test_target_loader.py diff --git a/grizzly/__main__.py b/grizzly/__main__.py index 2b437d58..54151d3d 100644 --- a/grizzly/__main__.py +++ b/grizzly/__main__.py @@ -2,10 +2,6 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -from logging import DEBUG, basicConfig -from os import getenv - -from .adapters import load from .args import GrizzlyArgs from .main import main @@ -13,14 +9,4 @@ __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] -# TODO: This can go away once Adapters are loaded using -# setuptools entrypoints. It is only needed to get log output from -# load() because it is called before parse arguments (which -# is where basicConfig should be called). 
-if getenv("DEBUG"): - basicConfig( - format="%(asctime)s %(levelname).1s %(name)s | %(message)s", level=DEBUG - ) -# load Adapters -load() raise SystemExit(main(GrizzlyArgs().parse_args())) diff --git a/grizzly/adapter/__init__.py b/grizzly/adapter/__init__.py new file mode 100644 index 00000000..68e44031 --- /dev/null +++ b/grizzly/adapter/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +from .adapter import Adapter, AdapterError + +__all__ = ( + "Adapter", + "AdapterError", +) diff --git a/grizzly/common/adapter.py b/grizzly/adapter/adapter.py similarity index 93% rename from grizzly/common/adapter.py rename to grizzly/adapter/adapter.py index 663403dc..0d88bc4d 100644 --- a/grizzly/common/adapter.py +++ b/grizzly/adapter/adapter.py @@ -31,15 +31,14 @@ class Adapter(metaclass=ABCMeta): fuzz (dict): Available as a safe scratch pad for the end-user. monitor (TargetMonitor): Used to provide Target status information to the adapter. + name (str): Name of the adapter. remaining (int): Can be used to indicate the number of TestCases remaining to process. """ - HARNESS_FILE = pathjoin(dirname(__file__), "harness.html") + HARNESS_FILE = pathjoin(dirname(__file__), "..", "common", "harness.html") # Only report test cases with served content. IGNORE_UNSERVED = True - # This is used as the identifier when launching Grizzly. Must be a unique string. - NAME = None # Maximum iterations between Target relaunches (<1 use default) RELAUNCH = 0 # Maximum execution time per test (used as minimum timeout). The iteration is @@ -47,14 +46,16 @@ class Adapter(metaclass=ABCMeta): # close it. TIME_LIMIT = 30 - __slots__ = ("_harness", "fuzz", "monitor", "remaining") + __slots__ = ("_harness", "fuzz", "monitor", "name", "remaining") - def __init__(self): - if not isinstance(self.NAME, str): - raise AdapterError("%s.NAME must be a string" % (type(self).__name__,)) + def __init__(self, name): + assert isinstance(name, str) + if not name: + raise AdapterError("name must not be empty") self._harness = None self.fuzz = dict() self.monitor = None + self.name = name self.remaining = None def cleanup(self): diff --git a/grizzly/adapters/NoOpAdapter/__init__.py b/grizzly/adapter/no_op_adapter/__init__.py similarity index 96% rename from grizzly/adapters/NoOpAdapter/__init__.py rename to grizzly/adapter/no_op_adapter/__init__.py index 21d9049a..698ac5b8 100644 --- a/grizzly/adapters/NoOpAdapter/__init__.py +++ b/grizzly/adapter/no_op_adapter/__init__.py @@ -1,9 +1,8 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-# pylint: disable=invalid-name -from grizzly.common import Adapter +from grizzly.adapter import Adapter __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] diff --git a/grizzly/common/test_adapter.py b/grizzly/adapter/test_adapter.py similarity index 78% rename from grizzly/common/test_adapter.py rename to grizzly/adapter/test_adapter.py index 46e78814..cc6b6473 100644 --- a/grizzly/common/test_adapter.py +++ b/grizzly/adapter/test_adapter.py @@ -8,28 +8,19 @@ class SimpleAdapter(Adapter): - NAME = "simple" - def generate(self, testcase, server_map): pass def test_adapter_01(): - """test a bad Adapter""" - - class BadAdapter(SimpleAdapter): - NAME = None - - with pytest.raises(AdapterError, match="BadAdapter.NAME must be a string"): - BadAdapter() - - -def test_adapter_02(): """test a simple Adapter""" - adpt = SimpleAdapter() + with pytest.raises(AdapterError, match="name must not be empty"): + SimpleAdapter("") + adpt = SimpleAdapter("simple") assert isinstance(adpt.fuzz, dict) assert not adpt.fuzz assert adpt.monitor is None + assert adpt.name == "simple" assert adpt.remaining is None assert adpt.get_harness() is None adpt.setup(None, None) @@ -40,16 +31,12 @@ def test_adapter_02(): adpt.cleanup() -def test_adapter_03(tmp_path): +def test_adapter_02(tmp_path): """test Adapter.enable_harness()""" - adpt = SimpleAdapter() + adpt = SimpleAdapter("a") # built-in harness - harness_file = tmp_path / "harness.html" - test_data = b"default_harness_data" - harness_file.write_bytes(test_data) - adpt.HARNESS_FILE = str(harness_file) # pylint: disable=invalid-name adpt.enable_harness() - assert adpt.get_harness() == test_data + assert adpt.get_harness() # external harness ext_harness_file = tmp_path / "ext_harness.html" test_data = b"external_harness_data" @@ -58,7 +45,7 @@ def test_adapter_03(tmp_path): assert adpt.get_harness() == test_data -def test_adapter_04(tmp_path): +def test_adapter_03(tmp_path): """test Adapter.scan_path()""" # empty path assert not any(SimpleAdapter.scan_path(str(tmp_path))) diff --git a/grizzly/adapters/__init__.py b/grizzly/adapters/__init__.py deleted file mode 100644 index 248f3226..00000000 --- a/grizzly/adapters/__init__.py +++ /dev/null @@ -1,73 +0,0 @@ -from importlib import import_module -from logging import getLogger -from os import listdir -from os.path import abspath, dirname, isfile -from os.path import join as pathjoin -from sys import exc_info -from sys import path as syspath -from traceback import extract_tb - -from grizzly.common import Adapter - -LOG = getLogger(__name__) - -__all__ = ("get", "load", "names") -__adapters__ = dict() - - -def get(name): - return __adapters__.get(name.lower(), None) - - -def load(path=None, skip_failures=True): - assert not __adapters__, "adapters have already been loaded" - if path is None: - path = dirname(__file__) - path = abspath(path) - LOG.debug("loading adapters from %r", path) - syspath.append(path) - for sub in listdir(path): - if not isfile(pathjoin(path, sub, "__init__.py")): - continue - LOG.debug("scanning %r", sub) - try: - lib = import_module(sub) - except Exception: # pylint: disable=broad-except - if not skip_failures: - raise - exc_type, exc_obj, exc_tb = exc_info() - tbinfo = extract_tb(exc_tb)[-1] - LOG.debug( - "raised %s: %s (%s:%d)", - exc_type.__name__, - exc_obj, - tbinfo[0], - tbinfo[1], - ) - continue - for clsname in dir(lib): - cls = getattr(lib, clsname) - if isinstance(cls, type) and issubclass(cls, Adapter): - if clsname == "Adapter": - continue - LOG.debug("sanity checking %r", 
clsname) - if not isinstance(cls.NAME, str): - raise RuntimeError( - "%s.NAME must be 'str' not %r" - % (cls.__name__, type(cls.NAME).__name__) - ) - if cls.NAME.lower() != cls.NAME: - raise RuntimeError( - "%s.NAME %r must be lowercase" % (cls.__name__, cls.NAME) - ) - if cls.NAME in __adapters__: - raise RuntimeError( - "Name collision! %r is used by %r and %r" - % (cls.NAME, __adapters__[cls.NAME].__name__, cls.__name__) - ) - __adapters__[cls.NAME] = cls - LOG.debug("%d adapters loaded", len(__adapters__)) - - -def names(): - return __adapters__.keys() diff --git a/grizzly/args.py b/grizzly/args.py index b53830a7..52826251 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -7,9 +7,8 @@ from os import listdir from os.path import exists, isdir, isfile -from .adapters import names as adapter_names +from .common.plugins import scan as scan_plugins from .common.utils import TIMEOUT_DELAY -from .target import available as available_targets # ref: https://stackoverflow.com/questions/12268602/sort-argparse-help-alphabetically @@ -94,7 +93,7 @@ def __init__(self): "--platform", default="ffpuppet", help="Platforms available: %s (default: %%(default)s)" - % ", ".join(available_targets()), + % ", ".join(scan_plugins("grizzly_targets")), ) self.launcher_grp.add_argument("-p", "--prefs", help="prefs.js file to use") self.launcher_grp.add_argument( @@ -160,6 +159,9 @@ def parse_args(self, argv=None): def sanity_check(self, args): if hasattr(super(), "sanity_check"): super().sanity_check(args) # pylint: disable=no-member + targets = scan_plugins("grizzly_targets") + if not targets: + self.parser.error("No Target platforms are installed") if "binary" not in self._sanity_skip and not isfile(args.binary): self.parser.error("file not found: %r" % args.binary) @@ -200,7 +202,7 @@ def sanity_check(self, args): if not isdir(ext) or (isfile(ext) and ext.endswith(".xpi")): self.parser.error("Extension must be a folder or .xpi") - if args.platform.lower() not in set(available_targets()): + if args.platform not in targets: self.parser.error("Unsupported platform %r" % args.platform) if args.prefs and not isfile(args.prefs): @@ -220,10 +222,10 @@ def sanity_check(self, args): class GrizzlyArgs(CommonArgs): def __init__(self): super().__init__() - self._adapters = sorted(adapter_names()) self._sanity_skip.add("tool") self.parser.add_argument( - "adapter", help="Available adapters: %s" % ", ".join(self._adapters) + "adapter", + help="Available adapters: %s" % ", ".join(scan_plugins("grizzly_adapters")), ) self.parser.add_argument( "--enable-profiling", @@ -274,14 +276,12 @@ def __init__(self): def sanity_check(self, args): super().sanity_check(args) + adapters = scan_plugins("grizzly_adapters") + if not adapters: + self.parser.error("No Adapters are installed") - if args.adapter.lower() not in self._adapters: - msg = ["Adapter %r does not exist." % args.adapter.lower()] - if self._adapters: - msg.append("Available adapters: %s" % ", ".join(self._adapters)) - else: - msg.append("No adapters available.") - self.parser.error(" ".join(msg)) + if args.adapter not in adapters: + self.parser.error("Adapter %r is not installed" % (args.adapter,)) if args.collect < 1: self.parser.error("--collect must be greater than 0") diff --git a/grizzly/common/__init__.py b/grizzly/common/__init__.py index f88daeb6..edde63bd 100644 --- a/grizzly/common/__init__.py +++ b/grizzly/common/__init__.py @@ -3,7 +3,6 @@ # License, v. 2.0. 
If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -from .adapter import Adapter, AdapterError from .iomanager import IOManager, ServerMap from .reporter import ( FilesystemReporter, @@ -18,8 +17,6 @@ from .utils import grz_tmp __all__ = ( - "Adapter", - "AdapterError", "FilesystemReporter", "FuzzManagerReporter", "grz_tmp", diff --git a/grizzly/common/plugins.py b/grizzly/common/plugins.py new file mode 100644 index 00000000..e17ca9f0 --- /dev/null +++ b/grizzly/common/plugins.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +from logging import getLogger + +from pkg_resources import iter_entry_points + +__all__ = ("load", "scan", "PluginLoadError") + + +LOG = getLogger(__name__) + + +class PluginLoadError(Exception): + """Raised if loading a plug-in fails""" + + +def load(name, group, base_type): + """Load a plug-in. + + Args: + name (str): + group (str): + base_type (*): Used to validate loaded objects. + Returns: + *: Python object. + """ + for entry in iter_entry_points(group): + if entry.name == name: + LOG.debug("loading %r (%s)", name, base_type.__name__) + plugin = entry.load() + break + else: + raise PluginLoadError("%r not found in %r" % (name, group)) + if not issubclass(plugin, base_type): + raise PluginLoadError("%r doesn't inherit from %s" % (name, base_type.__name__)) + return plugin + + +def scan(group): + """Scan for available plug-ins. + + Args: + group (str): Entry point group to scan. + + Returns: + list: Names of available entry points. + """ + found = list() + LOG.debug("scanning %r", group) + for entry in iter_entry_points(group): + if entry.name in found: + # not sure if this can even happen + raise PluginLoadError("Duplicate entry %r in %r" % (entry.name, group)) + found.append(entry.name) + return found diff --git a/grizzly/common/test_plugins.py b/grizzly/common/test_plugins.py new file mode 100644 index 00000000..e660d125 --- /dev/null +++ b/grizzly/common/test_plugins.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+from pkg_resources import EntryPoint +from pytest import raises + +from .plugins import PluginLoadError, load, scan + + +class FakeType1: + pass + + +class FakeType2: + pass + + +def test_load_01(mocker): + """test load() - nothing to load""" + mocker.patch( + "grizzly.common.plugins.iter_entry_points", autospec=True, return_value=[] + ) + with raises(PluginLoadError, match="'test-name' not found in 'test-group'"): + load("test-name", "test-group", FakeType1) + + +def test_load_02(mocker): + """test load() - successful load""" + entry = mocker.Mock(set_spec=EntryPoint) + entry.name = "test-name" + entry.load.return_value = FakeType1 + mocker.patch( + "grizzly.common.plugins.iter_entry_points", autospec=True, return_value=[entry] + ) + assert load("test-name", "test-group", FakeType1) + + +def test_load_03(mocker): + """test load() - invalid type""" + entry = mocker.Mock(set_spec=EntryPoint) + entry.name = "test-name" + entry.load.return_value = FakeType1 + mocker.patch( + "grizzly.common.plugins.iter_entry_points", autospec=True, return_value=[entry] + ) + with raises(PluginLoadError, match="'test-name' doesn't inherit from FakeType2"): + load("test-name", "test-group", FakeType2) + + +def test_scan_01(mocker): + """test scan() - no entries found""" + mocker.patch( + "grizzly.common.plugins.iter_entry_points", autospec=True, return_value=[] + ) + assert not scan("test_group") + + +def test_scan_02(mocker): + """test scan() - duplicate entry""" + entry = mocker.Mock(set_spec=EntryPoint) + entry.name = "test_entry" + mocker.patch( + "grizzly.common.plugins.iter_entry_points", + autospec=True, + return_value=[entry, entry], + ) + with raises(PluginLoadError, match="Duplicate entry 'test_entry' in 'test_group'"): + scan("test_group") + + +def test_scan_03(mocker): + """test scan() - success""" + entry = mocker.Mock(set_spec=EntryPoint) + entry.name = "test-name" + mocker.patch( + "grizzly.common.plugins.iter_entry_points", + autospec=True, + return_value=[entry], + ) + assert "test-name" in scan("test_group") diff --git a/grizzly/common/test_utils.py b/grizzly/common/test_utils.py index 594a65a7..ec823ef7 100644 --- a/grizzly/common/test_utils.py +++ b/grizzly/common/test_utils.py @@ -6,7 +6,7 @@ from .utils import grz_tmp -def test_testcase_01(mocker, tmp_path): +def test_grz_tmp_01(mocker, tmp_path): """test grz_tmp()""" mocker.patch( "grizzly.common.utils.gettempdir", autospec=True, return_value=str(tmp_path) diff --git a/grizzly/main.py b/grizzly/main.py index 3eea11b0..e9403840 100644 --- a/grizzly/main.py +++ b/grizzly/main.py @@ -8,7 +8,8 @@ from sapphire import Sapphire -from .adapters import get as get_adapter +from .adapter import Adapter +from .common.plugins import load as load_plugin from .common.reporter import ( FilesystemReporter, FuzzManagerReporter, @@ -16,8 +17,7 @@ ) from .common.utils import TIMEOUT_DELAY from .session import Session -from .target import TargetLaunchError, TargetLaunchTimeout -from .target import load as load_target +from .target import Target, TargetLaunchError, TargetLaunchTimeout __author__ = "Tyson Smith" __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] @@ -27,6 +27,7 @@ def configure_logging(log_level): + # TODO: move this to grizzly.common.utils if getenv("DEBUG") == "1": log_level = DEBUG if log_level == DEBUG: @@ -52,7 +53,7 @@ def main(args): LOG.info("Running with Xvfb") if args.rr: LOG.info("Running with RR") - elif args.valgrind: + if args.valgrind: LOG.info("Running with Valgrind. 
This will be SLOW!") adapter = None @@ -60,7 +61,7 @@ def main(args): target = None try: LOG.debug("initializing Adapter %r", args.adapter) - adapter = get_adapter(args.adapter)() + adapter = load_plugin(args.adapter, "grizzly_adapters", Adapter)(args.adapter) # test time limit and timeout sanity checking if args.time_limit: @@ -88,8 +89,8 @@ def main(args): else: relaunch = args.relaunch - LOG.debug("initializing the Target") - target = load_target(args.platform)( + LOG.debug("initializing the Target %r", args.platform) + target = load_plugin(args.platform, "grizzly_targets", Target)( args.binary, args.extension, args.launch_timeout, diff --git a/grizzly/reduce/core.py b/grizzly/reduce/core.py index 6e83da7d..51dbeaef 100644 --- a/grizzly/reduce/core.py +++ b/grizzly/reduce/core.py @@ -19,14 +19,14 @@ from sapphire import Sapphire from ..common.fuzzmanager import CrashEntry +from ..common.plugins import load as load_plugin from ..common.reporter import FilesystemReporter, FuzzManagerReporter from ..common.storage import TestCaseLoadFailure, TestFile from ..common.utils import ConfigError, grz_tmp from ..main import configure_logging from ..replay import ReplayManager from ..session import Session -from ..target import TargetLaunchError, TargetLaunchTimeout -from ..target import load as load_target +from ..target import Target, TargetLaunchError, TargetLaunchTimeout from .exceptions import GrizzlyReduceBaseException, NotReproducible from .stats import ReductionStats from .strategies import STRATEGIES @@ -767,7 +767,7 @@ def main(cls, args): args.repeat = max(args.min_crashes, args.repeat) relaunch = min(args.relaunch, args.repeat) LOG.debug("initializing the Target") - target = load_target(args.platform)( + target = load_plugin(args.platform, "grizzly_targets", Target)( args.binary, args.extension, args.launch_timeout, @@ -844,7 +844,7 @@ def main(cls, args): LOG.error(exc.msg) return exc.code - except Exception: # noqa pylint: disable=broad-except + except Exception: # pylint: disable=broad-except LOG.exception("Exception during reduction!") return Session.EXIT_ERROR diff --git a/grizzly/reduce/test_main.py b/grizzly/reduce/test_main.py index d97f7363..e83ab372 100644 --- a/grizzly/reduce/test_main.py +++ b/grizzly/reduce/test_main.py @@ -76,9 +76,9 @@ def test_args_02(tmp_path): 1, ), ("grizzly.reduce.core.ReduceManager.run", TargetLaunchTimeout, None, {}, 1), - ("grizzly.reduce.core.load_target", KeyboardInterrupt, None, {}, 1), + ("grizzly.reduce.core.load_plugin", KeyboardInterrupt, None, {}, 1), ( - "grizzly.reduce.core.load_target", + "grizzly.reduce.core.load_plugin", GrizzlyReduceBaseException(""), None, {}, @@ -109,7 +109,7 @@ def test_main_exit(mocker, patch_func, side_effect, return_value, kwargs, result QUAL_NO_TESTCASE=7, QUAL_REDUCER_ERROR=9, ) - mocker.patch("grizzly.reduce.core.load_target", autospec=True) + mocker.patch("grizzly.reduce.core.load_plugin", autospec=True) mocker.patch("grizzly.reduce.core.Sapphire", autospec=True) # setup args args = mocker.Mock( @@ -137,7 +137,7 @@ def test_main_exit(mocker, patch_func, side_effect, return_value, kwargs, result def test_main_launch_error(mocker, exc_type): mocker.patch("grizzly.reduce.core.FuzzManagerReporter", autospec=True) reporter = mocker.patch("grizzly.reduce.core.FilesystemReporter", autospec=True) - mocker.patch("grizzly.reduce.core.load_target", autospec=True) + mocker.patch("grizzly.reduce.core.load_plugin", autospec=True) mocker.patch("grizzly.reduce.core.ReplayManager.load_testcases") mocker.patch( 
"grizzly.reduce.core.ReplayManager.time_limits", return_value=(None, 10) @@ -173,7 +173,7 @@ def test_main_launch_error(mocker, exc_type): def test_testcase_prefs(mocker, tmp_path, result): """test that prefs from testcase are used if --prefs not specified and --prefs overrides""" - load_target = mocker.patch("grizzly.reduce.core.load_target") + load_target = mocker.patch("grizzly.reduce.core.load_plugin") mocker.patch("grizzly.reduce.core.ReduceManager.run", autospec=True) mocker.patch("grizzly.reduce.core.Sapphire", autospec=True) rmtree_mock = mocker.patch("grizzly.reduce.core.rmtree", autospec=True) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 9a45b462..200736f5 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -13,6 +13,7 @@ from sapphire import Sapphire, ServerMap +from ..common.plugins import load as load_plugin from ..common.reporter import FilesystemReporter, FuzzManagerReporter, Report from ..common.runner import Runner, RunResult from ..common.status import Status @@ -20,8 +21,7 @@ from ..common.utils import TIMEOUT_DELAY, ConfigError, grz_tmp from ..main import configure_logging from ..session import Session -from ..target import TargetLaunchError, TargetLaunchTimeout -from ..target import load as load_target +from ..target import Target, TargetLaunchError, TargetLaunchTimeout __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] @@ -596,7 +596,7 @@ def main(cls, args): relaunch, ) LOG.debug("initializing the Target") - target = load_target(args.platform)( + target = load_plugin(args.platform, "grizzly_targets", Target)( args.binary, args.extension, args.launch_timeout, diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index a5770f8e..0532778f 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -91,7 +91,7 @@ def test_main_01(mocker, tmp_path): ["test.html"], ) # passed to mocked Target.detect_failure # setup Target - load_target = mocker.patch("grizzly.replay.replay.load_target") + load_target = mocker.patch("grizzly.replay.replay.load_plugin") target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE target.RESULT_IGNORED = Target.RESULT_IGNORED @@ -155,7 +155,7 @@ def test_main_02(mocker, tmp_path): ["test.html"], ) # passed to mocked Target.detect_failure # setup Target - load_target = mocker.patch("grizzly.replay.replay.load_target") + load_target = mocker.patch("grizzly.replay.replay.load_plugin") target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE @@ -191,7 +191,7 @@ def test_main_03(mocker): """test ReplayManager.main() error cases""" fake_sig = mocker.patch("grizzly.replay.replay.CrashSignature", autospec=True) mocker.patch("grizzly.replay.replay.FuzzManagerReporter", autospec=True) - fake_load_target = mocker.patch("grizzly.replay.replay.load_target", autospec=True) + fake_load_target = mocker.patch("grizzly.replay.replay.load_plugin", autospec=True) mocker.patch("grizzly.replay.replay.Sapphire", autospec=True) fake_tc = mocker.patch("grizzly.replay.replay.TestCase", autospec=True) # setup args @@ -255,7 +255,7 @@ def test_main_04(mocker, tmp_path): mocker.patch("grizzly.replay.replay.Sapphire", autospec=True) mocker.patch("grizzly.replay.replay.TestCase", autospec=True) target = mocker.Mock(spec=Target, launch_timeout=30) - load_target = mocker.patch("grizzly.replay.replay.load_target", autospec=True) + 
load_target = mocker.patch("grizzly.replay.replay.load_plugin", autospec=True) load_target.return_value.return_value = target fake_tmp = tmp_path / "grz_tmp" fake_tmp.mkdir() @@ -309,7 +309,7 @@ def test_main_05(mocker, tmp_path): target.detect_failure.return_value = Target.RESULT_FAILURE target.monitor.is_healthy.return_value = False target.save_logs = _fake_save_logs - load_target = mocker.patch("grizzly.replay.replay.load_target") + load_target = mocker.patch("grizzly.replay.replay.load_plugin") load_target.return_value.return_value = target # setup args args = mocker.Mock( @@ -409,7 +409,7 @@ def test_main_06(mocker, tmp_path, arg_timelimit, arg_timeout, test_timelimit, r target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) target.RESULT_NONE = Target.RESULT_NONE target.detect_failure.return_value = Target.RESULT_NONE - load_target = mocker.patch("grizzly.replay.replay.load_target") + load_target = mocker.patch("grizzly.replay.replay.load_plugin") load_target.return_value.return_value = target # create test to load test = TestCase("test.html", None, None) diff --git a/grizzly/session.py b/grizzly/session.py index 15df2ec8..d27b352c 100644 --- a/grizzly/session.py +++ b/grizzly/session.py @@ -131,7 +131,7 @@ def display_status(self, log_limiter): def generate_testcase(self, time_limit): LOG.debug("calling iomanager.create_testcase()") - test = self.iomanager.create_testcase(self.adapter.NAME, time_limit) + test = self.iomanager.create_testcase(self.adapter.name, time_limit) LOG.debug("calling self.adapter.generate()") with self.status.measure("generate"): self.adapter.generate(test, self.iomanager.server_map) diff --git a/grizzly/target/__init__.py b/grizzly/target/__init__.py index 1b65654f..97cd4fd4 100644 --- a/grizzly/target/__init__.py +++ b/grizzly/target/__init__.py @@ -2,12 +2,6 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -from logging import getLogger -from sys import exc_info -from traceback import extract_tb - -from pkg_resources import iter_entry_points - from .target import ( Target, TargetError, @@ -21,57 +15,7 @@ "TargetError", "TargetLaunchError", "TargetLaunchTimeout", - "available", - "load", "sanitizer_opts", ) __author__ = "Tyson Smith" __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] - -LOG = getLogger(__name__) -TARGETS = dict() - - -def _load_targets(): - global TARGETS # pylint: disable=global-statement - TARGETS = {} - for entry_point in iter_entry_points("grizzly_targets"): - LOG.debug("scanning target %r", entry_point.name) - try: - target = entry_point.load() - except Exception: # pylint: disable=broad-except - exc_type, exc_obj, exc_tb = exc_info() - tbinfo = extract_tb(exc_tb)[-1] - LOG.warning( - "Target %r raised an exception %s: %s (%s:%d)", - entry_point.name, - exc_type.__name__, - exc_obj, - tbinfo[0], - tbinfo[1], - ) - continue - if not issubclass(target, Target): - LOG.warning( - "Target %r doesn't inherit from grizzly.target.Target, skipping.", - entry_point.name, - ) - elif entry_point.name in TARGETS: - raise RuntimeError( - "Target %r already exists as %r. 
(duplicate: %r)" - % (entry_point.name, TARGETS[entry_point.name], target) - ) - else: - TARGETS[entry_point.name] = target - - -def available(): - if not TARGETS: - _load_targets() - return TARGETS.keys() - - -def load(name): - if not TARGETS: - _load_targets() - return TARGETS[name] diff --git a/grizzly/target/test_target_loader.py b/grizzly/target/test_target_loader.py deleted file mode 100644 index 2d82663a..00000000 --- a/grizzly/target/test_target_loader.py +++ /dev/null @@ -1,131 +0,0 @@ -# coding=utf-8 -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -""" -unittests for target plugin loading -""" - -import pytest - -from grizzly.target import Target, available, load - - -class _FakeTarget1(Target): # pylint: disable=abstract-method - pass - - -class _FakeTarget2(Target): # pylint: disable=abstract-method - pass - - -def test_target_load_01(mocker): - """If no targets are available, available() should return nothing.""" - mocker.patch("grizzly.target.TARGETS", None) - mocker.patch("grizzly.target.iter_entry_points", lambda _: []) - assert not available() - - -def test_target_load_02(mocker): - """Loading targets works.""" - mocker.patch("grizzly.target.TARGETS", None) - - class _FakeEntryPoint1: - name = "test1" - - @staticmethod - def load(): - return _FakeTarget1 - - class _FakeEntryPoint2: - name = "test2" - - @staticmethod - def load(): - return _FakeTarget2 - - mocker.patch( - "grizzly.target.iter_entry_points", - lambda _: [_FakeEntryPoint1, _FakeEntryPoint2], - ) - assert set(available()) == {"test1", "test2"} - assert load("test1") is _FakeTarget1 - assert load("test2") is _FakeTarget2 - - -def test_target_load_03(mocker): - """Non-Target will be skipped.""" - mocker.patch("grizzly.target.TARGETS", None) - - class _FakeEntryPoint1: - name = "test1" - - @staticmethod - def load(): - return Target - - class _FakeEntryPoint2: - name = "test2" - - @staticmethod - def load(): - return object - - mocker.patch( - "grizzly.target.iter_entry_points", - lambda _: [_FakeEntryPoint1, _FakeEntryPoint2], - ) - assert set(available()) == {"test1"} - assert load("test1") is Target - - -def test_target_load_04(mocker): - """test load() with name collision""" - mocker.patch("grizzly.target.TARGETS", None) - - class _FakeEntryPoint1: - name = "test" - - @staticmethod - def load(): - return _FakeTarget1 - - class _FakeEntryPoint2: - name = "test" - - @staticmethod - def load(): - return _FakeTarget2 - - mocker.patch( - "grizzly.target.iter_entry_points", - lambda _: [_FakeEntryPoint1, _FakeEntryPoint2], - ) - with pytest.raises(RuntimeError, match=r"Target (.)test\1 already exists"): - available() - - -def test_target_load_05(mocker): - """test load() with broken Target""" - mocker.patch("grizzly.target.TARGETS", None) - - class _FakeEntryPoint1: - name = "test1" - - @staticmethod - def load(): - return Target - - class _FakeEntryPoint2: - name = "test2" - - @staticmethod - def load(): - raise Exception("boo!") - - mocker.patch( - "grizzly.target.iter_entry_points", - lambda _: [_FakeEntryPoint1, _FakeEntryPoint2], - ) - assert set(available()) == {"test1"} - assert load("test1") is Target diff --git a/grizzly/test_args.py b/grizzly/test_args.py index e91ce4ec..d85c0b6f 100644 --- a/grizzly/test_args.py +++ b/grizzly/test_args.py @@ -8,7 +8,7 @@ from .args import GrizzlyArgs -def test_grizzly_args_01(capsys, tmp_path): +def 
test_grizzly_args_01(capsys, mocker, tmp_path): """test GrizzlyArgs.parse_args()""" # test help with raises(SystemExit): @@ -18,13 +18,19 @@ def test_grizzly_args_01(capsys, tmp_path): # test success fake_bin = tmp_path / "fake.bin" fake_bin.touch() - argp = GrizzlyArgs() - argp._adapters = ["test_adapter"] - assert argp.parse_args(argv=[str(fake_bin), "test_adapter"]) + mocker.patch( + "grizzly.args.scan_plugins", + autospec=True, + side_effect=(["targ1"], ["adpt1"], ["targ1"], ["adpt1"]), + ) + assert GrizzlyArgs().parse_args( + argv=[str(fake_bin), "adpt1", "--platform", "targ1"] + ) -def test_grizzly_args_03(capsys): +def test_grizzly_args_02(capsys, mocker): """test GrizzlyArgs.parse_args() handling binary""" + mocker.patch("grizzly.args.scan_plugins", autospec=True, return_value=["blah"]) # test missing required args with raises(SystemExit): GrizzlyArgs().parse_args(argv=[]) @@ -37,22 +43,23 @@ def test_grizzly_args_03(capsys): assert "error: file not found: 'missing_bin'" in err -def test_grizzly_args_04(capsys, tmp_path): - """test GrizzlyArgs.parse_args() handling adapter""" +def test_grizzly_args_03(capsys, mocker, tmp_path): + """test GrizzlyArgs.parse_args() handling Adapter""" + scan_plugins = mocker.patch("grizzly.args.scan_plugins", autospec=True) fake_bin = tmp_path / "fake.bin" fake_bin.touch() - # no adapters + # no adapters installed + scan_plugins.side_effect = (["targ1"], [], ["targ1"], []) with raises(SystemExit): - GrizzlyArgs().parse_args(argv=[str(fake_bin), "missing"]) + GrizzlyArgs().parse_args(argv=[str(fake_bin), "adpt", "--platform", "targ1"]) _, err = capsys.readouterr() - assert "error: Adapter 'missing' does not exist. No adapters available." in err + assert "error: No Adapters are installed" in err # invalid adapter name - argp = GrizzlyArgs() - argp._adapters = ["a1", "b2"] + scan_plugins.side_effect = (["targ1"], ["a1", "a2"], ["targ1"], ["a1", "a2"]) with raises(SystemExit): - argp.parse_args(argv=[str(fake_bin), "missing"]) + GrizzlyArgs().parse_args(argv=[str(fake_bin), "missing", "--platform", "targ1"]) _, err = capsys.readouterr() - assert "error: Adapter 'missing' does not exist. 
Available adapters: a1, b2" in err + assert "error: Adapter 'missing' is not installed" in err # TODO: Add CommonArgs tests diff --git a/grizzly/test_main.py b/grizzly/test_main.py index 70de6665..06627883 100644 --- a/grizzly/test_main.py +++ b/grizzly/test_main.py @@ -7,7 +7,7 @@ from sapphire import Sapphire -from .common import Adapter +from .adapter import Adapter from .main import main from .session import Session from .target import Target, TargetLaunchError @@ -42,91 +42,135 @@ def __init__(self): self.xvfb = False -# TODO: these could use call_count checks - - -def test_main_01(mocker): +@mark.parametrize( + "cov, adpt_relaunch, limit, verbose", + [ + # successful run + (False, 0, 0, True), + # successful run (with limit) + (False, 0, 10, True), + # successful run (with coverage) + (True, 0, 0, False), + # relaunch 1 + (False, 1, 0, False), + # relaunch 10 + (False, 10, 0, False), + ], +) +def test_main_01(mocker, cov, adpt_relaunch, limit, verbose): """test main()""" - fake_adapter = mocker.Mock(spec=Adapter) - fake_adapter.NAME = "fake" + fake_adapter = mocker.NonCallableMock(spec_set=Adapter) + fake_adapter.RELAUNCH = adpt_relaunch fake_adapter.TIME_LIMIT = 10 - mocker.patch("grizzly.main.get_adapter", return_value=lambda: fake_adapter) - mocker.patch.dict( - "grizzly.target.TARGETS", values={"fake-target": mocker.Mock(spec=Target)} + fake_target = mocker.NonCallableMock(spec_set=Target) + plugin_loader = mocker.patch("grizzly.main.load_plugin", autospec=True) + plugin_loader.side_effect = ( + mocker.Mock(spec_set=Adapter, return_value=fake_adapter), + mocker.Mock(spec_set=Target, return_value=fake_target), ) fake_session = mocker.patch("grizzly.main.Session", autospec=True) - fake_session.return_value.server = mocker.Mock(spec=Sapphire) + fake_session.return_value.server = mocker.Mock(spec_set=Sapphire) fake_session.EXIT_SUCCESS = Session.EXIT_SUCCESS args = FakeArgs() args.adapter = "fake" - args.input = "fake" args.ignore = ["fake", "fake"] + args.limit = limit args.prefs = "fake" + args.rr = True args.valgrind = True args.xvfb = True - # successful run (with coverage) - fake_adapter.RELAUNCH = 10 - args.coverage = True + args.verbose = verbose + if not verbose: + args.log_level = 20 + args.coverage = cov assert main(args) == Session.EXIT_SUCCESS - assert fake_session.mock_calls[0][-1]["coverage"] - assert fake_session.mock_calls[0][-1]["relaunch"] == 10 - fake_session.reset_mock() - # successful run (without coverage) - fake_adapter.RELAUNCH = 1 - args.coverage = False - assert main(args) == Session.EXIT_SUCCESS - assert not fake_session.mock_calls[0][-1]["coverage"] - assert fake_session.mock_calls[0][-1]["relaunch"] == 1 - fake_session.reset_mock() - # with FM + assert fake_session.mock_calls[0][-1]["coverage"] == cov + if adpt_relaunch: + assert fake_session.mock_calls[0][-1]["relaunch"] == adpt_relaunch + else: + # check default + assert fake_session.mock_calls[0][-1]["relaunch"] == 1000 + assert fake_session.return_value.run.call_count == 1 + assert fake_target.cleanup.call_count == 1 + + +@mark.parametrize( + "reporter", + [ + # Default reporter + None, + # FuzzManager Reporter + "FuzzManager", + # S3FuzzManager Reporter + "S3FuzzManager", + ], +) +def test_main_02(mocker, reporter): + """test main() - test reporters""" + fake_adapter = mocker.NonCallableMock(spec_set=Adapter) fake_adapter.RELAUNCH = 0 - fake_reporter = mocker.patch("grizzly.main.FuzzManagerReporter", autospec=True) - fake_reporter.sanity_check.return_value = True - args.coverage = True - args.input = 
None - args.log_level = None - args.fuzzmanager = True - args.rr = True - assert main(args) == Session.EXIT_SUCCESS - assert fake_session.mock_calls[0][-1]["coverage"] - assert fake_session.mock_calls[0][-1]["relaunch"] == 1000 - fake_session.reset_mock() - # with S3FM (with iteration limit) - fake_reporter = mocker.patch("grizzly.main.S3FuzzManagerReporter", autospec=True) - fake_reporter.sanity_check.return_value = True - args.fuzzmanager = False - args.limit = 10 - args.s3_fuzzmanager = True + fake_adapter.TIME_LIMIT = 10 + fake_target = mocker.NonCallableMock(spec_set=Target) + plugin_loader = mocker.patch("grizzly.main.load_plugin", autospec=True) + plugin_loader.side_effect = ( + mocker.Mock(spec_set=Adapter, return_value=fake_adapter), + mocker.Mock(spec_set=Target, return_value=fake_target), + ) + fake_session = mocker.patch("grizzly.main.Session", autospec=True) + fake_session.return_value.server = mocker.Mock(spec_set=Sapphire) + fake_session.EXIT_SUCCESS = Session.EXIT_SUCCESS + args = FakeArgs() + args.adapter = "fake" + if reporter == "FuzzManager": + fake_reporter = mocker.patch("grizzly.main.FuzzManagerReporter", autospec=True) + fake_reporter.sanity_check.return_value = True + args.fuzzmanager = True + elif reporter == "S3FuzzManager": + fake_reporter = mocker.patch( + "grizzly.main.S3FuzzManagerReporter", autospec=True + ) + fake_reporter.sanity_check.return_value = True + args.s3_fuzzmanager = True assert main(args) == Session.EXIT_SUCCESS + assert fake_target.cleanup.call_count == 1 -def test_main_02(mocker): - """test main() exit codes""" - fake_adapter = mocker.Mock(spec=Adapter) - fake_adapter.TIME_LIMIT = 10 +@mark.parametrize( + "exit_code, to_raise", + [ + # test user abort + (Session.EXIT_ABORT, KeyboardInterrupt()), + # test launch failure + (Session.EXIT_LAUNCH_FAILURE, TargetLaunchError("test", None)), + ], +) +def test_main_03(mocker, exit_code, to_raise): + """test main() - exit codes""" + fake_adapter = mocker.NonCallableMock(spec_set=Adapter, name="fake") fake_adapter.RELAUNCH = 0 - mocker.patch("grizzly.main.get_adapter", return_value=lambda: fake_adapter) - mocker.patch.dict( - "grizzly.target.TARGETS", values={"fake-target": mocker.Mock(spec=Target)} + fake_adapter.TIME_LIMIT = 10 + fake_target = mocker.NonCallableMock(spec_set=Target) + plugin_loader = mocker.patch("grizzly.main.load_plugin", autospec=True) + plugin_loader.side_effect = ( + mocker.Mock(spec_set=Adapter, return_value=fake_adapter), + mocker.Mock(spec_set=Target, return_value=fake_target), ) fake_session = mocker.patch("grizzly.main.Session", autospec=True) fake_session.EXIT_SUCCESS = Session.EXIT_SUCCESS fake_session.EXIT_ABORT = Session.EXIT_ABORT fake_session.EXIT_ARGS = fake_session.EXIT_ARGS = Session.EXIT_ARGS fake_session.EXIT_LAUNCH_FAILURE = Session.EXIT_LAUNCH_FAILURE - fake_session.return_value.server = mocker.Mock(spec=Sapphire) + fake_session.return_value.server = mocker.Mock(spec_set=Sapphire) args = FakeArgs() args.adapter = "fake" args.input = "fake" - fake_session.return_value.run.side_effect = KeyboardInterrupt - assert main(args) == Session.EXIT_ABORT - # test TargetLaunchError - fake_session.return_value.run.side_effect = TargetLaunchError("test", None) - assert main(args) == Session.EXIT_LAUNCH_FAILURE + fake_session.return_value.run.side_effect = to_raise + assert main(args) == exit_code + assert fake_target.cleanup.call_count == 1 @mark.parametrize( - "arg_testlimit, arg_timeout, result", + "arg_testlimit, arg_timeout, exit_code", [ # use default test time limit and 
timeout values (None, None, Session.EXIT_SUCCESS), @@ -140,22 +184,23 @@ def test_main_02(mocker): (11, 10, Session.EXIT_ARGS), ], ) -def test_main_03(mocker, arg_testlimit, arg_timeout, result): - """test main() time-limit and timeout""" - fake_adapter = mocker.Mock(spec=Adapter) - fake_adapter.NAME = "fake" +def test_main_04(mocker, arg_testlimit, arg_timeout, exit_code): + """test main() - time-limit and timeout""" + fake_adapter = mocker.NonCallableMock(spec_set=Adapter, name="fake") fake_adapter.RELAUNCH = 1 fake_adapter.TIME_LIMIT = 10 - mocker.patch("grizzly.main.get_adapter", return_value=lambda: fake_adapter) - mocker.patch.dict( - "grizzly.target.TARGETS", values={"fake-target": mocker.Mock(spec=Target)} + fake_target = mocker.NonCallableMock(spec_set=Target) + plugin_loader = mocker.patch("grizzly.main.load_plugin", autospec=True) + plugin_loader.side_effect = ( + mocker.Mock(spec_set=Adapter, return_value=fake_adapter), + mocker.Mock(spec_set=Target, return_value=fake_target), ) fake_session = mocker.patch("grizzly.main.Session", autospec=True) - fake_session.return_value.server = mocker.Mock(spec=Sapphire) + fake_session.return_value.server = mocker.Mock(spec_set=Sapphire) fake_session.EXIT_ARGS = Session.EXIT_ARGS fake_session.EXIT_SUCCESS = Session.EXIT_SUCCESS args = FakeArgs() args.adapter = "fake" args.time_limit = arg_testlimit args.timeout = arg_timeout - assert main(args) == result + assert main(args) == exit_code diff --git a/grizzly/test_session.py b/grizzly/test_session.py index 6ab65908..01818c56 100644 --- a/grizzly/test_session.py +++ b/grizzly/test_session.py @@ -10,26 +10,25 @@ from sapphire import SERVED_ALL, SERVED_NONE, SERVED_TIMEOUT, Sapphire -from .common import Adapter, Report, Reporter, RunResult, Status +from .adapter import Adapter +from .common import Report, Reporter, RunResult, Status from .session import LogOutputLimiter, Session, SessionError from .target import Target, TargetLaunchError class SimpleAdapter(Adapter): - NAME = "simple" - def __init__(self, use_harness, remaining=None): - super().__init__() + super().__init__("simple") self.remaining = remaining self._use_harness = use_harness - def setup(self, input_path, server_map): + def setup(self, input_path, _server_map): if self._use_harness: self.enable_harness() self.fuzz["input"] = input_path - def generate(self, testcase, server_map): - assert testcase.adapter_name == self.NAME + def generate(self, testcase, _server_map): + assert testcase.adapter_name == self.name testcase.input_fname = self.fuzz["input"] testcase.add_from_data("test", testcase.landing_page) if self.remaining is not None: @@ -206,9 +205,7 @@ def test_session_04(mocker, tmp_path): """test Adapter creating invalid test case""" class FuzzAdapter(Adapter): - NAME = "fuzz" - - def generate(self, testcase, server_map): + def generate(self, _testcase, _server_map): pass Status.PATH = str(tmp_path) @@ -216,7 +213,7 @@ def generate(self, testcase, server_map): server.serve_path.return_value = (SERVED_NONE, []) target = mocker.Mock(spec=Target, launch_timeout=30, prefs=None) target.monitor.launches = 1 - with Session(FuzzAdapter(), None, server, target) as session: + with Session(FuzzAdapter("fuzz"), None, server, target) as session: with raises(SessionError, match="Test case is missing landing page"): session.run([], 10) diff --git a/setup.cfg b/setup.cfg index 9a61d72b..d7ea5725 100644 --- a/setup.cfg +++ b/setup.cfg @@ -30,8 +30,7 @@ install_requires = psutil >= 4.4.0 packages = grizzly - grizzly.adapters - 
grizzly.adapters.NoOpAdapter + grizzly.adapter grizzly.common grizzly.reduce grizzly.reduce.strategies @@ -45,6 +44,8 @@ zip_safe = False [options.entry_points] console_scripts = grizzly.status = grizzly.common.status_reporter:main +grizzly_adapters = + no-op = grizzly.adapter.no_op_adapter:NoOpAdapter grizzly_targets = ffpuppet = grizzly.target.puppet_target:PuppetTarget grizzly_reduce_strategies = From bfa8a42001afe51868a75321b0ad2ba3b17dcf82 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 30 Mar 2021 13:27:12 -0700 Subject: [PATCH 241/531] [tests] Update coverage exclusion list --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d467d9d6..785c7467 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ omit = [ [tool.coverage.report] exclude_lines = [ "@(abc.)?abstract*", - "except ImportError:", + "except ImportError(.*):", "if __name__ == .__main__.:", "pragma: no cover", ] From b12ff6a833ed226c3df681982d02c90e153bf71c Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 30 Mar 2021 13:48:36 -0700 Subject: [PATCH 242/531] [tests] Add no-op Adapter test --- grizzly/adapter/no_op_adapter/test_no_op.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 grizzly/adapter/no_op_adapter/test_no_op.py diff --git a/grizzly/adapter/no_op_adapter/test_no_op.py b/grizzly/adapter/no_op_adapter/test_no_op.py new file mode 100644 index 00000000..cb40661d --- /dev/null +++ b/grizzly/adapter/no_op_adapter/test_no_op.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from grizzly.common import TestCase + +from . import NoOpAdapter + + +def test_no_op_01(): + """test a simple Adapter""" + adapter = NoOpAdapter("no-op") + adapter.setup(None, None) + test = TestCase("a", "b", adapter.name) + assert not test.data_size + assert not test.contains("a") + adapter.generate(test, None) + assert test.contains("a") From 08fa611a43ae4f2ee397a67ad814d41addbe0f4f Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 30 Mar 2021 15:59:02 -0700 Subject: [PATCH 243/531] Move configure_logging() to common.utils --- grizzly/common/__init__.py | 3 ++- grizzly/common/test_utils.py | 27 ++++++++++++++++++++++++++- grizzly/common/utils.py | 27 ++++++++++++++++++++++++--- grizzly/main.py | 19 +++---------------- grizzly/reduce/core.py | 3 +-- grizzly/replay/replay.py | 3 +-- 6 files changed, 57 insertions(+), 25 deletions(-) diff --git a/grizzly/common/__init__.py b/grizzly/common/__init__.py index edde63bd..bb15f8ce 100644 --- a/grizzly/common/__init__.py +++ b/grizzly/common/__init__.py @@ -14,11 +14,12 @@ from .runner import Runner, RunResult from .status import Status from .storage import TestCase, TestCaseLoadFailure, TestFile, TestFileExists -from .utils import grz_tmp +from .utils import configure_logging, grz_tmp __all__ = ( "FilesystemReporter", "FuzzManagerReporter", + "configure_logging", "grz_tmp", "IOManager", "Report", diff --git a/grizzly/common/test_utils.py b/grizzly/common/test_utils.py index ec823ef7..afa617ed 100644 --- a/grizzly/common/test_utils.py +++ b/grizzly/common/test_utils.py @@ -2,8 +2,11 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+from logging import DEBUG, INFO -from .utils import grz_tmp +from pytest import mark + +from .utils import configure_logging, grz_tmp def test_grz_tmp_01(mocker, tmp_path): @@ -22,3 +25,25 @@ def test_grz_tmp_01(mocker, tmp_path): path = grz_tmp("test1", "test2") assert path == str(tmp_path / "grizzly" / "test1" / "test2") assert (tmp_path / "grizzly" / "test1" / "test2").is_dir() + + +@mark.parametrize( + "env, log_level", + [ + # default log level + ("0", INFO), + # debug log level + ("0", DEBUG), + # enable debug log level via env + ("1", INFO), + # enable debug log level via env + ("TRUE", INFO), + ], +) +def test_configure_logging_01(mocker, env, log_level): + """test configure_logging()""" + config = mocker.patch("grizzly.common.utils.basicConfig", autospec=True) + mocker.patch("grizzly.common.utils.getenv", autospec=True, return_value=env) + configure_logging(log_level) + assert config.call_count == 1 + assert config.call_args.kwargs["level"] == (DEBUG if env != "0" else log_level) diff --git a/grizzly/common/utils.py b/grizzly/common/utils.py index cea57c09..e0fbf9cf 100644 --- a/grizzly/common/utils.py +++ b/grizzly/common/utils.py @@ -2,12 +2,12 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. - -from os import makedirs +from logging import DEBUG, basicConfig +from os import getenv, makedirs from os.path import join as pathjoin from tempfile import gettempdir -__all__ = ("ConfigError", "grz_tmp", "TIMEOUT_DELAY") +__all__ = ("ConfigError", "configure_logging", "grz_tmp", "TIMEOUT_DELAY") __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] @@ -24,6 +24,27 @@ def __init__(self, message, exit_code): self.exit_code = exit_code +def configure_logging(log_level): + """Configure log output level and formatting. + + Args: + log_level (int): Set log level. + + Returns: + None + """ + # allow force enabling log_level via environment + if getenv("DEBUG", "0").lower() in ("1", "true"): + log_level = DEBUG + if log_level == DEBUG: + date_fmt = None + log_fmt = "%(asctime)s %(levelname).1s %(name)s | %(message)s" + else: + date_fmt = "%Y-%m-%d %H:%M:%S" + log_fmt = "[%(asctime)s] %(message)s" + basicConfig(format=log_fmt, datefmt=date_fmt, level=log_level) + + def grz_tmp(*subdir): path = pathjoin(gettempdir(), "grizzly", *subdir) makedirs(path, exist_ok=True) diff --git a/grizzly/main.py b/grizzly/main.py index e9403840..50e98118 100644 --- a/grizzly/main.py +++ b/grizzly/main.py @@ -2,8 +2,8 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-from logging import DEBUG, basicConfig, getLogger -from os import getcwd, getenv, getpid +from logging import DEBUG, getLogger +from os import getcwd, getpid from os.path import join as pathjoin from sapphire import Sapphire @@ -15,7 +15,7 @@ FuzzManagerReporter, S3FuzzManagerReporter, ) -from .common.utils import TIMEOUT_DELAY +from .common.utils import TIMEOUT_DELAY, configure_logging from .session import Session from .target import Target, TargetLaunchError, TargetLaunchTimeout @@ -26,19 +26,6 @@ LOG = getLogger(__name__) -def configure_logging(log_level): - # TODO: move this to grizzly.common.utils - if getenv("DEBUG") == "1": - log_level = DEBUG - if log_level == DEBUG: - date_fmt = None - log_fmt = "%(asctime)s %(levelname).1s %(name)s | %(message)s" - else: - date_fmt = "%Y-%m-%d %H:%M:%S" - log_fmt = "[%(asctime)s] %(message)s" - basicConfig(format=log_fmt, datefmt=date_fmt, level=log_level) - - def main(args): configure_logging(args.log_level) LOG.info("Starting Grizzly (%d)", getpid()) diff --git a/grizzly/reduce/core.py b/grizzly/reduce/core.py index 51dbeaef..c6d96004 100644 --- a/grizzly/reduce/core.py +++ b/grizzly/reduce/core.py @@ -22,8 +22,7 @@ from ..common.plugins import load as load_plugin from ..common.reporter import FilesystemReporter, FuzzManagerReporter from ..common.storage import TestCaseLoadFailure, TestFile -from ..common.utils import ConfigError, grz_tmp -from ..main import configure_logging +from ..common.utils import ConfigError, configure_logging, grz_tmp from ..replay import ReplayManager from ..session import Session from ..target import Target, TargetLaunchError, TargetLaunchTimeout diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 200736f5..5341c19f 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -18,8 +18,7 @@ from ..common.runner import Runner, RunResult from ..common.status import Status from ..common.storage import TestCase, TestCaseLoadFailure, TestFile -from ..common.utils import TIMEOUT_DELAY, ConfigError, grz_tmp -from ..main import configure_logging +from ..common.utils import TIMEOUT_DELAY, ConfigError, configure_logging, grz_tmp from ..session import Session from ..target import Target, TargetLaunchError, TargetLaunchTimeout From acbf445d72ef852aaac7bd77e6dd51e09824830b Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 30 Mar 2021 16:04:04 -0700 Subject: [PATCH 244/531] Update plug-in comments and help --- grizzly/args.py | 9 +++++---- grizzly/common/plugins.py | 11 ++++++----- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/grizzly/args.py b/grizzly/args.py index 52826251..683d83a2 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -92,8 +92,8 @@ def __init__(self): self.launcher_grp.add_argument( "--platform", default="ffpuppet", - help="Platforms available: %s (default: %%(default)s)" - % ", ".join(scan_plugins("grizzly_targets")), + help="Installed Platforms (Targets): %s (default: %%(default)s)" + % ", ".join(sorted(scan_plugins("grizzly_targets"))), ) self.launcher_grp.add_argument("-p", "--prefs", help="prefs.js file to use") self.launcher_grp.add_argument( @@ -161,7 +161,7 @@ def sanity_check(self, args): super().sanity_check(args) # pylint: disable=no-member targets = scan_plugins("grizzly_targets") if not targets: - self.parser.error("No Target platforms are installed") + self.parser.error("No Platforms (Targets) are installed") if "binary" not in self._sanity_skip and not isfile(args.binary): self.parser.error("file not found: %r" % args.binary) @@ -225,7 +225,8 @@ 
def __init__(self): self._sanity_skip.add("tool") self.parser.add_argument( "adapter", - help="Available adapters: %s" % ", ".join(scan_plugins("grizzly_adapters")), + help="Installed Adapters: %s" + % ", ".join(sorted(scan_plugins("grizzly_adapters"))), ) self.parser.add_argument( "--enable-profiling", diff --git a/grizzly/common/plugins.py b/grizzly/common/plugins.py index e17ca9f0..ba1cde90 100644 --- a/grizzly/common/plugins.py +++ b/grizzly/common/plugins.py @@ -20,12 +20,13 @@ def load(name, group, base_type): """Load a plug-in. Args: - name (str): - group (str): - base_type (*): Used to validate loaded objects. + name (str): Name of entry point to load. + group (str): Group containing entry point. + base_type (type): Used to validate loaded objects. Returns: *: Python object. """ + assert isinstance(base_type, type) for entry in iter_entry_points(group): if entry.name == name: LOG.debug("loading %r (%s)", name, base_type.__name__) @@ -39,13 +40,13 @@ def load(name, group, base_type): def scan(group): - """Scan for available plug-ins. + """Scan for installed plug-ins. Args: group (str): Entry point group to scan. Returns: - list: Names of available entry points. + list: Names of installed entry points. """ found = list() LOG.debug("scanning %r", group) From 7099e1c8aa48ddf5220e099fff682e9e3527f91a Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 31 Mar 2021 16:27:55 -0700 Subject: [PATCH 245/531] Update and add tests for CommonArgs and GrizzlyArgs * This also includes some refactoring --- grizzly/args.py | 47 +++++------ grizzly/reduce/test_main.py | 6 +- grizzly/replay/args.py | 23 ++---- grizzly/replay/replay.py | 6 +- grizzly/replay/test_args.py | 61 +++++++++++++++ grizzly/replay/test_main.py | 61 +-------------- grizzly/test_args.py | 150 ++++++++++++++++++++++++++++++++---- 7 files changed, 232 insertions(+), 122 deletions(-) create mode 100644 grizzly/replay/test_args.py diff --git a/grizzly/args.py b/grizzly/args.py index 683d83a2..faf5ea62 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -4,7 +4,6 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
from argparse import ArgumentParser, HelpFormatter from logging import CRITICAL, DEBUG, ERROR, INFO, WARNING -from os import listdir from os.path import exists, isdir, isfile from .common.plugins import scan as scan_plugins @@ -34,7 +33,6 @@ class CommonArgs: IGNORE = ("log-limit", "timeout") def __init__(self): - super().__init__() # log levels for console logging self._level_map = { "CRIT": CRITICAL, @@ -103,6 +101,9 @@ def __init__(self): help="Number of iterations performed before relaunching the browser" " (default: %(default)s)", ) + self.launcher_grp.add_argument( + "--rr", action="store_true", help="Use rr (Linux only)" + ) self.launcher_grp.add_argument( "--time-limit", type=int, @@ -157,40 +158,32 @@ def parse_args(self, argv=None): return args def sanity_check(self, args): - if hasattr(super(), "sanity_check"): - super().sanity_check(args) # pylint: disable=no-member targets = scan_plugins("grizzly_targets") if not targets: self.parser.error("No Platforms (Targets) are installed") if "binary" not in self._sanity_skip and not isfile(args.binary): - self.parser.error("file not found: %r" % args.binary) + self.parser.error("file not found: %r" % (args.binary,)) # sanitize ignore list args.ignore = {arg.lower() for arg in args.ignore} for ignore in args.ignore: if ignore not in self.IGNORABLE: - self.parser.error("Unrecognized ignore value: %s" % ignore) - - if "input" not in self._sanity_skip and args.input: - if not exists(args.input): - self.parser.error("%r does not exist" % args.input) - elif isdir(args.input) and not listdir(args.input): - self.parser.error("%r is empty" % args.input) + self.parser.error("Unrecognized ignore value %r" % (ignore,)) # check log level log_level = self._level_map.get(args.log_level.upper(), None) if log_level is None: - self.parser.error("Invalid log-level %r" % args.log_level) + self.parser.error("Invalid log-level %r" % (args.log_level,)) args.log_level = log_level if args.log_limit < 0: self.parser.error("--log-limit must be >= 0") - args.log_limit *= 1048576 + args.log_limit *= 1_048_576 if args.memory < 0: - self.parser.error("-m/--memory must be >= 0") - args.memory *= 1048576 + self.parser.error("--memory must be >= 0") + args.memory *= 1_048_576 if args.relaunch < 1: self.parser.error("--relaunch must be >= 1") @@ -198,21 +191,24 @@ def sanity_check(self, args): if args.extension: for ext in args.extension: if not exists(ext): - self.parser.error("%r does not exist" % ext) + self.parser.error("%r does not exist" % (ext,)) if not isdir(ext) or (isfile(ext) and ext.endswith(".xpi")): self.parser.error("Extension must be a folder or .xpi") if args.platform not in targets: - self.parser.error("Unsupported platform %r" % args.platform) + self.parser.error("Platform %r not installed" % (args.platform,)) if args.prefs and not isfile(args.prefs): - self.parser.error("-p/--prefs not found %r" % args.prefs) + self.parser.error("--prefs file not found") + + if args.rr and args.valgrind: + self.parser.error("--rr and --valgrind are mutually exclusive") if args.time_limit is not None and args.time_limit < 1: - self.parser.error("--time-limit must be at least 1") + self.parser.error("--time-limit must be >= 1") if args.timeout is not None and args.timeout < 1: - self.parser.error("--timeout must be at least 1") + self.parser.error("--timeout must be >= 1") if "tool" not in self._sanity_skip: if args.tool is not None and not args.fuzzmanager: @@ -256,9 +252,6 @@ def __init__(self): self.launcher_grp.add_argument( "--coverage", action="store_true", 
help="Enable coverage collection" ) - self.launcher_grp.add_argument( - "--rr", action="store_true", help="Use RR (Linux only)" - ) self.reporter_grp.add_argument( "-c", @@ -292,6 +285,9 @@ def sanity_check(self, args): "--fuzzmanager and --s3-fuzzmanager are mutually exclusive" ) + if args.input and not exists(args.input): + self.parser.error("%r does not exist" % (args.input,)) + if args.limit < 0: self.parser.error("--limit must be >= 0 (0 = no limit)") @@ -299,6 +295,3 @@ def sanity_check(self, args): self.parser.error( "--tool can only be given with --fuzzmanager/--s3-fuzzmanager" ) - - if args.rr and args.valgrind: - self.parser.error("'--rr' and '--valgrind' cannot be used together") diff --git a/grizzly/reduce/test_main.py b/grizzly/reduce/test_main.py index e83ab372..c558ea87 100644 --- a/grizzly/reduce/test_main.py +++ b/grizzly/reduce/test_main.py @@ -24,10 +24,10 @@ def test_args_01(capsys, tmp_path, mocker): """test args in common with grizzly.replay""" # pylint: disable=import-outside-toplevel - from ..replay.test_main import test_args_01 as real_test + from ..replay.test_args import test_replay_args_01 as real_test - mocker.patch("grizzly.replay.test_main.ReplayArgs", new=ReduceArgs) - real_test(capsys, tmp_path) + mocker.patch("grizzly.replay.test_args.ReplayArgs", new=ReduceArgs) + real_test(capsys, mocker, tmp_path) def test_args_02(tmp_path): diff --git a/grizzly/replay/args.py b/grizzly/replay/args.py index 6757ae11..9a110c2e 100644 --- a/grizzly/replay/args.py +++ b/grizzly/replay/args.py @@ -2,7 +2,7 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -from os.path import isfile +from os.path import exists, isfile from ..args import CommonArgs @@ -76,10 +76,6 @@ def __init__(self): "0 == Oldest, n-1 == Newest (default: run all testcases)", ) - self.launcher_grp.add_argument( - "--rr", action="store_true", help="Use RR (Linux only)" - ) - self.reporter_grp.add_argument( "--include-test", action="store_true", @@ -90,22 +86,19 @@ def sanity_check(self, args): super().sanity_check(args) if args.any_crash and args.sig is not None: - self.parser.error("signature is ignored when running with '--any-crash'") + self.parser.error("signature is ignored when running with --any-crash") if args.idle_threshold and args.idle_delay <= 0: - self.parser.error("'--idle-delay' value must be positive") + self.parser.error("--idle-delay value must be positive") - if args.min_crashes < 1: - self.parser.error("'--min-crashes' value must be positive") + if "input" not in self._sanity_skip and not exists(args.input): + self.parser.error("%r does not exist" % (args.input,)) - if args.no_harness: - args.relaunch = 1 + if args.min_crashes < 1: + self.parser.error("--min-crashes value must be positive") if args.repeat < 1: - self.parser.error("'--repeat' value must be positive") - - if args.rr and args.valgrind: - self.parser.error("'--rr' and '--valgrind' cannot be used together") + self.parser.error("--repeat value must be positive") if args.sig is not None and not isfile(args.sig): self.parser.error("signature file not found: %r" % (args.sig,)) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 5341c19f..71807200 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -587,7 +587,11 @@ def main(cls, args): ) LOG.info("Using time limit: %ds, timeout: %ds", time_limit, timeout) repeat = max(args.min_crashes, 
args.repeat) - relaunch = min(args.relaunch, repeat) + if args.no_harness: + LOG.debug("no-harness enabled, forcing relaunch=1") + relaunch = 1 + else: + relaunch = min(args.relaunch, repeat) LOG.info( "Repeat: %d, Minimum crashes: %d, Relaunch %d", repeat, diff --git a/grizzly/replay/test_args.py b/grizzly/replay/test_args.py new file mode 100644 index 00000000..8935faf0 --- /dev/null +++ b/grizzly/replay/test_args.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +""" +unit tests for grizzly.replay.args +""" +from pytest import mark, raises + +from .args import ReplayArgs + + +def test_replay_args_01(capsys, mocker, tmp_path): + """test parsing args""" + mocker.patch("grizzly.args.scan_plugins", autospec=True, return_value=["targ1"]) + # missing args tests + with raises(SystemExit): + ReplayArgs().parse_args([]) + # specified prefs.js missing + exe = tmp_path / "binary" + exe.touch() + # test success + ReplayArgs().parse_args([str(exe), str(exe), "--platform", "targ1"]) + # test missing input + with raises(SystemExit): + ReplayArgs().parse_args(argv=[str(exe), "missing", "--platform", "targ1"]) + assert "error: 'missing' does not exist" in capsys.readouterr()[-1] + + +@mark.parametrize( + "args, msg", + [ + # test any-crash with signature + ( + ["--any-crash", "--sig", "x"], + "error: signature is ignored when running with --any-crash", + ), + # test in valid idle_delay + ( + ["--idle-threshold", "1", "--idle-delay", "-1"], + "error: --idle-delay value must be positive", + ), + # test invalid min-crashes value + (["--min-crashes", "0"], "error: --min-crashes value must be positive"), + # test invalid repeat value + (["--repeat", "-1"], "error: --repeat value must be positive"), + # test missing signature file + (["--sig", "missing"], "error: signature file not found"), + ], +) +def test_replay_args_02(capsys, mocker, tmp_path, args, msg): + """test CommonArgs.parse_args() - sanity checks""" + target = "target1" + mocker.patch("grizzly.args.scan_plugins", autospec=True, return_value=[target]) + fake_bin = tmp_path / "fake.bin" + fake_bin.touch() + with raises(SystemExit): + ReplayArgs().parse_args( + argv=[str(fake_bin), str(fake_bin), "--platform", target] + args + ) + assert msg in capsys.readouterr()[-1] diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 0532778f..25b3610b 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -8,74 +8,17 @@ """ from shutil import rmtree -from pytest import mark, raises +from pytest import mark from sapphire import SERVED_ALL from ..common import Report, TestCase, TestCaseLoadFailure -from ..replay import ReplayManager -from ..replay.args import ReplayArgs from ..session import Session from ..target import Target, TargetLaunchError, TargetLaunchTimeout +from .replay import ReplayManager from .test_replay import _fake_save_logs -def test_args_01(capsys, tmp_path): - """test parsing args""" - # missing args tests - with raises(SystemExit): - ReplayArgs().parse_args([]) - # specified prefs.js missing - exe = tmp_path / "binary" - exe.touch() - inp = tmp_path / "input" - inp.mkdir() - (inp / "somefile").touch() - (inp / "test_info.json").touch() - with raises(SystemExit): - ReplayArgs().parse_args([str(exe), str(inp / "somefile"), "--prefs", "missing"]) - assert "error: -p/--prefs not found 'missing'" in 
capsys.readouterr()[-1] - # test case directory - (inp / "prefs.js").touch() - ReplayArgs().parse_args([str(exe), str(inp)]) - # test case file - ReplayArgs().parse_args( - [str(exe), str(inp / "somefile"), "--prefs", str(inp / "prefs.js")] - ) - # test negative min-crashes value - with raises(SystemExit): - ReplayArgs().parse_args([str(exe), str(inp), "--min-crashes", "-1"]) - assert "error: '--min-crashes' value must be positive" in capsys.readouterr()[-1] - # test negative repeat value - with raises(SystemExit): - ReplayArgs().parse_args([str(exe), str(inp), "--repeat", "-1"]) - assert "error: '--repeat' value must be positive" in capsys.readouterr()[-1] - # test missing signature file - with raises(SystemExit): - ReplayArgs().parse_args([str(exe), str(inp), "--sig", "missing"]) - assert "error: signature file not found" in capsys.readouterr()[-1] - # test any crash and signature - with raises(SystemExit): - ReplayArgs().parse_args([str(exe), str(inp), "--any-crash", "--sig", "x"]) - assert ( - "error: signature is ignored when running with '--any-crash'" - in capsys.readouterr()[-1] - ) - # test multiple debuggers - with raises(SystemExit): - ReplayArgs().parse_args([str(exe), str(inp), "--rr", "--valgrind"]) - assert "'--rr' and '--valgrind' cannot be used together" in capsys.readouterr()[-1] - # test idle args - with raises(SystemExit): - ReplayArgs().parse_args( - [str(exe), str(inp), "--idle-threshold", "1", "--idle-delay", "0"] - ) - assert "'--idle-delay' value must be positive" in capsys.readouterr()[-1] - # force relaunch == 1 with --no-harness - args = ReplayArgs().parse_args([str(exe), str(inp), "--no-harness"]) - assert args.relaunch == 1 - - def test_main_01(mocker, tmp_path): """test ReplayManager.main()""" # This is a typical scenario - a test that reproduces results ~50% of the time. diff --git a/grizzly/test_args.py b/grizzly/test_args.py index d85c0b6f..7dfed9f4 100644 --- a/grizzly/test_args.py +++ b/grizzly/test_args.py @@ -3,19 +3,104 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
# pylint: disable=protected-access -from pytest import raises +from pytest import mark, raises -from .args import GrizzlyArgs +from .args import CommonArgs, GrizzlyArgs -def test_grizzly_args_01(capsys, mocker, tmp_path): - """test GrizzlyArgs.parse_args()""" +def test_common_args_01(capsys, mocker): + """test CommonArgs.parse_args()""" + mocker.patch("grizzly.args.scan_plugins", autospec=True, return_value=[]) # test help with raises(SystemExit): - GrizzlyArgs().parse_args(argv=["-h"]) - out, _ = capsys.readouterr() - assert "For addition help check out the wiki" in out + CommonArgs().parse_args(argv=["-h"]) + assert "For addition help check out the wiki" in capsys.readouterr()[0] + # test empty args + with raises(SystemExit): + CommonArgs().parse_args(argv=[]) + assert "the following arguments are required: binary" in capsys.readouterr()[-1] + + +def test_common_args_01a(capsys, mocker, tmp_path): + """test CommonArgs.parse_args()""" + mocker.patch("grizzly.args.scan_plugins", autospec=True, return_value=["targ1"]) + fake_bin = tmp_path / "fake.bin" + fake_bin.touch() + # test with missing bin + with raises(SystemExit): + CommonArgs().parse_args(argv=["missing-bin"]) + assert "error: file not found: 'missing-bin'" in capsys.readouterr()[-1] # test success + CommonArgs().parse_args(argv=[str(fake_bin), "--platform", "targ1"]) + # test invalid extension + with raises(SystemExit): + CommonArgs().parse_args(argv=[str(fake_bin), "--extension", str(fake_bin)]) + assert "error: Extension must be a folder or .xpi" in capsys.readouterr()[-1] + + +@mark.parametrize( + "args, msg, targets", + [ + # test no installed platforms + ([], "error: No Platforms (Targets) are installed", []), + # test invalid ignore value + (["--ignore", "bad"], "error: Unrecognized ignore value 'bad'", ["targ1"]), + # test invalid log level + (["--log-level", "bad"], "error: Invalid log-level 'bad'", ["targ1"]), + # test invalid log limit + (["--log-limit", "-1"], "error: --log-limit must be >= 0", ["targ1"]), + # test invalid memory limit + (["--memory", "-1"], "error: --memory must be >= 0", ["targ1"]), + # test invalid relaunch value + (["--relaunch", "0"], "error: --relaunch must be >= 1", ["targ1"]), + # test missing extension + (["--extension", "missing"], "error: 'missing' does not exist", ["targ1"]), + # test invalid platform/target + (["--platform", "bad"], "error: Platform 'bad' not installed", ["targ1"]), + # test invalid prefs file + ( + ["--platform", "targ1", "--prefs", "bad"], + "error: --prefs file not found", + ["targ1"], + ), + # test invalid time-limit + ( + ["--platform", "targ1", "--time-limit", "-1"], + "error: --time-limit must be >= 1", + ["targ1"], + ), + # test invalid timeout + ( + ["--platform", "targ1", "--timeout", "-1"], + "error: --timeout must be >= 1", + ["targ1"], + ), + # test invalid tool usage + ( + ["--platform", "targ1", "--tool", "x"], + "error: --tool can only be given with --fuzzmanager", + ["targ1"], + ), + # test enabling both rr and Valgrind + ( + ["--platform", "targ1", "--rr", "--valgrind"], + "error: --rr and --valgrind are mutually exclusive", + ["targ1"], + ), + ], +) +def test_common_args_02(capsys, mocker, tmp_path, args, msg, targets): + """test CommonArgs.parse_args()""" + mocker.patch("grizzly.args.scan_plugins", autospec=True, return_value=targets) + fake_bin = tmp_path / "fake.bin" + fake_bin.touch() + with raises(SystemExit): + CommonArgs().parse_args(argv=[str(fake_bin)] + args) + assert msg in capsys.readouterr()[-1] + + +def test_grizzly_args_01(mocker, 
tmp_path): + """test GrizzlyArgs.parse_args() - success""" fake_bin = tmp_path / "fake.bin" fake_bin.touch() mocker.patch( @@ -29,22 +114,21 @@ def test_grizzly_args_01(capsys, mocker, tmp_path): def test_grizzly_args_02(capsys, mocker): - """test GrizzlyArgs.parse_args() handling binary""" + """test GrizzlyArgs.parse_args() - handling binary""" mocker.patch("grizzly.args.scan_plugins", autospec=True, return_value=["blah"]) # test missing required args with raises(SystemExit): GrizzlyArgs().parse_args(argv=[]) - _, err = capsys.readouterr() + err = capsys.readouterr()[-1] assert "the following arguments are required: binary, adapter" in err # test missing binary with raises(SystemExit): GrizzlyArgs().parse_args(argv=["missing_bin", "adapter"]) - _, err = capsys.readouterr() - assert "error: file not found: 'missing_bin'" in err + assert "error: file not found: 'missing_bin'" in capsys.readouterr()[-1] def test_grizzly_args_03(capsys, mocker, tmp_path): - """test GrizzlyArgs.parse_args() handling Adapter""" + """test GrizzlyArgs.parse_args() - handling Adapter""" scan_plugins = mocker.patch("grizzly.args.scan_plugins", autospec=True) fake_bin = tmp_path / "fake.bin" fake_bin.touch() @@ -52,14 +136,46 @@ def test_grizzly_args_03(capsys, mocker, tmp_path): scan_plugins.side_effect = (["targ1"], [], ["targ1"], []) with raises(SystemExit): GrizzlyArgs().parse_args(argv=[str(fake_bin), "adpt", "--platform", "targ1"]) - _, err = capsys.readouterr() - assert "error: No Adapters are installed" in err + assert "error: No Adapters are installed" in capsys.readouterr()[-1] # invalid adapter name scan_plugins.side_effect = (["targ1"], ["a1", "a2"], ["targ1"], ["a1", "a2"]) with raises(SystemExit): GrizzlyArgs().parse_args(argv=[str(fake_bin), "missing", "--platform", "targ1"]) - _, err = capsys.readouterr() - assert "error: Adapter 'missing' is not installed" in err + assert "error: Adapter 'missing' is not installed" in capsys.readouterr()[-1] -# TODO: Add CommonArgs tests +@mark.parametrize( + "args, msg", + [ + # test invalid collect value + (["--collect", "0"], "error: --collect must be greater than 0"), + # test enabling both fuzzmanager and s3-fuzzmanager reporters + ( + ["--fuzzmanager", "--s3-fuzzmanager"], + "error: --fuzzmanager and --s3-fuzzmanager are mutually exclusive", + ), + # test missing input + (["--input", "missing"], "error: 'missing' does not exist"), + # test invalid limit value + (["--limit", "-1"], "error: --limit must be >= 0 (0 = no limit)"), + # test tool + ( + ["--tool", "x"], + "error: --tool can only be given with --fuzzmanager/--s3-fuzzmanager", + ), + ], +) +def test_grizzly_args_04(capsys, mocker, tmp_path, args, msg): + """test CommonArgs.parse_args()""" + mocker.patch( + "grizzly.args.scan_plugins", + autospec=True, + side_effect=["targ1", "adpt", "targ1", "adpt"], + ) + fake_bin = tmp_path / "fake.bin" + fake_bin.touch() + with raises(SystemExit): + GrizzlyArgs().parse_args( + argv=[str(fake_bin), "adpt", "--platform", "targ1"] + args + ) + assert msg in capsys.readouterr()[-1] From 7963df30d148af12907c0bf3c00f41c7db15c761 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 31 Mar 2021 19:39:28 -0700 Subject: [PATCH 246/531] [tests] Fix tests on Python version < 3.8 --- grizzly/common/test_utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/grizzly/common/test_utils.py b/grizzly/common/test_utils.py index afa617ed..46d2491b 100644 --- a/grizzly/common/test_utils.py +++ b/grizzly/common/test_utils.py @@ -46,4 +46,7 @@ def 
test_configure_logging_01(mocker, env, log_level): mocker.patch("grizzly.common.utils.getenv", autospec=True, return_value=env) configure_logging(log_level) assert config.call_count == 1 - assert config.call_args.kwargs["level"] == (DEBUG if env != "0" else log_level) + if env != "0": + assert config.call_args[-1]["level"] == DEBUG + else: + assert config.call_args[-1]["level"] == log_level From 685650487226f88e6a4e444a4bfa4953de8d38b9 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 1 Apr 2021 10:40:42 -0700 Subject: [PATCH 247/531] Move sanitizer_opts() to common.utils --- grizzly/common/iomanager.py | 2 +- grizzly/common/storage.py | 3 +-- grizzly/common/test_utils.py | 37 ++++++++++++++++++++++++++++++++++- grizzly/common/utils.py | 29 ++++++++++++++++++++++++++- grizzly/target/__init__.py | 9 +-------- grizzly/target/target.py | 23 +--------------------- grizzly/target/test_target.py | 30 +--------------------------- 7 files changed, 69 insertions(+), 64 deletions(-) diff --git a/grizzly/common/iomanager.py b/grizzly/common/iomanager.py index 7abb14a7..2e2d67e0 100644 --- a/grizzly/common/iomanager.py +++ b/grizzly/common/iomanager.py @@ -8,8 +8,8 @@ from sapphire.server_map import ServerMap -from ..target import sanitizer_opts from .storage import TestCase, TestFile +from .utils import sanitizer_opts __all__ = ("IOManager",) __author__ = "Tyson Smith" diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index 8549bee7..88074915 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -16,8 +16,7 @@ from zipfile import BadZipfile, ZipFile from zlib import error as zlib_error -from ..target import sanitizer_opts -from .utils import grz_tmp +from .utils import grz_tmp, sanitizer_opts __all__ = ("TestCase", "TestFile", "TestCaseLoadFailure", "TestFileExists") __author__ = "Tyson Smith" diff --git a/grizzly/common/test_utils.py b/grizzly/common/test_utils.py index 46d2491b..53d088e5 100644 --- a/grizzly/common/test_utils.py +++ b/grizzly/common/test_utils.py @@ -6,7 +6,7 @@ from pytest import mark -from .utils import configure_logging, grz_tmp +from .utils import configure_logging, grz_tmp, sanitizer_opts def test_grz_tmp_01(mocker, tmp_path): @@ -50,3 +50,38 @@ def test_configure_logging_01(mocker, env, log_level): assert config.call_args[-1]["level"] == DEBUG else: assert config.call_args[-1]["level"] == log_level + + +@mark.parametrize( + "to_parse, expected", + [ + # test empty string + ("", {}), + # test single value + ("test_value=true", {"test_value": "true"}), + # test multiple values + ("a=1:b=-2:C=3", {"a": "1", "b": "-2", "C": "3"}), + # path parsing + ( + "p1='z:/a':p2='x:\\a.1':p3='/test/path/':p4='':p5=\"x:/a.a\"", + { + "p1": "'z:/a'", + "p2": "'x:\\a.1'", + "p3": "'/test/path/'", + "p4": "''", + "p5": '"x:/a.a"', + }, + ), + # test platform specific parsing + ( + "bar=1:file='%s':foo=2" % (__file__,), + {"bar": "1", "file": "'%s'" % (__file__,), "foo": "2"}, + ), + ], +) +def test_sanitizer_opts_01(to_parse, expected): + """test sanitizer_opts()""" + parsed = sanitizer_opts(to_parse) + assert len(parsed) == len(expected) + for key in expected: + assert expected[key] == parsed[key] diff --git a/grizzly/common/utils.py b/grizzly/common/utils.py index e0fbf9cf..0ab362e1 100644 --- a/grizzly/common/utils.py +++ b/grizzly/common/utils.py @@ -5,9 +5,16 @@ from logging import DEBUG, basicConfig from os import getenv, makedirs from os.path import join as pathjoin +from re import split as resplit from tempfile import gettempdir -__all__ = 
("ConfigError", "configure_logging", "grz_tmp", "TIMEOUT_DELAY") +__all__ = ( + "ConfigError", + "configure_logging", + "grz_tmp", + "sanitizer_opts", + "TIMEOUT_DELAY", +) __author__ = "Tyson Smith" __credits__ = ["Tyson Smith"] @@ -49,3 +56,23 @@ def grz_tmp(*subdir): path = pathjoin(gettempdir(), "grizzly", *subdir) makedirs(path, exist_ok=True) return path + + +def sanitizer_opts(env_data): + """Parse the values defined in given *SAN_OPTIONS environment variable. + For example "ASAN_OPTIONS=debug=false:log_path='/test/file.log'" + would return {"debug": "false", "log_path": "'/test/file.log'"} + + Args: + env_var (str): *SAN_OPTIONS environment variable to parse. + + Returns: + dict: Sanitized values from environment. + """ + opts = dict() + for opt in resplit(r":(?![\\|/])", env_data): + if not opt: + continue + key, val = opt.split("=") + opts[key] = val + return opts diff --git a/grizzly/target/__init__.py b/grizzly/target/__init__.py index 97cd4fd4..c879bdc4 100644 --- a/grizzly/target/__init__.py +++ b/grizzly/target/__init__.py @@ -2,20 +2,13 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -from .target import ( - Target, - TargetError, - TargetLaunchError, - TargetLaunchTimeout, - sanitizer_opts, -) +from .target import Target, TargetError, TargetLaunchError, TargetLaunchTimeout __all__ = ( "Target", "TargetError", "TargetLaunchError", "TargetLaunchTimeout", - "sanitizer_opts", ) __author__ = "Tyson Smith" __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] diff --git a/grizzly/target/target.py b/grizzly/target/target.py index e1d20107..d3f263b5 100644 --- a/grizzly/target/target.py +++ b/grizzly/target/target.py @@ -5,36 +5,15 @@ from abc import ABCMeta, abstractmethod, abstractproperty from logging import getLogger from os.path import isfile -from re import split as resplit from threading import Lock -__all__ = ("Target", "sanitizer_opts") +__all__ = ("Target", "TargetError", "TargetLaunchError") __author__ = "Tyson Smith" __credits__ = ["Tyson Smith", "Jesse Schwartzentruber"] LOG = getLogger(__name__) -def sanitizer_opts(env_data): - """Parse the values defined in given *SAN_OPTIONS environment variable. - For example "ASAN_OPTIONS=debug=false:log_path='/test/file.log'" - would return {"debug": "false", "log_path": "'/test/file.log'"} - - Args: - env_var (str): *SAN_OPTIONS environment variable to parse. - - Returns: - dict: Sanitized values from environment. - """ - opts = dict() - for opt in resplit(r":(?![\\|/])", env_data): - if not opt: - continue - key, val = opt.split("=") - opts[key] = val - return opts - - class TargetError(Exception): """Raised by Target""" diff --git a/grizzly/target/test_target.py b/grizzly/target/test_target.py index 572b78da..2a6ec935 100644 --- a/grizzly/target/test_target.py +++ b/grizzly/target/test_target.py @@ -3,7 +3,7 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
# pylint: disable=protected-access -from .target import Target, sanitizer_opts +from .target import Target class SimpleTarget(Target): @@ -58,31 +58,3 @@ def test_target_01(tmp_path): target.add_abort_token("none!") target.dump_coverage() target.reverse(1, 2) - - -def test_sanitizer_opts_01(tmp_path): - """test sanitizer_opts()""" - # test empty string - assert not sanitizer_opts("") - # test single value - opts = sanitizer_opts("test_value=true") - assert len(opts) == 1 - assert opts["test_value"] == "true" - # test multiple values - opts = sanitizer_opts("a=1:b=-2:C=3") - assert len(opts) == 3 - assert opts["a"] == "1" - assert opts["b"] == "-2" - assert opts["C"] == "3" - # path parsing - opts = sanitizer_opts("p1='z:/a':p2='x:\\a.1':p3='/test/path/':p4='':p5=\"x:/a.a\"") - assert opts["p1"] == "'z:/a'" - assert opts["p2"] == "'x:\\a.1'" - assert opts["p3"] == "'/test/path/'" - assert opts["p4"] == "''" - assert opts["p5"] == '"x:/a.a"' - # platform specific parsing - fake_file = tmp_path / "fake.log" - opts = sanitizer_opts("bar=1:file='%s':foo=2" % (str(fake_file),)) - assert len(opts) == 3 - assert opts["file"] == "'%s'" % (str(fake_file),) From 3d29c8462b13c856b332fd741c740b01647df8b3 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 1 Apr 2021 10:51:08 -0700 Subject: [PATCH 248/531] [tests] Add tests for ReduceFuzzManagerIDArgs and ReduceFuzzManagerIDQualityArgs --- grizzly/reduce/test_main.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/grizzly/reduce/test_main.py b/grizzly/reduce/test_main.py index c558ea87..c679f14b 100644 --- a/grizzly/reduce/test_main.py +++ b/grizzly/reduce/test_main.py @@ -14,7 +14,7 @@ from ..common import TestCaseLoadFailure from ..target import TargetLaunchError, TargetLaunchTimeout from . 
import ReduceManager -from .args import ReduceArgs +from .args import ReduceArgs, ReduceFuzzManagerIDArgs, ReduceFuzzManagerIDQualityArgs from .exceptions import GrizzlyReduceBaseException LOG = getLogger(__name__) @@ -65,6 +65,24 @@ def test_args_02(tmp_path): ReduceArgs().parse_args([str(exe), str(inp), "--report-period", "15"]) +def test_args_03(tmp_path): + """test ReduceFuzzManagerIDArgs""" + exe = tmp_path / "binary" + exe.touch() + ReduceFuzzManagerIDArgs().parse_args([str(exe), "123"]) + + +def test_args_04(capsys, tmp_path): + """test ReduceFuzzManagerIDQualityArgs""" + exe = tmp_path / "binary" + exe.touch() + with raises(SystemExit): + ReduceFuzzManagerIDQualityArgs().parse_args( + [str(exe), "123", "--quality", "-1"] + ) + assert "error: '--quality' value cannot be negative" in capsys.readouterr()[-1] + + @pytest.mark.parametrize( "patch_func, side_effect, return_value, kwargs, result", [ From a725b20e76a970cc2535960fec89d49f513892f3 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 6 Apr 2021 12:32:25 -0700 Subject: [PATCH 249/531] [tests] Fix test_status_reporter_03 on Windows --- grizzly/common/test_status_reporter.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/grizzly/common/test_status_reporter.py b/grizzly/common/test_status_reporter.py index 62e985cd..8ad1ac7e 100644 --- a/grizzly/common/test_status_reporter.py +++ b/grizzly/common/test_status_reporter.py @@ -92,9 +92,7 @@ def test_status_reporter_03(mocker, disk, memory, getloadavg): mocker.patch("grizzly.common.status_reporter.getloadavg", None) else: mocker.patch( - "grizzly.common.status_reporter.getloadavg", - autospec=True, - side_effect=getloadavg, + "grizzly.common.status_reporter.getloadavg", side_effect=getloadavg ) sysinfo = StatusReporter._sys_info() if disk.free < GBYTES or memory.available < GBYTES: From f4971e1ad101921f0a4ad599ee1404dee92eb7ce Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 15 Apr 2021 14:48:19 -0700 Subject: [PATCH 250/531] [ci] Use matrix formatting and enable Windows --- .pre-commit-config.yaml | 33 +++--- .taskcluster.yml | 231 +++++++++++++++++----------------------- CODE_OF_CONDUCT.md | 4 +- tox.ini | 13 ++- 4 files changed, 122 insertions(+), 159 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 56370125..03c61681 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,24 +1,8 @@ repos: - - repo: https://github.com/pycqa/isort - rev: 5.7.0 - hooks: - - id: isort - repo: https://github.com/asottile/yesqa rev: v1.2.2 hooks: - id: yesqa - - repo: https://github.com/ambv/black - rev: 20.8b1 - hooks: - - id: black - - repo: https://github.com/pycqa/pylint - rev: pylint-2.7.3 - hooks: - - id: pylint - - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.0 - hooks: - - id: flake8 - repo: https://github.com/pre-commit/pre-commit-hooks rev: v3.4.0 hooks: @@ -35,18 +19,25 @@ repos: args: ['--django'] - id: check-json - id: requirements-txt-fixer - - repo: https://github.com/codespell-project/codespell - rev: v2.0.0 - hooks: - - id: codespell - exclude_types: [json] - repo: https://github.com/marco-c/taskcluster_yml_validator rev: v0.0.7 hooks: - id: taskcluster_yml + - repo: https://github.com/MozillaSecurity/orion + rev: v0.0.2 + hooks: + - id: orion_ci - repo: meta hooks: - id: check-useless-excludes + - repo: local + hooks: + - id: lint + name: Run linters + entry: tox -e lint + language: system + pass_filenames: false + types: [python] default_language_version: python: python3 diff --git a/.taskcluster.yml 
b/.taskcluster.yml index aa953a56..b0676a1e 100644 --- a/.taskcluster.yml +++ b/.taskcluster.yml @@ -35,140 +35,103 @@ tasks: project_name: Grizzly + matrix: + language: python + secrets: + - type: env + secret: project/fuzzing/codecov-grizzly + name: CODECOV_TOKEN + key: token + script: + - bash + - '-xec' + - tox; tox -e codecov + jobs: + include: + - name: tests python 3.6 + version: "3.6" + env: + TOXENV: py36 + - name: tests python 3.7 + version: "3.7" + env: + TOXENV: py37 + - name: tests python 3.8 + version: "3.8" + env: + TOXENV: py38 + - name: test python 3.8 (windows) + version: "3.8" + platform: windows + env: + TOXENV: py38 + - name: tests python 3.9 + version: "3.9" + env: + TOXENV: py39 + - name: lint + version: "3.9" + env: + TOXENV: lint + script: + - tox + - name: precommit + version: "3.9" + env: + TOXENV: precommit + script: + - tox + - name: PyPI upload + version: "3.8" + env: + TOXENV: pypi + script: + - tox + when: + release: true + all_passed: true + secrets: + - type: env + secret: project/fuzzing/pypi-grizzly + name: TWINE_USERNAME + key: username + - type: env + secret: project/fuzzing/pypi-grizzly + name: TWINE_PASSWORD + key: password + in: $if: 'tasks_for in ["github-push", "github-release"] || (tasks_for == "github-pull-request" && event["action"] in ["opened", "reopened", "synchronize"])' then: - $flatten: - - $map: [] - #- {msys: 'NmOU83KwRJGjk-btMbOOPA', toxenv: 'py38', name: 'tests python 3.8'} - each(build): - taskId: {$eval: as_slugid(build.toxenv + '-win')} - provisionerId: proj-fuzzing - workerType: ci-windows - created: {$fromNow: ''} - deadline: {$fromNow: '1 hour'} - scopes: - - secrets:get:project/fuzzing/${codecov_secret} - dependencies: - - ${build.msys} - payload: - env: - MSYSTEM: MINGW64 - TOXENV: ${build.toxenv} - CODECOV_SECRET: ${codecov_secret} - FETCH_REF: ${fetch_ref} - FETCH_REV: ${fetch_rev} - CLONE_REPO: ${http_repo} - mounts: - - format: tar.bz2 - content: - taskId: ${build.msys} - artifact: public/msys2.tar.bz2 - directory: . - command: - - "set HOME=%CD%" - - "set ARTIFACTS=%CD%" - - "set PATH=%CD%\\msys64\\MINGW64\\bin;%PATH%" - - "set PATH=%CD%\\msys64\\usr\\bin;%PATH%" - - >- - bash -x -e -c " - . py-ci.sh; - clone; - tox; - tox_codecov;" - features: - taskclusterProxy: true - maxRunTime: 900 - metadata: - name: ${project_name} ${build.name} (windows) - description: ${project_name} ${build.name} (windows) - owner: '${user}@users.noreply.github.com' - source: ${http_repo}/raw/${fetch_rev}/.taskcluster.yml - - $map: - - {image: ci-py-36, toxenv: 'py36', name: 'tests python 3.6'} - - {image: ci-py-37, toxenv: 'py37', name: 'tests python 3.7'} - - {image: ci-py-38, toxenv: 'py38', name: 'tests python 3.8'} - - {image: ci-py-39, toxenv: 'py39', name: 'tests python 3.9'} - - {image: ci-py-39, toxenv: 'lint', name: 'lint'} - each(build): - taskId: {$eval: as_slugid(build.toxenv)} - provisionerId: proj-fuzzing - workerType: ci - created: {$fromNow: ''} - deadline: {$fromNow: '1 hour'} - scopes: - - secrets:get:project/fuzzing/${codecov_secret} - payload: - maxRunTime: 900 - image: - type: indexed-image - path: public/${build.image}.tar.zst - namespace: project.fuzzing.orion.${build.image}.master - env: - TOXENV: ${build.toxenv} - CODECOV_SECRET: ${codecov_secret} - FETCH_REF: ${fetch_ref} - FETCH_REV: ${fetch_rev} - CLONE_REPO: ${http_repo} - features: - taskclusterProxy: true - command: - - /bin/bash - - '--login' - - '-x' - - '-e' - - '-c' - - >- - . 
py-ci.sh; - clone; - tox; - if [[ "${build.toxenv}" != "lint" ]]; then tox_codecov; fi; - metadata: - name: ${project_name} ${build.name} - description: ${project_name} ${build.name} - owner: '${user}@users.noreply.github.com' - source: ${http_repo}/raw/${fetch_rev}/.taskcluster.yml - - $if: 'tasks_for in ["github-release"]' - then: - - provisionerId: proj-fuzzing - workerType: ci - dependencies: - - {$eval: as_slugid("py36")} - - {$eval: as_slugid("py37")} - - {$eval: as_slugid("py38")} - #- {$eval: as_slugid("py38-win")} - - {$eval: as_slugid("py39")} - - {$eval: as_slugid("lint")} - created: {$fromNow: ''} - deadline: {$fromNow: '1 hour'} - scopes: - - secrets:get:project/fuzzing/${pypi_secret} - payload: - maxRunTime: 900 - image: - type: indexed-image - path: public/ci-py-38.tar.zst - namespace: project.fuzzing.orion.ci-py-38.master - features: - taskclusterProxy: true - env: - TOXENV: pypi - FETCH_REF: ${fetch_ref} - FETCH_REV: ${fetch_rev} - CLONE_REPO: ${http_repo} - PYPI_SECRET: ${pypi_secret} - command: - - /bin/bash - - '--login' - - '-x' - - '-e' - - '-c' - - >- - . py-ci.sh; - clone; - tox_pypi; - metadata: - name: ${project_name} PyPI upload - description: ${project_name} PyPI upload - owner: '${user}@users.noreply.github.com' - source: ${http_repo}/raw/${fetch_rev}/.taskcluster.yml + - created: {$fromNow: ''} + deadline: {$fromNow: '1 hour'} + provisionerId: proj-fuzzing + workerType: ci + payload: + features: + taskclusterProxy: true + maxRunTime: 3600 + image: + type: indexed-image + path: public/orion-decision.tar.zst + namespace: project.fuzzing.orion.orion-decision.master + env: + PROJECT_NAME: ${project_name} + CI_MATRIX: {$json: {$eval: matrix}} + GITHUB_EVENT: {$json: {$eval: event}} + GITHUB_ACTION: ${tasks_for} + TASKCLUSTER_NOW: ${now} + command: + - ci-decision + - -v + scopes: + - queue:create-task:highest:proj-fuzzing/ci + - queue:create-task:highest:proj-fuzzing/ci-* + - queue:scheduler-id:taskcluster-github + - secrets:get:project/fuzzing/codecov-grizzly + - secrets:get:project/fuzzing/pypi-grizzly + metadata: + name: ${project_name} CI decision + description: Schedule CI tasks for ${project_name} + owner: twsmith@mozilla.com + source: https://github.com/MozillaSecurity/grizzly diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 498baa3f..041fbb69 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,8 +1,8 @@ # Community Participation Guidelines -This repository is governed by Mozilla's code of conduct and etiquette guidelines. +This repository is governed by Mozilla's code of conduct and etiquette guidelines. For more details, please read the -[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). +[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). ## How to Report For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. 
diff --git a/tox.ini b/tox.ini index fa4cb42e..8f91ecce 100644 --- a/tox.ini +++ b/tox.ini @@ -14,11 +14,13 @@ extras = passenv = BUILD_CACHE CI + CI_* CODECOV_* TOXENV TRAVIS TRAVIS_* TWINE_* + VCS_* usedevelop = true [testenv:codecov] @@ -29,12 +31,19 @@ deps = coverage[toml] skip_install = true +[testenv:precommit] +commands = + pre-commit run -a +deps = + pre-commit +skip_install = true + [testenv:lint] allowlist_externals = bash commands = - isort {toxinidir} - black {toxinidir} + isort --check-only {toxinidir} + black --check {toxinidir} # codespell trips over the regex in 'sapphire/worker.py' saying 'sHTTP ==> https' # https://github.com/codespell-project/codespell/issues/1774 # ignoring it is broken so we need to ignore the file From 311c3c1e1768c5e219eaa8fad567e76927f81fd9 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 27 Apr 2021 12:22:15 -0700 Subject: [PATCH 251/531] Update README.md --- README.md | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index c6c74dbe..06e55028 100644 --- a/README.md +++ b/README.md @@ -5,22 +5,29 @@ Grizzly [![Matrix](https://img.shields.io/badge/dynamic/json?color=green&label=chat&query=%24.chunk[%3F(%40.canonical_alias%3D%3D%22%23fuzzing%3Amozilla.org%22)].num_joined_members&suffix=%20users&url=https%3A%2F%2Fmozilla.modular.im%2F_matrix%2Fclient%2Fr0%2FpublicRooms&style=flat&logo=matrix)](https://riot.im/app/#/room/#fuzzing:mozilla.org) [![PyPI](https://img.shields.io/pypi/v/grizzly-framework)](https://pypi.org/project/grizzly-framework) -Grizzly is a general purpose browser fuzzing framework made up of multiple modules. -The intention is to create a platform that can be extended by the creation of adapters -and targets to support different fuzzers that target browsers. -An adapter is used to wrap an existing fuzzer to allow it to be run via Grizzly. -Adapters take the content output by fuzzers and transform it (if needed) into a format that can -be served to and processed by the browser. +Grizzly is a modular general purpose browser fuzzing framework. The goal is to create a platform that can be extended via the creation of plug-ins to support multiple combinations of browsers and fuzzers. An Adapter is used to add support for a fuzzer and a Target to add support for a browser. + Cross platform compatibility is available for Windows, Linux and MacOS. However not all features may be available. For additional information please check out the [wiki](https://github.com/MozillaSecurity/grizzly/wiki) or the [announcement](https://blog.mozilla.org/security/2019/07/10/grizzly/). -Installation +Quick Start ------------ -To install the latest version from PyPI run `python3 -m pip install grizzly-framework`. See [getting started](https://github.com/MozillaSecurity/grizzly/wiki/Getting-Started) on the wiki for more details. +Install the latest version from PyPI. For more details see [getting started](https://github.com/MozillaSecurity/grizzly/wiki/Getting-Started) on the wiki for more details. + +```python3 -m pip install grizzly-framework``` + +**Fuzz** - Run the `no-op` test adapter to check everything is working. + +```python3 -m grizzly no-op``` + +**Reduce** - [Grizzly Reduce](https://github.com/MozillaSecurity/grizzly/wiki/Grizzly-Reduce) can reduce a test case. + +```python3 -m grizzly.reduce ``` + +**Replay** - [Grizzly Replay](https://github.com/MozillaSecurity/grizzly/wiki/Grizzly-Replay) can replay a test case with different builds and debuggers. 
+ +```python3 -m grizzly.replay ``` + -Target platforms -------- -Other target platforms can be defined as [setuptools entry-points](https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins), -using the name "grizzly_targets". Targets must implement `grizzly.target.Target`. From 85812f087a1b3d227d16949d94a9178cf185e6f4 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 27 Apr 2021 12:24:09 -0700 Subject: [PATCH 252/531] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 06e55028..2f2c3691 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ For additional information please check out the [wiki](https://github.com/Mozill Quick Start ------------ -Install the latest version from PyPI. For more details see [getting started](https://github.com/MozillaSecurity/grizzly/wiki/Getting-Started) on the wiki for more details. +Install the latest version from PyPI. For more details see [getting started](https://github.com/MozillaSecurity/grizzly/wiki/Getting-Started) on the wiki. ```python3 -m pip install grizzly-framework``` From 4361751264037926de24bda6ff2f03613fc56b1a Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 27 Apr 2021 17:11:09 -0700 Subject: [PATCH 253/531] [tests] Address pylint 'consider-using-with' --- grizzly/common/storage.py | 1 + sapphire/conftest.py | 38 +++++++++++++++++--------------------- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/grizzly/common/storage.py b/grizzly/common/storage.py index 88074915..709d579e 100644 --- a/grizzly/common/storage.py +++ b/grizzly/common/storage.py @@ -539,6 +539,7 @@ def __init__(self, file_name): raise TypeError("file_name is invalid %r" % (file_name,)) # name including path relative to wwwroot self._file_name = normpath(file_name) + # pylint: disable=consider-using-with self._fp = SpooledTemporaryFile( dir=grz_tmp("storage"), max_size=self.CACHE_LIMIT, prefix="testfile_" ) diff --git a/sapphire/conftest.py b/sapphire/conftest.py index a30993a4..071661e0 100644 --- a/sapphire/conftest.py +++ b/sapphire/conftest.py @@ -98,28 +98,27 @@ def _handle_request( # will calculate the md5 hash data_hash = hashlib.md5() if t_file.md5_org is not None else None target_url = quote(t_file.url) - cli = None try: if t_file.custom_request is None: - cli = urlopen( + with urlopen( "http://%s:%d/%s" % (addr, port, target_url), timeout=10 - ) - resp_code = cli.getcode() - content_type = cli.info().get("Content-Type") - if resp_code == 200: - data_length = 0 - while True: - data = cli.read(self.rx_size) - data_length += len(data) + ) as cli: + resp_code = cli.getcode() + content_type = cli.info().get("Content-Type") + if resp_code == 200: + data_length = 0 + while True: + data = cli.read(self.rx_size) + data_length += len(data) + if data_hash is not None: + data_hash.update(data) + if len(data) < self.rx_size: + break + if throttle > 0: + # try to simulate a slow connection + time.sleep(throttle) if data_hash is not None: - data_hash.update(data) - if len(data) < self.rx_size: - break - if throttle > 0: - # try to simulate a slow connection - time.sleep(throttle) - if data_hash is not None: - data_hash = data_hash.hexdigest() + data_hash = data_hash.hexdigest() else: # custom request sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: @@ -177,9 +176,6 @@ def _handle_request( if not skip_served or t_file.code is None: t_file.code = 0 break - finally: - if cli is not None: - cli.close() self._idle.set() def 
wait(self, timeout=None): From a56e41096fb989bb6a84e231719cded85f929b8a Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Fri, 30 Apr 2021 13:34:17 -0700 Subject: [PATCH 254/531] [sapphire] Add more debug output in ConnectionManager.listener() --- sapphire/connection_manager.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/sapphire/connection_manager.py b/sapphire/connection_manager.py index daf6dbf1..a0f9a7cd 100644 --- a/sapphire/connection_manager.py +++ b/sapphire/connection_manager.py @@ -113,8 +113,7 @@ def listener(serv_sock, serv_job, max_workers, shutdown_delay=0): ) serv_job.worker_complete.wait() serv_job.worker_complete.clear() - # remove complete workers - LOG.debug("trimming worker pool") + LOG.debug("removing completed workers from worker pool") # sometimes the thread that triggered the event doesn't quite # cleanup in time, so retry (10x with 0.5 second sleep on failure) for _ in range(10): @@ -132,16 +131,20 @@ def listener(serv_sock, serv_job, max_workers, shutdown_delay=0): serv_job.exceptions.put(exc_info()) serv_job.finish() finally: - LOG.debug("listener cleaning up workers") + LOG.debug( + "shutting down listener, waiting %0.2fs for %d worker(s)...", + shutdown_delay, + len(worker_pool), + ) + # use shutdown_delay to avoid cutting off connections deadline = time() + shutdown_delay while time() < deadline: - worker_pool = list(w for w in worker_pool if not w.done) - if not worker_pool: + # wait for all running workers to exit + if all(w.done for w in worker_pool): break - # avoid cutting off connections - LOG.debug("waiting for %d worker(s)...", len(worker_pool)) sleep(0.1) else: # pragma: no cover - LOG.debug("closing remaining workers") - for worker in (w for w in worker_pool if not w.done): + worker_pool = list(w for w in worker_pool if not w.done) + LOG.debug("closing remaining %d worker(s)", len(worker_pool)) + for worker in worker_pool: worker.close() From 0735640ac84c30bcd617778d2d058b6128b0628e Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 4 May 2021 10:03:06 -0700 Subject: [PATCH 255/531] Sanity check platform specific args --- grizzly/args.py | 10 ++++++++++ grizzly/test_args.py | 19 +++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/grizzly/args.py b/grizzly/args.py index faf5ea62..6e97181c 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -5,6 +5,7 @@ from argparse import ArgumentParser, HelpFormatter from logging import CRITICAL, DEBUG, ERROR, INFO, WARNING from os.path import exists, isdir, isfile +from platform import system from .common.plugins import scan as scan_plugins from .common.utils import TIMEOUT_DELAY @@ -214,6 +215,15 @@ def sanity_check(self, args): if args.tool is not None and not args.fuzzmanager: self.parser.error("--tool can only be given with --fuzzmanager") + # check platform specific args + os_name = system() + if args.rr and os_name != "Linux": + self.parser.error("--rr is only supported on Linux") + if args.valgrind and os_name != "Linux": + self.parser.error("--valgrind is only supported on Linux") + if args.xvfb and os_name != "Linux": + self.parser.error("--xvfb is only supported on Linux") + class GrizzlyArgs(CommonArgs): def __init__(self): diff --git a/grizzly/test_args.py b/grizzly/test_args.py index 7dfed9f4..64f87737 100644 --- a/grizzly/test_args.py +++ b/grizzly/test_args.py @@ -87,11 +87,30 @@ def test_common_args_01a(capsys, mocker, tmp_path): "error: --rr and --valgrind are mutually exclusive", ["targ1"], ), + # test rr on unsupported platform + ( + 
["--platform", "targ1", "--rr"], + "error: --rr is only supported on Linux", + ["targ1"], + ), + # test Valgrind on unsupported platform + ( + ["--platform", "targ1", "--valgrind"], + "error: --valgrind is only supported on Linux", + ["targ1"], + ), + # test Xvfb on unsupported platform + ( + ["--platform", "targ1", "--xvfb"], + "error: --xvfb is only supported on Linux", + ["targ1"], + ), ], ) def test_common_args_02(capsys, mocker, tmp_path, args, msg, targets): """test CommonArgs.parse_args()""" mocker.patch("grizzly.args.scan_plugins", autospec=True, return_value=targets) + mocker.patch("grizzly.args.system", autospec=True, return_value="foo") fake_bin = tmp_path / "fake.bin" fake_bin.touch() with raises(SystemExit): From 3dcb0d53268a33595311c0be034c0e77752b4441 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Wed, 12 May 2021 15:03:59 -0700 Subject: [PATCH 256/531] Add to stackless OOM ignore list --- grizzly/common/reporter.py | 18 ++++++++++++------ grizzly/common/test_reporter.py | 7 ++++++- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/grizzly/common/reporter.py b/grizzly/common/reporter.py index e90fe91b..aa91e80e 100644 --- a/grizzly/common/reporter.py +++ b/grizzly/common/reporter.py @@ -62,7 +62,7 @@ class Report: DEFAULT_MAJOR = "NO_STACK" DEFAULT_MINOR = "0" HANG_STACK_HEIGHT = 10 - MAX_LOG_SIZE = 1048576 # 1MB + MAX_LOG_SIZE = 1_048_576 # 1MB __slots__ = ( "_crash_info", @@ -596,14 +596,20 @@ def _process_rr_trace(self, report): @staticmethod def _ignored(report): # This is here to prevent reporting stack-less crashes - # that were caused by system OOM or bogus other crashes + # that were caused by system OOM with open(report.preferred, "rb") as log_fp: log_data = log_fp.read().decode("utf-8", errors="ignore") - mem_errs = ("ERROR: Failed to mmap", ": AddressSanitizer failed to allocate") # ignore sanitizer OOMs missing stack - for msg in mem_errs: - if msg in log_data and "#0 " not in log_data: - return True + if report.stack is None: + mem_errs = ( + "ERROR: Failed to mmap", + # NOTE: max_allocation_size_mb can trigger a similar message + ": AddressSanitizer failed to allocate", + "Sanitizer: internal allocator is out of memory trying to allocate", + ) + for msg in mem_errs: + if msg in log_data: + return True # ignore Valgrind crashes if log_data.startswith("VEX temporary storage exhausted."): return True diff --git a/grizzly/common/test_reporter.py b/grizzly/common/test_reporter.py index 58a4ad22..cab07313 100644 --- a/grizzly/common/test_reporter.py +++ b/grizzly/common/test_reporter.py @@ -576,7 +576,12 @@ def test_fuzzmanager_reporter_09(mocker, tmp_path): """test FuzzManagerReporter._ignored()""" log_file = tmp_path / "test.log" log_file.touch() - report = mocker.Mock(spec=Report, path=str(tmp_path), preferred=str(log_file)) + report = mocker.Mock( + spec_set=Report, + path=str(tmp_path), + preferred=str(log_file), + stack=None, + ) # not ignored assert not FuzzManagerReporter._ignored(report) # ignored - sanitizer OOM missing stack From 81291e45a531f52c22319a957f1057b86d2d5016 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 18 May 2021 17:12:53 -0700 Subject: [PATCH 257/531] Better detection of TSan reports --- grizzly/common/reporter.py | 8 +++++--- grizzly/common/test_reporter.py | 16 ++++++++++++++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/grizzly/common/reporter.py b/grizzly/common/reporter.py index aa91e80e..ea207b95 100644 --- a/grizzly/common/reporter.py +++ b/grizzly/common/reporter.py @@ -283,18 +283,20 @@ def 
_find_sanitizer(logs): prioritize_tokens = ( "use-after-", "-buffer-overflow on", + ": data race ", ": SEGV on ", "access-violation on ", "attempting free on ", "negative-size-param", "-param-overlap", ) + fallback = None found = None for fname in (x for x in logs if "asan" in x): with open(fname, "r") as log_fp: data = log_fp.read(65536) # look for interesting crash info in the log - if "==ERROR:" in data: + if "==ERROR:" in data or "WARNING:" in data: # check for e10s forced crash if re_e10s_forced.search(data) is not None: continue @@ -310,8 +312,8 @@ def _find_sanitizer(logs): found = fname # catch all (choose the one with info for now) elif data: - found = fname - return found + fallback = fname + return found or fallback @staticmethod def _find_valgrind(logs): diff --git a/grizzly/common/test_reporter.py b/grizzly/common/test_reporter.py index cab07313..ef1f17ac 100644 --- a/grizzly/common/test_reporter.py +++ b/grizzly/common/test_reporter.py @@ -241,6 +241,22 @@ def test_report_09(tmp_path): selected = Report._find_sanitizer([str(x) for x in tmp_path.iterdir()]) assert selected is not None assert "heap-use-after-free" in Path(selected).read_text() + # test selecting TSan reports + tsan_path = tmp_path / "tsan" + tsan_path.mkdir() + (tsan_path / "log_asan_benign.txt").write_text( + "==27531==WARNING: Symbolizer buffer too small\n" + "==27531==WARNING: Symbolizer buffer too small" + ) + tsan_report = tsan_path / "log_asan_report.txt" + tsan_report.write_text( + "WARNING: ThreadSanitizer: data race (pid=26919)\n" + " Write of size 8 at 0x7f0ca2fc3400 by thread T51:\n" + " #0 memcpy /sanitizer_common_interceptors.inc:810:5 (lib+0x6656e)\n" + ) + selected = Report._find_sanitizer([str(x) for x in tsan_path.iterdir()]) + assert selected is not None + assert selected == str(tsan_report) def test_report_10(tmp_path): From f81d9248f644e8c2e1895802bbbb4e9c81c5351b Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 18 May 2021 18:20:54 -0700 Subject: [PATCH 258/531] Create FM report zip in a temporary directory --- grizzly/common/reporter.py | 57 +++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 31 deletions(-) diff --git a/grizzly/common/reporter.py b/grizzly/common/reporter.py index ea207b95..29314f1d 100644 --- a/grizzly/common/reporter.py +++ b/grizzly/common/reporter.py @@ -17,7 +17,7 @@ from re import compile as re_compile from shutil import copyfile, copyfileobj, move, rmtree from tarfile import open as tar_open -from tempfile import mkstemp +from tempfile import TemporaryDirectory, mkstemp from time import strftime from zipfile import ZIP_DEFLATED, ZipFile @@ -702,38 +702,33 @@ def _submit_report(self, report, test_cases): copyfile(screen_log, target_log) Report.tail(target_log, 10240) # limit to last 10K - # add results to a zip file - zip_name = "%s.zip" % (report.prefix,) - with ZipFile(zip_name, mode="w", compression=ZIP_DEFLATED) as zip_fp: - # add test files - for dir_name, _, dir_files in walk(report.path): - arc_path = relpath(dir_name, report.path) - for file_name in dir_files: - zip_fp.write( - pathjoin(dir_name, file_name), - arcname=pathjoin(arc_path, file_name), - ) - - # override tool name if specified - if self.tool is not None: - collector.tool = self.tool - - # announce shortDescription if crash is not in a bucket - if ( - cache_metadata["_grizzly_seen_count"] == 1 - and not cache_metadata["frequent"] - ): - LOG.info("Submitting new crash %r", cache_metadata["shortDescription"]) - # submit results to the FuzzManager server - new_entry = 
collector.submit( - report.crash_info, testCase=zip_name, testCaseQuality=self.quality - ) + with TemporaryDirectory(prefix="fm-zip", dir=grz_tmp()) as tmp_dir: + # add results to a zip file + zip_name = pathjoin(tmp_dir, "%s.zip" % (report.prefix,)) + with ZipFile(zip_name, mode="w", compression=ZIP_DEFLATED) as zip_fp: + # add test files + for dir_name, _, dir_files in walk(report.path): + arc_path = relpath(dir_name, report.path) + for file_name in dir_files: + zip_fp.write( + pathjoin(dir_name, file_name), + arcname=pathjoin(arc_path, file_name), + ) + # override tool name if specified + if self.tool is not None: + collector.tool = self.tool + # announce shortDescription if crash is not in a bucket + if ( + cache_metadata["_grizzly_seen_count"] == 1 + and not cache_metadata["frequent"] + ): + LOG.info("Submitting new crash %r", cache_metadata["shortDescription"]) + # submit results to the FuzzManager server + new_entry = collector.submit( + report.crash_info, testCase=zip_name, testCaseQuality=self.quality + ) LOG.info("Logged %d with quality %d", new_entry["id"], self.quality) - # remove zipfile - if isfile(zip_name): - unlink(zip_name) - return new_entry["id"] From e32f3b3af4c1f5d54c9035567d7c9bfc9a2dbdde Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 18 May 2021 18:30:21 -0700 Subject: [PATCH 259/531] Fix nits in reporter --- grizzly/common/reporter.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/grizzly/common/reporter.py b/grizzly/common/reporter.py index 29314f1d..6f6cc9e1 100644 --- a/grizzly/common/reporter.py +++ b/grizzly/common/reporter.py @@ -89,10 +89,9 @@ def __init__(self, log_path, target_binary, is_hang=False, size_limit=MAX_LOG_SI if size_limit < 1: LOG.warning("No limit set on report log size!") else: - with scandir(path=log_path) as contents: - for log in contents: - if log.is_file() and log.stat().st_size > size_limit: - Report.tail(log.path, size_limit) + for log in scandir(path=log_path): + if log.is_file() and log.stat().st_size > size_limit: + Report.tail(log.path, size_limit) # look through logs one by one until we find a stack for log_file in (x for x in self._logs if x is not None): with open(log_file, "rb") as log_fp: @@ -383,11 +382,9 @@ def select_logs(cls, path): Returns: LogMap: A LogMap pointing to log files or None if path is empty. 
""" - to_scan = None - with scandir(path=path) as contents: - files = (x for x in contents if x.is_file()) - # order by date hopefully the oldest log is the cause of the issue - to_scan = [x.path for x in sorted(files, key=lambda x: x.stat().st_mtime)] + files = (x for x in scandir(path=path) if x.is_file()) + # order by date hopefully the oldest log is the cause of the issue + to_scan = [x.path for x in sorted(files, key=lambda x: x.stat().st_mtime)] if not to_scan: LOG.warning("No files found in %r", path) return None @@ -495,8 +492,7 @@ def _submit_report(self, report, test_cases): dest_path = pathjoin(self.report_path, report.major[:16]) else: dest_path = self.report_path - if not isdir(dest_path): - makedirs(dest_path) + makedirs(dest_path, exist_ok=True) # dump test cases and the contained files to working directory for test_number, test_case in enumerate(test_cases): dump_path = pathjoin(dest_path, "%s-%d" % (report.prefix, test_number)) From 2255c14ae8ff0a7ab269460c1aed3e36723913d3 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Fri, 28 May 2021 13:48:32 -0700 Subject: [PATCH 260/531] Add force_close kwarg to Target.close() --- grizzly/target/puppet_target.py | 4 ++-- grizzly/target/target.py | 2 +- grizzly/target/test_target.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/grizzly/target/puppet_target.py b/grizzly/target/puppet_target.py index c9ed4e06..9070ebdb 100644 --- a/grizzly/target/puppet_target.py +++ b/grizzly/target/puppet_target.py @@ -62,10 +62,10 @@ def cleanup(self): if self._remove_prefs and self._prefs and isfile(self._prefs): unlink(self._prefs) - def close(self): + def close(self, force_close=False): # prevent parallel calls to FFPuppet.close() and/or FFPuppet.clean_up() with self._lock: - self._puppet.close() + self._puppet.close(force_close=force_close) @property def closed(self): diff --git a/grizzly/target/target.py b/grizzly/target/target.py index d3f263b5..fa4c9511 100644 --- a/grizzly/target/target.py +++ b/grizzly/target/target.py @@ -73,7 +73,7 @@ def cleanup(self): pass @abstractmethod - def close(self): + def close(self, force_close=False): pass @abstractproperty diff --git a/grizzly/target/test_target.py b/grizzly/target/test_target.py index 2a6ec935..c9b0fb32 100644 --- a/grizzly/target/test_target.py +++ b/grizzly/target/test_target.py @@ -10,7 +10,7 @@ class SimpleTarget(Target): def cleanup(self): pass - def close(self): + def close(self, force_close=False): pass @property From 569a31840b8aeef1f0a8f6860f4da89c80b84daf Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Fri, 28 May 2021 13:51:38 -0700 Subject: [PATCH 261/531] [replay] Use Target.close(force_close=True) when not checking results --- grizzly/replay/replay.py | 3 ++- grizzly/replay/test_replay.py | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 71807200..9a6e459c 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -504,7 +504,8 @@ def run( # remove unpacked testcase data for tc_path in unpacked: rmtree(tc_path) - self.target.close() + # we don't want to clean up but we are not checking results + self.target.close(force_close=True) # remove unprocessed reports for report in reports.values(): report.report.cleanup() diff --git a/grizzly/replay/test_replay.py b/grizzly/replay/test_replay.py index 3a1ec9ce..73f86b97 100644 --- a/grizzly/replay/test_replay.py +++ b/grizzly/replay/test_replay.py @@ -59,6 +59,8 @@ def test_replay_02(mocker, tmp_path): assert 
replay.status.results == 0 assert target.monitor.is_healthy.call_count == 1 assert target.close.call_count == 2 + assert target.close.mock_calls[0] == mocker.call() + assert target.close.mock_calls[1] == mocker.call(force_close=True) assert not any(tmp_path.iterdir()) From 137069f17856eea997f6259196440ac97178c4fc Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Mon, 14 Jun 2021 16:30:37 -0700 Subject: [PATCH 262/531] Update FFPuppet support --- grizzly/args.py | 37 ++++++++++++---------------- grizzly/main.py | 1 + grizzly/target/puppet_target.py | 30 ++++++++++++++-------- grizzly/target/test_puppet_target.py | 21 +++++++--------- grizzly/test_args.py | 24 ------------------ grizzly/test_main.py | 1 + setup.cfg | 2 +- 7 files changed, 48 insertions(+), 68 deletions(-) diff --git a/grizzly/args.py b/grizzly/args.py index 6e97181c..795ca941 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -102,9 +102,6 @@ def __init__(self): help="Number of iterations performed before relaunching the browser" " (default: %(default)s)", ) - self.launcher_grp.add_argument( - "--rr", action="store_true", help="Use rr (Linux only)" - ) self.launcher_grp.add_argument( "--time-limit", type=int, @@ -125,12 +122,10 @@ def __init__(self): " and will be closed. Typically this should be a few seconds greater" " than the value used for `test-duration`." % (TIMEOUT_DELAY,), ) - self.launcher_grp.add_argument( - "--valgrind", action="store_true", help="Use Valgrind (Linux only)" - ) - self.launcher_grp.add_argument( - "--xvfb", action="store_true", help="Use Xvfb (Linux only)" - ) + if system().startswith("Linux"): + self.launcher_grp.add_argument( + "--xvfb", action="store_true", help="Use Xvfb." + ) self.reporter_grp = self.parser.add_argument_group("Reporter Arguments") self.reporter_grp.add_argument( @@ -148,6 +143,18 @@ def __init__(self): help="Override tool name used when reporting issues to FuzzManager", ) + if system().startswith("Linux"): + dbg_group = self.launcher_grp.add_mutually_exclusive_group() + dbg_group.add_argument( + "--pernosco", + action="store_true", + help="Use rr. Trace intended to be used with Pernosco.", + ) + dbg_group.add_argument("--rr", action="store_true", help="Use rr.") + dbg_group.add_argument( + "--valgrind", action="store_true", help="Use Valgrind." 
+ ) + self.parser.epilog = ( "For addition help check out the wiki:" " https://github.com/MozillaSecurity/grizzly/wiki" @@ -202,9 +209,6 @@ def sanity_check(self, args): if args.prefs and not isfile(args.prefs): self.parser.error("--prefs file not found") - if args.rr and args.valgrind: - self.parser.error("--rr and --valgrind are mutually exclusive") - if args.time_limit is not None and args.time_limit < 1: self.parser.error("--time-limit must be >= 1") @@ -215,15 +219,6 @@ def sanity_check(self, args): if args.tool is not None and not args.fuzzmanager: self.parser.error("--tool can only be given with --fuzzmanager") - # check platform specific args - os_name = system() - if args.rr and os_name != "Linux": - self.parser.error("--rr is only supported on Linux") - if args.valgrind and os_name != "Linux": - self.parser.error("--valgrind is only supported on Linux") - if args.xvfb and os_name != "Linux": - self.parser.error("--xvfb is only supported on Linux") - class GrizzlyArgs(CommonArgs): def __init__(self): diff --git a/grizzly/main.py b/grizzly/main.py index 50e98118..e556fd53 100644 --- a/grizzly/main.py +++ b/grizzly/main.py @@ -83,6 +83,7 @@ def main(args): args.launch_timeout, args.log_limit, args.memory, + pernosco=args.pernosco, rr=args.rr, valgrind=args.valgrind, xvfb=args.xvfb, diff --git a/grizzly/target/puppet_target.py b/grizzly/target/puppet_target.py index 9070ebdb..e02bdc8d 100644 --- a/grizzly/target/puppet_target.py +++ b/grizzly/target/puppet_target.py @@ -16,6 +16,7 @@ from time import sleep, time from ffpuppet import BrowserTimeoutError, FFPuppet, LaunchError +from ffpuppet.core import Debugger, Reason from prefpicker import PrefPicker from psutil import AccessDenied, NoSuchProcess, Process, process_iter @@ -32,20 +33,29 @@ class PuppetTarget(Target): - __slots__ = ("use_rr", "use_valgrind", "_puppet", "_remove_prefs") + __slots__ = ("use_valgrind", "_puppet", "_remove_prefs") def __init__( self, binary, extension, launch_timeout, log_limit, memory_limit, **kwds ): super().__init__(binary, extension, launch_timeout, log_limit, memory_limit) - self.use_rr = kwds.pop("rr", False) - self.use_valgrind = kwds.pop("valgrind", False) + # TODO: clean up handling debuggers + if kwds.pop("pernosco", False): + debugger = Debugger.PERNOSCO + elif kwds.pop("rr", False): + debugger = Debugger.RR + elif kwds.pop("valgrind", False): + self.use_valgrind = True + debugger = Debugger.VALGRIND + else: + debugger = Debugger.NONE self._remove_prefs = False + # create Puppet object self._puppet = FFPuppet( - use_rr=self.use_rr, - use_valgrind=self.use_valgrind, + debugger=debugger, use_xvfb=kwds.pop("xvfb", False), + working_path=grz_tmp("target_ffpuppet"), ) if kwds: LOG.warning( @@ -113,19 +123,19 @@ def detect_failure(self, ignored): if not self._puppet.is_healthy(): self.close() # something has happened figure out what - if self._puppet.reason == FFPuppet.RC_CLOSED: + if self._puppet.reason == Reason.CLOSED: LOG.debug("target.close() was called") - elif self._puppet.reason == FFPuppet.RC_EXITED: + elif self._puppet.reason == Reason.EXITED: LOG.debug("target closed itself") elif ( - self._puppet.reason == FFPuppet.RC_WORKER + self._puppet.reason == Reason.WORKER and "memory" in ignored and "ffp_worker_memory_usage" in self._puppet.available_logs() ): status = self.RESULT_IGNORED LOG.debug("memory limit exceeded") elif ( - self._puppet.reason == FFPuppet.RC_WORKER + self._puppet.reason == Reason.WORKER and "log-limit" in ignored and "ffp_worker_log_size" in self._puppet.available_logs() 
): @@ -133,7 +143,7 @@ def detect_failure(self, ignored): LOG.debug("log size limit exceeded") else: # crash or hang (forced SIGABRT) has been detected - LOG.debug("failure detected, ffpuppet reason %r", self._puppet.reason) + LOG.debug("failure detected, ffpuppet %s", self._puppet.reason) status = self.RESULT_FAILURE return status diff --git a/grizzly/target/test_puppet_target.py b/grizzly/target/test_puppet_target.py index b12957c1..9ddf2206 100644 --- a/grizzly/target/test_puppet_target.py +++ b/grizzly/target/test_puppet_target.py @@ -6,7 +6,8 @@ from os.path import isfile from platform import system -from ffpuppet import BrowserTerminatedError, BrowserTimeoutError, FFPuppet +from ffpuppet import BrowserTerminatedError, BrowserTimeoutError +from ffpuppet.core import Reason from pytest import mark, raises from .puppet_target import PuppetTarget @@ -16,7 +17,7 @@ def test_puppet_target_01(mocker, tmp_path): """test creating a PuppetTarget""" fake_ffp = mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) - fake_ffp.return_value.reason = FFPuppet.RC_CLOSED + fake_ffp.return_value.reason = Reason.CLOSED fake_ffp.return_value.log_length.return_value = 562 fake_file = tmp_path / "fake" fake_file.touch() @@ -83,26 +84,22 @@ def test_puppet_target_02(mocker, tmp_path): # running as expected - no failures (True, None, [], Target.RESULT_NONE, 0), # browser process closed - (False, FFPuppet.RC_CLOSED, [], Target.RESULT_NONE, 1), + (False, Reason.CLOSED, [], Target.RESULT_NONE, 1), # browser process crashed - (False, FFPuppet.RC_ALERT, [], Target.RESULT_FAILURE, 1), + (False, Reason.ALERT, [], Target.RESULT_FAILURE, 1), # browser exit with no crash logs - (False, FFPuppet.RC_EXITED, [], Target.RESULT_NONE, 1), + (False, Reason.EXITED, [], Target.RESULT_NONE, 1), # ffpuppet check failed - (False, FFPuppet.RC_WORKER, [], Target.RESULT_FAILURE, 1), + (False, Reason.WORKER, [], Target.RESULT_FAILURE, 1), # ffpuppet check ignored (memory) - (False, FFPuppet.RC_WORKER, ["memory"], Target.RESULT_IGNORED, 1), + (False, Reason.WORKER, ["memory"], Target.RESULT_IGNORED, 1), # ffpuppet check ignored (log-limit) - (False, FFPuppet.RC_WORKER, ["log-limit"], Target.RESULT_IGNORED, 1), + (False, Reason.WORKER, ["log-limit"], Target.RESULT_IGNORED, 1), ], ) def test_puppet_target_03(mocker, tmp_path, healthy, reason, ignore, result, closes): """test PuppetTarget.detect_failure()""" fake_ffp = mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) - fake_ffp.RC_ALERT = FFPuppet.RC_ALERT - fake_ffp.RC_CLOSED = FFPuppet.RC_CLOSED - fake_ffp.RC_EXITED = FFPuppet.RC_EXITED - fake_ffp.RC_WORKER = FFPuppet.RC_WORKER fake_file = tmp_path / "fake" fake_file.touch() target = PuppetTarget(str(fake_file), None, 300, 25, 5000) diff --git a/grizzly/test_args.py b/grizzly/test_args.py index 64f87737..d275ac8a 100644 --- a/grizzly/test_args.py +++ b/grizzly/test_args.py @@ -81,30 +81,6 @@ def test_common_args_01a(capsys, mocker, tmp_path): "error: --tool can only be given with --fuzzmanager", ["targ1"], ), - # test enabling both rr and Valgrind - ( - ["--platform", "targ1", "--rr", "--valgrind"], - "error: --rr and --valgrind are mutually exclusive", - ["targ1"], - ), - # test rr on unsupported platform - ( - ["--platform", "targ1", "--rr"], - "error: --rr is only supported on Linux", - ["targ1"], - ), - # test Valgrind on unsupported platform - ( - ["--platform", "targ1", "--valgrind"], - "error: --valgrind is only supported on Linux", - ["targ1"], - ), - # test Xvfb on unsupported platform - ( - 
["--platform", "targ1", "--xvfb"], - "error: --xvfb is only supported on Linux", - ["targ1"], - ), ], ) def test_common_args_02(capsys, mocker, tmp_path, args, msg, targets): diff --git a/grizzly/test_main.py b/grizzly/test_main.py index 06627883..ec9980ed 100644 --- a/grizzly/test_main.py +++ b/grizzly/test_main.py @@ -29,6 +29,7 @@ def __init__(self): self.log_level = 10 # 10 = DEBUG, 20 = INFO self.log_limit = 0 self.memory = 0 + self.pernosco = False self.platform = "fake-target" self.prefs = None self.rr = False # pylint: disable=invalid-name diff --git a/setup.cfg b/setup.cfg index d7ea5725..03d8f110 100644 --- a/setup.cfg +++ b/setup.cfg @@ -22,7 +22,7 @@ include_package_data = True install_requires = cssbeautifier fasteners - ffpuppet + ffpuppet >= 0.8.0 FuzzManager jsbeautifier lithium-reducer >= 0.5 From 9a07fc347062b65edd0d52368ffd9156b6fbd4eb Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 15 Jun 2021 11:11:31 -0700 Subject: [PATCH 263/531] Pass pernosco args to target in all cases --- grizzly/main.py | 6 ++++-- grizzly/reduce/core.py | 9 ++++++--- grizzly/replay/replay.py | 5 ++++- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/grizzly/main.py b/grizzly/main.py index e556fd53..a4d1d5a3 100644 --- a/grizzly/main.py +++ b/grizzly/main.py @@ -38,9 +38,11 @@ def main(args): LOG.info("Ignoring: %s", ", ".join(args.ignore)) if args.xvfb: LOG.info("Running with Xvfb") - if args.rr: + if args.pernosco: + LOG.info("Running with RR (Pernosco mode)") + elif args.rr: LOG.info("Running with RR") - if args.valgrind: + elif args.valgrind: LOG.info("Running with Valgrind. This will be SLOW!") adapter = None diff --git a/grizzly/reduce/core.py b/grizzly/reduce/core.py index c6d96004..fc4503a5 100644 --- a/grizzly/reduce/core.py +++ b/grizzly/reduce/core.py @@ -712,10 +712,12 @@ def main(cls, args): LOG.info("Ignoring: %s", ", ".join(args.ignore)) if args.xvfb: LOG.info("Running with Xvfb") - if args.valgrind: - LOG.info("Running with Valgrind. This will be SLOW!") - if args.rr: + if args.pernosco: + LOG.info("Running with RR (Pernosco mode)") + elif args.rr: LOG.info("Running with RR") + elif args.valgrind: + LOG.info("Running with Valgrind. This will be SLOW!") signature = None signature_desc = None @@ -772,6 +774,7 @@ def main(cls, args): args.launch_timeout, args.log_limit, args.memory, + pernosco=args.pernosco, rr=args.rr, valgrind=args.valgrind, xvfb=args.xvfb, diff --git a/grizzly/replay/replay.py b/grizzly/replay/replay.py index 9a6e459c..1b8873a6 100644 --- a/grizzly/replay/replay.py +++ b/grizzly/replay/replay.py @@ -552,7 +552,9 @@ def main(cls, args): LOG.info("Ignoring: %s", ", ".join(args.ignore)) if args.xvfb: LOG.info("Running with Xvfb") - if args.rr: + if args.pernosco: + LOG.info("Running with RR (Pernosco mode)") + elif args.rr: LOG.info("Running with RR") elif args.valgrind: LOG.info("Running with Valgrind. 
This will be SLOW!") @@ -606,6 +608,7 @@ def main(cls, args): args.launch_timeout, args.log_limit, args.memory, + pernosco=args.pernosco, rr=args.rr, valgrind=args.valgrind, xvfb=args.xvfb, From ff0f527afebdd8ba9dfe41326ecffba3cd95eb79 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 15 Jun 2021 11:14:33 -0700 Subject: [PATCH 264/531] [tests] Test processing debugger args in PuppetTarget --- grizzly/target/puppet_target.py | 7 +++--- grizzly/target/test_puppet_target.py | 34 +++++++++++++++++++++++++++- 2 files changed, 36 insertions(+), 5 deletions(-) diff --git a/grizzly/target/puppet_target.py b/grizzly/target/puppet_target.py index e02bdc8d..e03e9851 100644 --- a/grizzly/target/puppet_target.py +++ b/grizzly/target/puppet_target.py @@ -40,15 +40,14 @@ def __init__( ): super().__init__(binary, extension, launch_timeout, log_limit, memory_limit) # TODO: clean up handling debuggers + debugger = Debugger.NONE if kwds.pop("pernosco", False): debugger = Debugger.PERNOSCO - elif kwds.pop("rr", False): + if kwds.pop("rr", False): debugger = Debugger.RR - elif kwds.pop("valgrind", False): + if kwds.pop("valgrind", False): self.use_valgrind = True debugger = Debugger.VALGRIND - else: - debugger = Debugger.NONE self._remove_prefs = False # create Puppet object diff --git a/grizzly/target/test_puppet_target.py b/grizzly/target/test_puppet_target.py index 9ddf2206..9aaedaf7 100644 --- a/grizzly/target/test_puppet_target.py +++ b/grizzly/target/test_puppet_target.py @@ -7,7 +7,7 @@ from platform import system from ffpuppet import BrowserTerminatedError, BrowserTimeoutError -from ffpuppet.core import Reason +from ffpuppet.core import Debugger, Reason from pytest import mark, raises from .puppet_target import PuppetTarget @@ -328,3 +328,35 @@ def test_puppet_target_08(mocker, tmp_path): with raises(TargetError, match="Missing prefs.js file 'missing'"): target.prefs = "missing" assert not isfile(prefs_file) + + +@mark.parametrize( + "pernosco, rr, valgrind", + [ + # No debugger selected + (False, False, False), + # Pernosco selected + (True, False, False), + # rr selected + (False, True, False), + # Valgrind selected + (False, False, True), + ], # pylint: disable=invalid-name +) +def test_puppet_target_09(mocker, tmp_path, pernosco, rr, valgrind): + """test PuppetTarget debugger args""" + fake_ffp = mocker.patch("grizzly.target.puppet_target.FFPuppet", autospec=True) + fake_file = tmp_path / "fake" + fake_file.touch() + with PuppetTarget( + str(fake_file), None, 30, 25, 500, pernosco=pernosco, rr=rr, valgrind=valgrind + ) as _: + pass + if pernosco: + assert fake_ffp.call_args[-1]["debugger"] == Debugger.PERNOSCO + elif rr: + assert fake_ffp.call_args[-1]["debugger"] == Debugger.RR + elif valgrind: + assert fake_ffp.call_args[-1]["debugger"] == Debugger.VALGRIND + else: + assert fake_ffp.call_args[-1]["debugger"] == Debugger.NONE From 41331fd0950b525084b8b3c6fcb6474fbe27ee0f Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Tue, 15 Jun 2021 11:31:04 -0700 Subject: [PATCH 265/531] Update minimum FFPuppet version to 0.8.1 --- grizzly/target/puppet_target.py | 3 +-- grizzly/target/test_puppet_target.py | 3 +-- setup.cfg | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/grizzly/target/puppet_target.py b/grizzly/target/puppet_target.py index e03e9851..bcf80545 100644 --- a/grizzly/target/puppet_target.py +++ b/grizzly/target/puppet_target.py @@ -15,8 +15,7 @@ from tempfile import mkdtemp, mkstemp from time import sleep, time -from ffpuppet import BrowserTimeoutError, FFPuppet, 
LaunchError -from ffpuppet.core import Debugger, Reason +from ffpuppet import BrowserTimeoutError, Debugger, FFPuppet, LaunchError, Reason from prefpicker import PrefPicker from psutil import AccessDenied, NoSuchProcess, Process, process_iter diff --git a/grizzly/target/test_puppet_target.py b/grizzly/target/test_puppet_target.py index 9aaedaf7..68ce1688 100644 --- a/grizzly/target/test_puppet_target.py +++ b/grizzly/target/test_puppet_target.py @@ -6,8 +6,7 @@ from os.path import isfile from platform import system -from ffpuppet import BrowserTerminatedError, BrowserTimeoutError -from ffpuppet.core import Debugger, Reason +from ffpuppet import BrowserTerminatedError, BrowserTimeoutError, Debugger, Reason from pytest import mark, raises from .puppet_target import PuppetTarget diff --git a/setup.cfg b/setup.cfg index 03d8f110..49d13f5f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -22,7 +22,7 @@ include_package_data = True install_requires = cssbeautifier fasteners - ffpuppet >= 0.8.0 + ffpuppet >= 0.8.1 FuzzManager jsbeautifier lithium-reducer >= 0.5 From 41e227fc70adcaaa263922900616081ab42fbcc4 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 17 Jun 2021 12:10:56 -0700 Subject: [PATCH 266/531] Replace Status.duration with Status.runtime Calculations are now based on read-only status --- grizzly/common/status.py | 32 ++++++++++++-------- grizzly/common/status_reporter.py | 11 +++---- grizzly/common/test_status.py | 42 ++++++++++++++++++-------- grizzly/common/test_status_reporter.py | 5 +-- 4 files changed, 56 insertions(+), 34 deletions(-) diff --git a/grizzly/common/status.py b/grizzly/common/status.py index 7aa7b5dc..5a18c8f1 100644 --- a/grizzly/common/status.py +++ b/grizzly/common/status.py @@ -113,18 +113,6 @@ def _data(self): "timestamp": self.timestamp, } - @property - def duration(self): - """Calculate the number of seconds since start() was called. - - Args: - None - - Returns: - int: Total runtime in seconds since start() was called - """ - return max(self.timestamp - self.start_time, 0) - @classmethod def load(cls, data_file): """Load status report. Loading a status report from disk will create a @@ -162,6 +150,7 @@ def load(cls, data_file): return None for attr, value in data.items(): setattr(status, attr, value) + assert status.start_time <= status.timestamp # set read only status.data_file = None return status @@ -231,7 +220,8 @@ def rate(self): Returns: float: Number of iterations performed per second. """ - return self.iteration / float(self.duration) if self.duration > 0 else 0 + runtime = self.runtime + return self.iteration / float(runtime) if runtime else 0 def record(self, name, duration): """Used to add profiling data. This is intended to be used to make rough @@ -278,6 +268,7 @@ def report(self, force=False, report_freq=REPORT_FREQ): now = time() if not force and now < (self.timestamp + report_freq): return False + assert self.start_time <= now self.timestamp = now with self._lock: with open(self.data_file, "w") as out_fp: @@ -296,6 +287,21 @@ def results(self): """ return sum(self._results.values()) + @property + def runtime(self): + """Calculate the number of seconds since start() was called. Value is + calculated relative to 'timestamp' if status object is read-only. + + Args: + None + + Returns: + int: Total runtime in seconds. + """ + if self.data_file is None: + return self.timestamp - self.start_time + return max(time() - self.start_time, 0) + def signatures(self): """Provide the signature and the number of times it has been found for each result. 
diff --git a/grizzly/common/status_reporter.py b/grizzly/common/status_reporter.py index aec54429..4fc8bf33 100644 --- a/grizzly/common/status_reporter.py +++ b/grizzly/common/status_reporter.py @@ -41,6 +41,7 @@ class StatusReporter: SUMMARY_LIMIT = 4095 # summary output must be no more than 4KB def __init__(self, reports, tracebacks=None): + assert all(x.data_file is None for x in reports) self.reports = reports self.tracebacks = tracebacks @@ -136,7 +137,7 @@ def _specific(self): if not self.reports: return "No status reports available" exp = int(time()) - self.EXP_LIMIT - self.reports.sort(key=lambda x: x.duration, reverse=True) + self.reports.sort(key=lambda x: x.runtime, reverse=True) self.reports.sort(key=lambda x: x.timestamp < exp) txt = list() for num, report in enumerate(self.reports, start=1): @@ -144,7 +145,7 @@ def _specific(self): if report.timestamp < exp: txt.append(" (EXPIRED)\n") continue - txt.append(" Runtime %s\n" % (timedelta(seconds=int(report.duration)),)) + txt.append(" Runtime %s\n" % (timedelta(seconds=int(report.runtime)),)) txt.append(" * Iterations: %d" % (report.iteration,)) txt.append(" @ %0.2f," % (round(report.rate, 2),)) txt.append(" Ignored: %d," % (report.ignored,)) @@ -162,9 +163,7 @@ def _specific(self): txt.append(str(timedelta(seconds=int(entry.total)))) else: txt.append("%0.3fs" % (round(entry.total, 3),)) - txt.append( - " %0.2f%%" % (round(entry.total / report.duration * 100, 2),) - ) + txt.append(" %0.2f%%" % (round(entry.total / report.runtime * 100, 2),)) txt.append(" (%0.3f avg," % (round(avg, 3),)) txt.append(" %0.3f max," % (round(entry.max, 3),)) txt.append(" %0.3f min)" % (round(entry.min, 3),)) @@ -218,7 +217,7 @@ def _summary(self, runtime=True, sysinfo=False, timestamp=False): # Runtime if runtime: txt.append("\n") - total_runtime = sum(x.duration for x in reports) + total_runtime = sum(x.runtime for x in reports) txt.append(" Runtime : %s" % (timedelta(seconds=int(total_runtime)),)) # Log size log_usage = sum(log_sizes) / 1048576.0 diff --git a/grizzly/common/test_status.py b/grizzly/common/test_status.py index 0de9825e..fa321016 100644 --- a/grizzly/common/test_status.py +++ b/grizzly/common/test_status.py @@ -13,9 +13,10 @@ from .status import Status -def test_status_01(tmp_path): +def test_status_01(mocker, tmp_path): """test Status.start()""" Status.PATH = str(tmp_path) + mocker.patch("grizzly.common.status.time", return_value=1.0) status = Status.start() assert status is not None assert status.data_file is not None @@ -23,12 +24,12 @@ def test_status_01(tmp_path): assert stat(status.data_file).st_size > 0 assert status.start_time > 0 assert status.timestamp >= status.start_time - assert int(status.duration) == 0 assert status.ignored == 0 assert status.iteration == 0 assert status.log_size == 0 assert status.rate == 0 assert status.results == 0 + assert int(status.runtime) == 0 assert status.pid is not None assert not status._enable_profiling assert not status._profiles @@ -83,9 +84,10 @@ def test_status_04(tmp_path): assert Status.load(str(bad)) is None -def test_status_05(tmp_path): +def test_status_05(mocker, tmp_path): """test Status.load()""" Status.PATH = str(tmp_path) + mocker.patch("grizzly.common.status.time", return_value=1.0) # create simple entry status = Status.start(enable_profiling=True) status.count_result("sig1") @@ -96,7 +98,7 @@ def test_status_05(tmp_path): assert loaded.data_file is None assert status.start_time == loaded.start_time assert status.timestamp == loaded.timestamp - assert status.duration == 
loaded.duration + assert status.runtime == loaded.runtime assert status.ignored == loaded.ignored assert status.iteration == loaded.iteration assert status.log_size == loaded.log_size @@ -127,19 +129,33 @@ def test_status_06(tmp_path): assert len(tuple(Status.loadall())) == 5 -def test_status_07(tmp_path): - """test Status.duration and Status.rate calculations""" +def test_status_07(mocker, tmp_path): + """test Status.runtime and Status.rate calculations""" Status.PATH = str(tmp_path) + mocker.patch( + "grizzly.common.status.time", side_effect=(1.0, 1.0, 3.0, 3.0, 5.0, 5.0, 5.0) + ) status = Status.start() - status.start_time = 1 - status.timestamp = 2 - status.iteration = 0 - assert status.duration == 1 + assert status.data_file is not None + assert status.start_time == 1 + # test no iterations + assert status.runtime == 2.0 assert status.rate == 0 + # test one iteration status.iteration = 1 - assert status.rate == 1 - status.timestamp += 1 - assert status.rate == 0.5 + # timestamp should be ignored when calculating rate and runtime on active object + status.timestamp = 100 + assert status.runtime == 4.0 + assert status.rate == 0.25 + # test loaded + status.report(force=True) + loaded = Status.load(status.data_file) + assert loaded.runtime == 4.0 + assert loaded.rate == 0.25 + # timestamp should be used when calculating rate and runtime on loaded object + loaded.timestamp = 2.0 + assert loaded.runtime == 1.0 + assert loaded.rate == 1.0 def _client_writer(done, reported, working_path): diff --git a/grizzly/common/test_status_reporter.py b/grizzly/common/test_status_reporter.py index 8ad1ac7e..7deeab03 100644 --- a/grizzly/common/test_status_reporter.py +++ b/grizzly/common/test_status_reporter.py @@ -125,8 +125,10 @@ def test_status_reporter_04(tmp_path): assert any(StatusReporter._scan(str(tmp_path), "TEST_FILE")) -def test_status_reporter_05(tmp_path): +def test_status_reporter_05(mocker, tmp_path): """test StatusReporter._summary()""" + mocker.patch("grizzly.common.status.time", side_effect=count(start=1.0, step=1.0)) + mocker.patch("grizzly.common.status_reporter.time", side_effect=(1.0, 2.0)) Status.PATH = str(tmp_path) # single report status = Status.start() @@ -149,7 +151,6 @@ def test_status_reporter_05(tmp_path): assert len(output.split("\n")) == 3 # multiple reports status = Status.start() - status.start_time += 66.0 status.ignored = 1 status.iteration = 8 status.log_size = 86900000 From 54f8d6a97e0d2b82bd0df416cd74932af496b29d Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Thu, 17 Jun 2021 14:04:37 -0700 Subject: [PATCH 267/531] Add --runtime argument --- grizzly/args.py | 22 +++++++++++++----- grizzly/main.py | 7 ++++-- grizzly/session.py | 8 ++++++- grizzly/test_args.py | 4 +++- grizzly/test_main.py | 20 ++++++++++------- grizzly/test_session.py | 50 +++++++++++++++++++++++++++-------------- 6 files changed, 76 insertions(+), 35 deletions(-) diff --git a/grizzly/args.py b/grizzly/args.py index 795ca941..90ed6650 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -236,7 +236,7 @@ def __init__(self): " status reporter while running Grizzly.", ) self.parser.add_argument( - "-i", "--input", help="Test case or directory containing test cases" + "-i", "--input", help="Test case or directory containing test cases." ) self.parser.add_argument( "--limit", @@ -255,7 +255,14 @@ def __init__(self): ) self.launcher_grp.add_argument( - "--coverage", action="store_true", help="Enable coverage collection" + "--coverage", action="store_true", help="Enable coverage collection." 
+ ) + self.launcher_grp.add_argument( + "--runtime", + type=int, + default=0, + help="Maximum runtime in seconds. Checked after each iteration." + " (default: 'no limit')", ) self.reporter_grp.add_argument( @@ -263,14 +270,14 @@ def __init__(self): "--collect", type=int, default=1, - help="Maximum number of test cases to include in the report" - "(default: %(default)s)", + help="Maximum number of test cases to include in the report." + " (default: %(default)s)", ) self.reporter_grp.add_argument( "--s3-fuzzmanager", action="store_true", help="Report large attachments (if any) to S3 and then the crash &" - " S3 link to FuzzManager", + " S3 link to FuzzManager.", ) def sanity_check(self, args): @@ -294,7 +301,10 @@ def sanity_check(self, args): self.parser.error("%r does not exist" % (args.input,)) if args.limit < 0: - self.parser.error("--limit must be >= 0 (0 = no limit)") + self.parser.error("--limit must be >= 0") + + if args.runtime < 0: + self.parser.error("--runtime must be >= 0") if args.tool is not None and not (args.fuzzmanager or args.s3_fuzzmanager): self.parser.error( diff --git a/grizzly/main.py b/grizzly/main.py index a4d1d5a3..f1891635 100644 --- a/grizzly/main.py +++ b/grizzly/main.py @@ -52,7 +52,7 @@ def main(args): LOG.debug("initializing Adapter %r", args.adapter) adapter = load_plugin(args.adapter, "grizzly_adapters", Adapter)(args.adapter) - # test time limit and timeout sanity checking + # test case time limit and timeout sanity checking if args.time_limit: time_limit = args.time_limit else: @@ -108,8 +108,10 @@ def main(args): reporter = FilesystemReporter(pathjoin(getcwd(), "results")) LOG.info("Results will be stored in %r", reporter.report_path) - if args.limit > 0: + if args.limit: LOG.info("%r iteration(s) will be attempted", args.limit) + if args.runtime: + LOG.info("Runtime is limited to %rs", args.runtime) # set 'auto_close=1' so the client error pages (code 4XX) will # call 'window.close()' after a second. 
@@ -137,6 +139,7 @@ def main(args): time_limit, input_path=args.input, iteration_limit=args.limit, + runtime_limit=args.runtime, display_mode=display_mode, ) diff --git a/grizzly/session.py b/grizzly/session.py index d27b352c..cf82c03e 100644 --- a/grizzly/session.py +++ b/grizzly/session.py @@ -147,10 +147,12 @@ def run( time_limit, input_path=None, iteration_limit=0, + runtime_limit=0, display_mode=DISPLAY_NORMAL, ): - assert time_limit > 0 assert iteration_limit >= 0 + assert runtime_limit >= 0 + assert time_limit > 0 LOG.debug("calling adapter.setup()") self.adapter.setup(input_path, self.iomanager.server_map) @@ -274,6 +276,10 @@ def run( LOG.info("Hit iteration limit (%d)", iteration_limit) break + if runtime_limit and self.status.runtime >= runtime_limit: + LOG.info("Hit runtime limit (%ds)", runtime_limit) + break + # warn about large browser logs self.status.log_size = self.target.log_size() if self.status.log_size > self.TARGET_LOG_SIZE_WARN: diff --git a/grizzly/test_args.py b/grizzly/test_args.py index d275ac8a..83ff0470 100644 --- a/grizzly/test_args.py +++ b/grizzly/test_args.py @@ -152,7 +152,9 @@ def test_grizzly_args_03(capsys, mocker, tmp_path): # test missing input (["--input", "missing"], "error: 'missing' does not exist"), # test invalid limit value - (["--limit", "-1"], "error: --limit must be >= 0 (0 = no limit)"), + (["--limit", "-1"], "error: --limit must be >= 0"), + # test runtime limit value + (["--runtime", "-1"], "error: --runtime must be >= 0"), # test tool ( ["--tool", "x"], diff --git a/grizzly/test_main.py b/grizzly/test_main.py index ec9980ed..8ec5a370 100644 --- a/grizzly/test_main.py +++ b/grizzly/test_main.py @@ -34,6 +34,7 @@ def __init__(self): self.prefs = None self.rr = False # pylint: disable=invalid-name self.relaunch = 1000 + self.runtime = 0 self.s3_fuzzmanager = False self.time_limit = None self.timeout = None @@ -44,21 +45,23 @@ def __init__(self): @mark.parametrize( - "cov, adpt_relaunch, limit, verbose", + "cov, adpt_relaunch, limit, runtime, verbose", [ # successful run - (False, 0, 0, True), - # successful run (with limit) - (False, 0, 10, True), + (False, 0, 0, 0, True), + # successful run (with iteration limit) + (False, 0, 10, 0, True), + # successful run (with runtime limit) + (False, 0, 0, 10, True), # successful run (with coverage) - (True, 0, 0, False), + (True, 0, 0, 0, False), # relaunch 1 - (False, 1, 0, False), + (False, 1, 0, 0, False), # relaunch 10 - (False, 10, 0, False), + (False, 10, 0, 0, False), ], ) -def test_main_01(mocker, cov, adpt_relaunch, limit, verbose): +def test_main_01(mocker, cov, adpt_relaunch, limit, runtime, verbose): """test main()""" fake_adapter = mocker.NonCallableMock(spec_set=Adapter) fake_adapter.RELAUNCH = adpt_relaunch @@ -76,6 +79,7 @@ def test_main_01(mocker, cov, adpt_relaunch, limit, verbose): args.adapter = "fake" args.ignore = ["fake", "fake"] args.limit = limit + args.runtime = runtime args.prefs = "fake" args.rr = True args.valgrind = True diff --git a/grizzly/test_session.py b/grizzly/test_session.py index 01818c56..01027af5 100644 --- a/grizzly/test_session.py +++ b/grizzly/test_session.py @@ -6,6 +6,8 @@ """ unit tests for grizzly.Session """ +from itertools import count + from pytest import mark, raises from sapphire import SERVED_ALL, SERVED_NONE, SERVED_TIMEOUT, Sapphire @@ -37,29 +39,34 @@ def generate(self, testcase, _server_map): @mark.parametrize( - "harness, profiling, coverage, relaunch, iters", + "harness, profiling, coverage, relaunch, iters, runtime", [ # with harness, 
single iteration - (True, False, False, 1, 1), + (True, False, False, 1, 1, 0), # with harness, 10 iterations relaunch every iteration - (True, False, False, 1, 10), + (True, False, False, 1, 10, 0), # with harness, 10 iterations relaunch every other iteration - (True, False, False, 2, 10), + (True, False, False, 2, 10, 0), # with harness, 10 iterations no relaunches - (True, False, False, 10, 10), + (True, False, False, 10, 10, 0), # no harness, single iteration - (False, False, False, 1, 1), + (False, False, False, 1, 1, 0), # no harness, 10 iterations - (False, False, False, 1, 10), + (False, False, False, 1, 10, 0), # test enable profiling - (True, True, False, 10, 10), + (True, True, False, 10, 10, 0), # test Session.dump_coverage() - (True, True, True, 2, 2), + (True, True, True, 2, 2, 0), + # with harness, runtime limit + (True, False, False, 1, 0, 1), ], ) -def test_session_01(mocker, tmp_path, harness, profiling, coverage, relaunch, iters): +def test_session_01( + mocker, tmp_path, harness, profiling, coverage, relaunch, iters, runtime +): """test Session with typical fuzzer Adapter""" Status.PATH = str(tmp_path) + mocker.patch("grizzly.common.status.time", side_effect=count(start=1.0, step=1.0)) server = mocker.Mock(spec=Sapphire, port=0x1337) prefs = tmp_path / "prefs.js" prefs.touch() @@ -68,9 +75,12 @@ def test_session_01(mocker, tmp_path, harness, profiling, coverage, relaunch, it target.monitor.launches = 1 # avoid shutdown delay target.monitor.is_healthy.return_value = False + # we can only test iter limit OR runtime limit not both + assert bool(iters) != bool(runtime), "test is broken!" + max_iters = iters or 1 # calculate if the target is 'closed' based on relaunch type(target).closed = mocker.PropertyMock( - side_effect=((x % relaunch == 0) for x in range(iters)) + side_effect=((x % relaunch == 0) for x in range(max_iters)) ) with Session( SimpleAdapter(harness), @@ -85,18 +95,24 @@ def test_session_01(mocker, tmp_path, harness, profiling, coverage, relaunch, it SERVED_ALL, [session.iomanager.page_name(offset=-1)], ) - session.run([], 10, input_path="file.bin", iteration_limit=iters) - assert session.status.iteration == iters + session.run( + [], + 10, + input_path="file.bin", + iteration_limit=iters, + runtime_limit=runtime, + ) + assert session.status.iteration == max_iters assert session.status.test_name == "file.bin" - assert target.close.call_count == iters / relaunch - assert target.detect_failure.call_count == iters + assert target.close.call_count == max_iters / relaunch + assert target.detect_failure.call_count == max_iters assert target.handle_hang.call_count == 0 if profiling: - assert any(session.status.profile_entries()) + assert any(session.status.profile_entries()) == profiling else: assert not any(session.status.profile_entries()) if coverage: - assert target.dump_coverage.call_count == iters + assert target.dump_coverage.call_count == max_iters else: assert target.dump_coverage.call_count == 0 From 5dc334a0d7100338db10bd28202df0d5f937e750 Mon Sep 17 00:00:00 2001 From: Tyson Smith Date: Mon, 21 Jun 2021 11:00:52 -0700 Subject: [PATCH 268/531] [tests] Add debugger selection tests --- grizzly/replay/test_main.py | 147 +++++++++++++++++++++++++----------- grizzly/test_main.py | 40 ++++++++++ 2 files changed, 142 insertions(+), 45 deletions(-) diff --git a/grizzly/replay/test_main.py b/grizzly/replay/test_main.py index 25b3610b..1c92cccb 100644 --- a/grizzly/replay/test_main.py +++ b/grizzly/replay/test_main.py @@ -24,15 +24,13 @@ def test_main_01(mocker, 
tmp_path): # This is a typical scenario - a test that reproduces results ~50% of the time. # Of the four attempts only the first and third will 'reproduce' the result # and the forth attempt should be skipped. - # mock Sapphire.serve_path only mocker.patch("grizzly.common.runner.sleep", autospec=True) + # mock Sapphire.serve_path only serve_path = mocker.patch( - "grizzly.replay.replay.Sapphire.serve_path", autospec=True + "grizzly.replay.replay.Sapphire.serve_path", + autospec=True, + return_value=(SERVED_ALL, ["test.html"]), # passed to Target.detect_failure ) - serve_path.return_value = ( - SERVED_ALL, - ["test.html"], - ) # passed to mocked Target.detect_failure # setup Target load_target = mocker.patch("grizzly.replay.replay.load_plugin") target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) @@ -61,7 +59,8 @@ def test_main_01(mocker, tmp_path): input=str(tmp_path / "test.html"), logs=str(log_path), min_crashes=2, - no_harness=True, + no_harness=False, + pernosco=False, prefs=str(tmp_path / "prefs.js"), relaunch=1, repeat=4, @@ -88,15 +87,13 @@ def test_main_01(mocker, tmp_path): def test_main_02(mocker, tmp_path): """test ReplayManager.main() - no repro""" - # mock Sapphire.serve_path only mocker.patch("grizzly.common.runner.sleep", autospec=True) - serve_path = mocker.patch( - "grizzly.replay.replay.Sapphire.serve_path", autospec=True + # mock Sapphire.serve_path only + mocker.patch( + "grizzly.replay.replay.Sapphire.serve_path", + autospec=True, + return_value=(SERVED_ALL, ["test.html"]), # passed to Target.detect_failure ) - serve_path.return_value = ( - SERVED_ALL, - ["test.html"], - ) # passed to mocked Target.detect_failure # setup Target load_target = mocker.patch("grizzly.replay.replay.load_plugin") target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) @@ -105,7 +102,6 @@ def test_main_02(mocker, tmp_path): load_target.return_value.return_value = target # setup args (tmp_path / "test.html").touch() - (tmp_path / "prefs.js").touch() args = mocker.Mock( fuzzmanager=False, idle_delay=0, @@ -114,6 +110,7 @@ def test_main_02(mocker, tmp_path): input=str(tmp_path / "test.html"), min_crashes=2, no_harness=True, + pernosco=False, prefs=None, relaunch=1, repeat=1, @@ -142,31 +139,27 @@ def test_main_03(mocker): ignore=list(), input="test", min_crashes=1, - no_harenss=True, + no_harness=True, + pernosco=False, prefs=None, relaunch=1, repeat=1, + rr=False, sig=None, test_index=None, time_limit=10, timeout=None, + valgrind=False, ) # user abort fake_load_target.side_effect = KeyboardInterrupt - # coverage - args.rr = True - args.valgrind = False assert ReplayManager.main(args) == Session.EXIT_ABORT fake_load_target.reset_mock() # invalid test case fake_tc.load.side_effect = TestCaseLoadFailure - # coverage - args.rr = False - args.valgrind = True assert ReplayManager.main(args) == Session.EXIT_ERROR assert fake_load_target.call_count == 0 # no test cases - args.valgrind = False fake_tc.load.side_effect = None fake_tc.load.return_value = list() assert ReplayManager.main(args) == Session.EXIT_ERROR @@ -210,14 +203,17 @@ def test_main_04(mocker, tmp_path): ignore=list(), input="test", min_crashes=1, - no_harenss=True, + no_harness=True, + pernosco=False, prefs=None, relaunch=1, repeat=1, + rr=False, sig=None, test_index=None, time_limit=10, timeout=None, + valgrind=False, ) # target launch error fake_logs = tmp_path / "fake_report" @@ -240,12 +236,10 @@ def test_main_04(mocker, tmp_path): def test_main_05(mocker, tmp_path): """test ReplayManager.main() 
loading/generating prefs.js""" serve_path = mocker.patch( - "grizzly.replay.replay.Sapphire.serve_path", autospec=True + "grizzly.replay.replay.Sapphire.serve_path", + autospec=True, + return_value=(None, ["test.html"]), # passed to Target.detect_failure ) - serve_path.return_value = ( - None, - ["test.html"], - ) # passed to mocked Target.detect_failure # setup Target target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) target.RESULT_FAILURE = Target.RESULT_FAILURE @@ -262,12 +256,15 @@ def test_main_05(mocker, tmp_path): ignore=list(), min_crashes=1, no_harness=True, + pernosco=False, relaunch=1, repeat=1, + rr=False, sig=None, test_index=None, time_limit=1, timeout=None, + valgrind=False, ) log_path = tmp_path / "logs" args.logs = str(log_path) @@ -339,15 +336,13 @@ def test_main_05(mocker, tmp_path): ) def test_main_06(mocker, tmp_path, arg_timelimit, arg_timeout, test_timelimit, result): """test ReplayManager.main() - test time limit and timeout""" - # mock Sapphire.serve_path only mocker.patch("grizzly.common.runner.sleep", autospec=True) - serve_path = mocker.patch( - "grizzly.replay.replay.Sapphire.serve_path", autospec=True + # mock Sapphire.serve_path only + mocker.patch( + "grizzly.replay.replay.Sapphire.serve_path", + autospec=True, + return_value=(SERVED_ALL, ["test.html"]), # passed to Target.detect_failure ) - serve_path.return_value = ( - SERVED_ALL, - ["test.html"], - ) # passed to mocked Target.detect_failure # setup Target target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) target.RESULT_NONE = Target.RESULT_NONE @@ -355,16 +350,15 @@ def test_main_06(mocker, tmp_path, arg_timelimit, arg_timeout, test_timelimit, r load_target = mocker.patch("grizzly.replay.replay.load_plugin") load_target.return_value.return_value = target # create test to load - test = TestCase("test.html", None, None) - test_file = tmp_path / "test.html" - test_file.write_text("test") - test.add_from_file(str(test_file)) - replay_path = tmp_path / "test" - replay_path.mkdir() - test.time_limit = test_timelimit - test.dump(str(replay_path), include_details=True) + with TestCase("test.html", None, None) as test: + test_file = tmp_path / "test.html" + test_file.write_text("test") + test.add_from_file(str(test_file)) + replay_path = tmp_path / "test" + replay_path.mkdir() + test.time_limit = test_timelimit + test.dump(str(replay_path), include_details=True) # setup args - (tmp_path / "prefs.js").touch() args = mocker.Mock( fuzzmanager=False, idle_delay=0, @@ -373,6 +367,7 @@ def test_main_06(mocker, tmp_path, arg_timelimit, arg_timeout, test_timelimit, r input=str(replay_path), min_crashes=2, no_harness=True, + pernosco=False, prefs=None, relaunch=1, repeat=1, @@ -384,3 +379,65 @@ def test_main_06(mocker, tmp_path, arg_timelimit, arg_timeout, test_timelimit, r valgrind=False, ) assert ReplayManager.main(args) == result + + +@mark.parametrize( + "pernosco, rr, valgrind, no_harness", + [ + # No debugger enabled and no harness + (False, False, False, False), + # No debugger enabled and with harness + (False, False, False, True), + # Pernosco enabled + (True, False, False, False), + # rr enabled + (False, True, False, False), + # Valgrind enabled + (False, False, True, False), + ], # pylint: disable=invalid-name +) +def test_main_07(mocker, tmp_path, pernosco, rr, valgrind, no_harness): + """test ReplayManager.main() enable debuggers""" + mocker.patch("grizzly.common.runner.sleep", autospec=True) + # mock Sapphire.serve_path only + mocker.patch( + 
"grizzly.replay.replay.Sapphire.serve_path", + autospec=True, + return_value=(SERVED_ALL, ["test.html"]), # passed to Target.detect_failure + ) + # setup Target + load_target = mocker.patch("grizzly.replay.replay.load_plugin") + target = mocker.Mock(spec=Target, binary="bin", launch_timeout=30) + target.RESULT_NONE = Target.RESULT_NONE + target.detect_failure.return_value = Target.RESULT_NONE + load_target.return_value.return_value = target + # setup args + (tmp_path / "test.html").touch() + args = mocker.Mock( + fuzzmanager=False, + idle_delay=0, + idle_threshold=0, + ignore=["fake", "timeout"], + input=str(tmp_path / "test.html"), + min_crashes=2, + no_harness=no_harness, + pernosco=pernosco, + prefs=None, + relaunch=1, + repeat=1, + rr=rr, + sig=None, + test_index=None, + time_limit=10, + timeout=None, + valgrind=valgrind, + ) + # maximum one debugger allowed at a time + assert sum((pernosco, rr, valgrind)) < 2, "test broken!" + assert ReplayManager.main(args) == Session.EXIT_FAILURE + assert target.detect_failure.call_count == 1 + assert target.close.call_count == 2 + assert target.cleanup.call_count == 1 + assert load_target.return_value.call_args[-1]["pernosco"] == pernosco + assert load_target.return_value.call_args[-1]["rr"] == rr + assert load_target.return_value.call_args[-1]["valgrind"] == valgrind diff --git a/grizzly/test_main.py b/grizzly/test_main.py index 8ec5a370..ff7b589c 100644 --- a/grizzly/test_main.py +++ b/grizzly/test_main.py @@ -209,3 +209,43 @@ def test_main_04(mocker, arg_testlimit, arg_timeout, exit_code): args.time_limit = arg_testlimit args.timeout = arg_timeout assert main(args) == exit_code + + +@mark.parametrize( + "pernosco, rr, valgrind", + [ + # No debugger enabled + (False, False, False), + # Pernosco enabled + (True, False, False), + # rr enabled + (False, True, False), + # Valgrind enabled + (False, False, True), + ], # pylint: disable=invalid-name +) +def test_main_05(mocker, pernosco, rr, valgrind): + """test enabling debuggers""" + fake_adapter = mocker.NonCallableMock(spec_set=Adapter) + fake_adapter.RELAUNCH = 1 + fake_adapter.TIME_LIMIT = 10 + fake_target = mocker.Mock(spec_set=Target) + plugin_loader = mocker.patch("grizzly.main.load_plugin", autospec=True) + plugin_loader.side_effect = ( + mocker.Mock(spec_set=Adapter, return_value=fake_adapter), + fake_target, + ) + fake_session = mocker.patch("grizzly.main.Session", autospec=True) + fake_session.return_value.server = mocker.Mock(spec_set=Sapphire) + fake_session.EXIT_SUCCESS = Session.EXIT_SUCCESS + args = FakeArgs() + args.adapter = "fake" + # maximum one debugger allowed at a time + assert sum((pernosco, rr, valgrind)) < 2, "test broken!" + args.pernosco = pernosco + args.rr = rr + args.valgrind = valgrind + assert main(args) == Session.EXIT_SUCCESS + assert fake_target.call_args[-1]["pernosco"] == pernosco + assert fake_target.call_args[-1]["rr"] == rr + assert fake_target.call_args[-1]["valgrind"] == valgrind From e9512c3227e4af7981481bc6fe85dc667448abbe Mon Sep 17 00:00:00 2001 From: Jesse Schwartzentruber Date: Tue, 22 Jun 2021 15:12:16 -0400 Subject: [PATCH 269/531] Fix platform specific argument availability in main. --- grizzly/args.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/grizzly/args.py b/grizzly/args.py index 90ed6650..05e138f9 100644 --- a/grizzly/args.py +++ b/grizzly/args.py @@ -126,6 +126,8 @@ def __init__(self): self.launcher_grp.add_argument( "--xvfb", action="store_true", help="Use Xvfb." 
) + else: + self.parser.set_defaults(xvfb=False) self.reporter_grp = self.parser.add_argument_group("Reporter Arguments") self.reporter_grp.add_argument( @@ -154,6 +156,12 @@ def __init__(self): dbg_group.add_argument( "--valgrind", action="store_true", help="Use Valgrind." ) + else: + self.parser.set_defaults( + pernosco=False, + rr=False, + valgrind=False, + ) self.parser.epilog = ( "For addition help check out the wiki:" From b832ac987325544f3d3e89e8e1e45fa8b64fea55 Mon Sep 17 00:00:00 2001 From: Jesse Schwartzentruber Date: Thu, 10 Jun 2021 17:21:35 -0400 Subject: [PATCH 270/531] Add tool logging for `grizzly.reduce.crash` main. --- grizzly/reduce/crash.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/grizzly/reduce/crash.py b/grizzly/reduce/crash.py index 41ef0124..8b8e62c2 100644 --- a/grizzly/reduce/crash.py +++ b/grizzly/reduce/crash.py @@ -34,6 +34,10 @@ def main(args): bucket = Bucket(crash.bucket) args.sig = str(bucket.signature_path()) if args.tool is None: + LOG.info( + "Setting default --tool=%s from CrashEntry", + crash.tool, + ) args.tool = crash.tool # call grizzly.reduce From c500cd3897dd87e1819eb31c66d2f4f558eb9aa2 Mon Sep 17 00:00:00 2001 From: Jesse Schwartzentruber Date: Tue, 15 Jun 2021 15:56:18 -0400 Subject: [PATCH 271/531] [reduce] Don't alter testcase newlines in beautify. --- grizzly/reduce/strategies/beautify.py | 40 +++++++++++++++++++-------- grizzly/reduce/test_strategies.py | 15 ++++++++-- setup.cfg | 2 +- 3 files changed, 41 insertions(+), 16 deletions(-) diff --git a/grizzly/reduce/strategies/beautify.py b/grizzly/reduce/strategies/beautify.py index c68e397a..2243146f 100644 --- a/grizzly/reduce/strategies/beautify.py +++ b/grizzly/reduce/strategies/beautify.py @@ -8,6 +8,7 @@ HAVE_CSSBEAUTIFIER (bool): True if `cssbeautifier` module is available. HAVE_JSBEAUTIFIER (bool): True if `jsbeautifier` module is available. """ +import os import re from abc import ABC, abstractmethod from logging import getLogger @@ -118,11 +119,12 @@ def update(self, success, served=None): @classmethod @abstractmethod - def beautify_bytes(cls, data): + def beautify_bytes(cls, data, linesep=b"\n"): """Perform beautification on a code buffer. Arguments: data (bytes): The code data to be beautified. + linesep (bytes): Newline sequence used in this testcase. Returns: bytes: The beautified result. 
@@ -212,6 +214,14 @@ def __iter__(self): lith_tc.load(file) raw = b"".join(lith_tc.parts) + # try to determine line-endings from testcase before resorting to os.linesep + if b"\r\n" in lith_tc.before or b"\r\n" in lith_tc.after or b"\r\n" in raw: + linesep = b"\r\n" + elif b"\n" in lith_tc.before or b"\n" in lith_tc.after or b"\n" in raw: + linesep = b"\n" + else: + linesep = os.linesep.encode("utf-8") + with file.open("wb") as testcase_fp: last = 0 any_beautified = False @@ -221,13 +231,13 @@ def __iter__(self): testcase_fp.write(before) to_beautify = raw[start:end] LOG.debug("before: %r", to_beautify) - beautified = self.beautify_bytes(to_beautify) + beautified = self.beautify_bytes(to_beautify, linesep) LOG.debug("after: %r", beautified) if beautified: - if before and not before.endswith(b"\n"): - beautified = b"\n" + beautified - if not beautified.endswith(b"\n"): - beautified = beautified + b"\n" + if before and not before.endswith(linesep): + beautified = linesep + beautified + if not beautified.endswith(linesep): + beautified = beautified + linesep testcase_fp.write(beautified) if beautified == to_beautify: @@ -293,18 +303,21 @@ class CSSBeautify(_BeautifyStrategy): tag_name = "style" @classmethod - def beautify_bytes(cls, data): + def beautify_bytes(cls, data, linesep=b"\n"): """Perform CSS beautification on a code buffer. Arguments: data (bytes): The code data to be beautified. + linesep (bytes): Newline sequence used in this testcase. Returns: bytes: The beautified result. """ assert cls.import_available + linesep_u = linesep.decode("utf-8") data = data.decode("utf-8", errors="surrogateescape") - return cssbeautifier.beautify(data, cls.opts).encode( + beautified = cssbeautifier.beautify(data, cls.opts) + return linesep_u.join(beautified.splitlines()).encode( "utf-8", errors="surrogateescape" ) @@ -323,23 +336,26 @@ class JSBeautify(_BeautifyStrategy): native_extension = ".js" opts = None tag_name = "script" - try_catch_re = re.compile(r"(\s*try {)\n\s*(.*)\n\s*(}\s*catch.*)") + try_catch_re = re.compile(r"(\s*try {)\r?\n\s*(.*)\r?\n\s*(}\s*catch.*)") @classmethod - def beautify_bytes(cls, data): + def beautify_bytes(cls, data, linesep=b"\n"): """Perform JS beautification on a code buffer. Arguments: data (bytes): The code data to be beautified. + linesep (bytes): Newline sequence used in this testcase. Returns: bytes: The beautified result. 
""" assert HAVE_JSBEAUTIFIER + linesep_u = linesep.decode("utf-8") data = data.decode("utf-8", errors="surrogateescape") - beautified = jsbeautifier.beautify(data, cls.opts) # All try/catch pairs will be expanded on their own lines # Collapse these pairs when only a single instruction is contained within beautified = cls.try_catch_re.sub(r"\1 \2 \3", beautified) - return beautified.encode("utf-8", errors="surrogateescape") + return linesep_u.join(beautified.splitlines()).encode( + "utf-8", errors="surrogateescape" + ) diff --git a/grizzly/reduce/test_strategies.py b/grizzly/reduce/test_strategies.py index 142e3163..bc7835f8 100644 --- a/grizzly/reduce/test_strategies.py +++ b/grizzly/reduce/test_strategies.py @@ -295,11 +295,15 @@ def replay_run(testcases, _time_limit, **_): # test DDBEGIN/END respected in .js file pytest.param( *BeautifyStrategyParams( - test_data="try{\n//DDBEGIN\n'fluff';'required'\n//DDEND\n}catch(e){}\n", + test_data=( + "try{\r\n//DDBEGIN\r\n'fluff';'required'\r\n//DDEND\r\n" + "}catch(e){}\r\n\r\n" + ), test_name="test.js", expected_run_calls=1, expected_results={ - "try{\n//DDBEGIN\n'fluff';\n'required'\n//DDEND\n}catch(e){}\n" + "try{\r\n//DDBEGIN\r\n'fluff';\r\n'required'\r\n//DDEND\r\n" + "}catch(e){}\r\n" }, expected_num_reports=2, strategies=["jsbeautify"], @@ -612,7 +616,12 @@ def replay_run(testcases, _time_limit, **_): assert replayer.run.call_count == expected_run_calls assert set(log_path.iterdir()) == {log_path / "reports"} - tests = {test.read_text() for test in log_path.glob("reports/*-*/" + test_name)} + # don't use test.read_text() because that converts newlines, and some tests need + # unaltered newlines + tests = { + test.read_bytes().decode("utf-8") + for test in log_path.glob("reports/*-*/" + test_name) + } assert tests == expected_results assert ( sum(1 for _ in (log_path / "reports").iterdir()) == expected_num_reports diff --git a/setup.cfg b/setup.cfg index 49d13f5f..0d88ed05 100644 --- a/setup.cfg +++ b/setup.cfg @@ -25,7 +25,7 @@ install_requires = ffpuppet >= 0.8.1 FuzzManager jsbeautifier - lithium-reducer >= 0.5 + lithium-reducer >= 0.5.3 prefpicker psutil >= 4.4.0 packages = From a8e3a66266472a7c8faf5a2f463c49295c6288b2 Mon Sep 17 00:00:00 2001 From: Jesse Schwartzentruber Date: Tue, 22 Jun 2021 23:50:24 -0400 Subject: [PATCH 272/531] Update beautify tests --- grizzly/reduce/test_strategies.py | 427 +-------------------- grizzly/reduce/test_strategies_beautify.py | 261 +++++++++++++ 2 files changed, 262 insertions(+), 426 deletions(-) create mode 100644 grizzly/reduce/test_strategies_beautify.py diff --git a/grizzly/reduce/test_strategies.py b/grizzly/reduce/test_strategies.py index bc7835f8..8562d804 100644 --- a/grizzly/reduce/test_strategies.py +++ b/grizzly/reduce/test_strategies.py @@ -3,8 +3,7 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # pylint: disable=protected-access -"""Unit tests for `grizzly.reduce.strategies`. -""" +"""Unit tests for `grizzly.reduce.strategies`.""" from collections import namedtuple from logging import getLogger @@ -18,12 +17,6 @@ from ..target import Target from . 
import ReduceManager from .strategies import Strategy, _load_strategies -from .strategies.beautify import ( - HAVE_CSSBEAUTIFIER, - HAVE_JSBEAUTIFIER, - CSSBeautify, - JSBeautify, -) LOG = getLogger(__name__) pytestmark = pytest.mark.usefixtures( @@ -210,424 +203,6 @@ def replay_run(testcases, _time_limit, **_): ), list((log_path / "reports").iterdir()) -BeautifyStrategyParams = namedtuple( - "BeautifyStrategyParams", - "test_data, test_name, expected_run_calls, expected_results, expected_num_reports," - "strategies, have_beautifiers", -) - - -@pytest.mark.parametrize( - BeautifyStrategyParams._fields, - [ - # test beautify a .js file - pytest.param( - *BeautifyStrategyParams( - test_data="try{'fluff';'required'}catch(e){}\n", - test_name="test.js", - expected_run_calls=1, - expected_results={ - "try {\n 'fluff';\n 'required'\n} catch (e) {}\n" - }, - expected_num_reports=2, - strategies=["jsbeautify"], - have_beautifiers=True, - ), - marks=pytest.mark.skipif( - not HAVE_JSBEAUTIFIER, reason="jsbeautifier required" - ), - ), - # test beautify js embedded in html - pytest.param( - *BeautifyStrategyParams( - test_data="\n", - test_name="test.html", - expected_run_calls=1, - expected_results={ - "\n" - }, - expected_num_reports=2, - strategies=["jsbeautify"], - have_beautifiers=True, - ), - marks=pytest.mark.skipif( - not HAVE_JSBEAUTIFIER, reason="jsbeautifier required" - ), - ), - # test beautify multiple js embedded in html - pytest.param( - *BeautifyStrategyParams( - test_data="" - "\n", - test_name="test.html", - expected_run_calls=1, - expected_results={ - "\n" - }, - expected_num_reports=2, - strategies=["jsbeautify"], - have_beautifiers=True, - ), - marks=pytest.mark.skipif( - not HAVE_JSBEAUTIFIER, reason="jsbeautifier required" - ), - ), - # test beautify js embedded in html with no end - pytest.param( - *BeautifyStrategyParams( - test_data="\n", - test_name="test.html", - expected_run_calls=1, - expected_results={ - "\n\n" - }, - expected_num_reports=2, - strategies=["jsbeautify"], - have_beautifiers=True, - ), - marks=pytest.mark.skipif( - not HAVE_JSBEAUTIFIER, reason="jsbeautifier required" - ), - ), - # test DDBEGIN/END respected for js embedded in html, DD inside \n", - test_name="test.html", - expected_run_calls=1, - expected_results={ - "\n" - }, - expected_num_reports=2, - strategies=["jsbeautify"], - have_beautifiers=True, - ), - marks=pytest.mark.skipif( - not HAVE_JSBEAUTIFIER, reason="jsbeautifier required" - ), - ), - # test DDBEGIN/END respected for js embedded in html, DD straddle before - # - pytest.param( - *BeautifyStrategyParams( - test_data="\n\n", - test_name="test.html", - expected_run_calls=1, - expected_results={ - "\n\n" - }, - expected_num_reports=2, - strategies=["jsbeautify"], - have_beautifiers=True, - ), - marks=pytest.mark.skipif( - not HAVE_JSBEAUTIFIER, reason="jsbeautifier required" - ), - ), - # test beautify js embedded in html (no \n", + "\n", + id="#6: test beautify js embedded in html", + ), + pytest.param( + "\n", + "\n\n", + id="#8: test DDBEGIN/END respected for js embedded in html, " + "DD outside \n", + "\n", + id="#9: test DDBEGIN/END respected for js embedded in html, " + "DD inside \n\n", + "\n\n", + id="#12: test DDBEGIN/END respected for js embedded in html, " + "DD straddle after ", + ), + pytest.param( + "try{'a';'R'}catch(e){}\n", + "try{'a';'R'}catch(e){}\n", + id="#13: test beautify js embedded in html (no \n", + "" + "\n", + id="#14: test beautify multiple js embedded in html", + ), + ], +) +def test_beautify_js_4(test_data, 
reduced, mocker): + _test_beautify( + JSBeautify, + lambda x: "Q" in x and "R" in x, + "test.html", + test_data, + reduced, + mocker, + ) + + +@pytest.mark.parametrize( + "test_data, reduced", + [ + pytest.param( + "*,#a{a:0;R:1}\n", + "*,\n#a {\n a: 0;\n R: 1\n}\n", + id="#0: test beautify a .css file", + ), + pytest.param( + "*,\r\n#a{a:0;R:1}\n", + "*,\r\n#a {\r\n a: 0;\r\n R: 1\r\n}\r\n", + id="#1: test that mixed crlf/lf gets converted to crlf", + ), + pytest.param( + "*,\r#a{a:0;R:1}\n", + "*,\n#a {\n a: 0;\n R: 1\n}\n", + id="#2: test that mixed cr/lf gets converted to lf", + ), + pytest.param( + "*,\x1e#a{a:0;R:1}\x1e", + linesep.join(("*,", "#a {", " a: 0;", " R: 1", "}", "")), + id="#3: test that other line-ending gets converted to lf", + ), + ], +) +def test_beautify_css_1(test_data, reduced, mocker): + _test_beautify( + CSSBeautify, lambda x: "R" in x, "test.css", test_data, reduced, mocker + ) + + +@pytest.mark.parametrize( + "test_data, reduced", + [ + pytest.param( + "\n", + "\n", + id="#4: test beautify css embedded in html", + ), + pytest.param( + "\n", + "\n", + id="#6: test already beautified css (beautify does nothing)", + ), + pytest.param( + "*,#a{a:0;R:1}\n", + "*,#a{a:0;R:1}\n", + id="#7: test beautify css embedded in html (no