Merge pull request #1555 from crytic/feat/test-ids

Feat/test ids
Feist Josselin committed 2 years ago via GitHub
commit ceba99f48e
Files changed:

1. CONTRIBUTING.md (9 changes)
2. tests/test_ast_parsing.py (34 changes)
3. tests/test_detectors.py (2 changes)

CONTRIBUTING.md:

```diff
@@ -64,7 +64,10 @@ For each new detector, at least one regression tests must be present.
 - If updating an existing detector, identify the respective json artifacts and then delete them, or run `python ./tests/test_detectors.py --overwrite` instead.
 - Run `pytest ./tests/test_detectors.py` and check that everything worked.
-To see the tests coverage, run `pytest tests/test_detectors.py --cov=slither/detectors --cov-branch --cov-report html`
+To see the tests coverage, run `pytest tests/test_detectors.py --cov=slither/detectors --cov-branch --cov-report html`.
+To run tests for a specific detector, run `pytest tests/test_detectors.py -k ReentrancyReadBeforeWritten` (the detector's class name is the argument).
+To run tests for a specific version, run `pytest tests/test_detectors.py -k 0.7.6`.
+The IDs of tests can be inspected using `pytest tests/test_detectors.py --collect-only`.
 
 ### Parser tests
 
 - Create a test in `tests/ast-parsing`
@@ -73,6 +76,10 @@ To see the tests coverage, run `pytest tests/test_detectors.py --cov=slither/detectors --cov-branch --cov-report html`
 - Run `pytest ./tests/test_ast_parsing.py` and check that everything worked.
 To see the tests coverage, run `pytest tests/test_ast_parsing.py --cov=slither/solc_parsing --cov-branch --cov-report html`
+To run tests for a specific test case, run `pytest tests/test_ast_parsing.py -k user_defined_value_type` (the filename is the argument).
+To run tests for a specific version, run `pytest tests/test_ast_parsing.py -k 0.8.12`.
+To run tests for a specific compiler json format, run `pytest tests/test_ast_parsing.py -k legacy` (can be legacy or compact).
+The IDs of tests can be inspected using `pytest tests/test_ast_parsing.py --collect-only`.
 
 ### Synchronization with crytic-compile
 
 By default, `slither` follows either the latest version of crytic-compile in pip, or `crytic-compile@master` (look for dependencies in [`setup.py`](./setup.py)). If crytic-compile development comes with breaking changes, the process to update `slither` is:
```
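The new CONTRIBUTING lines rely on pytest matching `-k` expressions as substrings of test IDs. A minimal, self-contained sketch of that behavior (the detector names and versions below are placeholders, not Slither's real matrix):

```python
# sketch_test_ids.py -- illustrative only; detector names and versions are made up.
import pytest

# Stand-in for a detector/version matrix. String parameters are rendered into
# the test ID directly, joined with "-", e.g. "ReentrancyReadBeforeWritten-0.7.6".
CASES = [
    ("ReentrancyReadBeforeWritten", "0.7.6"),
    ("ReentrancyReadBeforeWritten", "0.8.12"),
    ("UninitializedStorageVars", "0.7.6"),
]


@pytest.mark.parametrize("detector, version", CASES)
def test_detect(detector, version):
    assert detector and version
```

Running `pytest sketch_test_ids.py --collect-only` lists IDs like `test_detect[ReentrancyReadBeforeWritten-0.7.6]`, and `pytest sketch_test_ids.py -k 0.7.6` selects just the 0.7.6 rows, since `-k` matches substrings of those IDs.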

tests/test_ast_parsing.py:

```diff
@@ -445,20 +445,21 @@ except OSError:
     pass
 
 
-@pytest.mark.parametrize("test_item", ALL_TESTS, ids=lambda x: x.test_file)
-def test_parsing(test_item: Test):
-    flavors = ["compact"]
-    if not test_item.disable_legacy:
-        flavors += ["legacy"]
-    for version, flavor in test_item.versions_with_flavors:
-        test_file = os.path.join(
-            TEST_ROOT, "compile", f"{test_item.test_file}-{version}-{flavor}.zip"
-        )
-        expected_file = os.path.join(
-            TEST_ROOT, "expected", f"{test_item.test_file}-{version}-{flavor}.json"
-        )
-        cc = load_from_zip(test_file)[0]
+def pytest_generate_tests(metafunc):
+    test_cases = []
+    for test_item in ALL_TESTS:
+        for version, flavor in test_item.versions_with_flavors:
+            test_cases.append((test_item.test_file, version, flavor))
+    metafunc.parametrize("test_file, version, flavor", test_cases)
+
+
+class TestASTParsing:
+    # pylint: disable=no-self-use
+    def test_parsing(self, test_file, version, flavor):
+        actual = os.path.join(TEST_ROOT, "compile", f"{test_file}-{version}-{flavor}.zip")
+        expected = os.path.join(TEST_ROOT, "expected", f"{test_file}-{version}-{flavor}.json")
+        cc = load_from_zip(actual)[0]
         sl = Slither(
             cc,
```
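The core of the change is the `pytest_generate_tests` hook: the per-version loop moves out of the test body and into collection time, so every (file, version, flavor) combination becomes its own test item with its own ID. A minimal sketch of the pattern, with a made-up two-entry matrix standing in for `ALL_TESTS`:

```python
# sketch_generate_tests.py -- illustrative; the matrix below is invented.
# The hook runs at collection time, once per test function in the module.
ALL_TESTS = {
    "custom_error": [("0.8.12", "compact")],
    "top_level": [("0.7.6", "legacy"), ("0.7.6", "compact")],
}


def pytest_generate_tests(metafunc):
    if {"test_file", "version", "flavor"} <= set(metafunc.fixturenames):
        cases = [
            (name, version, flavor)
            for name, combos in ALL_TESTS.items()
            for version, flavor in combos
        ]
        # Each tuple becomes one test item, e.g. test_parsing[top_level-0.7.6-legacy]
        metafunc.parametrize("test_file, version, flavor", cases)


def test_parsing(test_file, version, flavor):
    # In the real test this is where the zip is loaded and compared.
    assert test_file and version and flavor
```

Because the loop no longer lives inside one test, a failing combination no longer masks the remaining ones, and each combination is individually addressable with `-k`.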
tests/test_ast_parsing.py (continued):

```diff
@@ -470,26 +471,25 @@ def test_parsing(test_item: Test):
         actual = generate_output(sl)
 
         try:
-            with open(expected_file, "r", encoding="utf8") as f:
+            with open(expected, "r", encoding="utf8") as f:
                 expected = json.load(f)
         except OSError:
             pytest.xfail("the file for this test was not generated")
             raise
 
         diff = DeepDiff(expected, actual, ignore_order=True, verbose_level=2, view="tree")
         if diff:
             for change in diff.get("values_changed", []):
                 path_list = re.findall(r"\['(.*?)'\]", change.path())
                 path = "_".join(path_list)
                 with open(
-                    f"test_artifacts/{test_item.test_file}_{path}_expected.dot",
+                    f"test_artifacts/{test_file}_{path}_expected.dot",
                     "w",
                     encoding="utf8",
                 ) as f:
                     f.write(change.t1)
                 with open(
-                    f"test_artifacts/{test_item.test_file}_{version}_{flavor}_{path}_actual.dot",
+                    f"test_artifacts/{test_file}_{version}_{flavor}_{path}_actual.dot",
                     "w",
                     encoding="utf8",
                 ) as f:
```
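On a mismatch, the test dumps the differing values as `.dot` artifacts via DeepDiff's tree view, where each change object exposes the old value (`t1`), the new value (`t2`), and a `path()` into the nested structure. A small standalone illustration (the dictionaries are invented):

```python
# Illustrative only; the data is invented.
from deepdiff import DeepDiff

expected = {"C": {"f()": "digraph f { A -> B }"}}
actual = {"C": {"f()": "digraph f { A -> C }"}}

diff = DeepDiff(expected, actual, ignore_order=True, verbose_level=2, view="tree")
for change in diff.get("values_changed", []):
    print(change.path())               # root['C']['f()']
    print(change.t1, "->", change.t2)  # old graph -> new graph
```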

tests/test_detectors.py:

```diff
@@ -52,7 +52,7 @@ def set_solc(test_item: Test):  # pylint: disable=too-many-lines
 def id_test(test_item: Test):
-    return f"{test_item.detector}: {test_item.solc_ver}/{test_item.test_file}"
+    return f"{test_item.detector.__name__}-{test_item.solc_ver}-{test_item.test_file}"
 
 
 ALL_TEST_OBJECTS = [
```
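The `id_test` change matters because interpolating the detector class directly yields its repr rather than its name, and the old `:`/`/` separators are awkward inside `-k` expressions. A quick illustration (the class below is a dummy stand-in, not the real detector):

```python
class ReentrancyReadBeforeWritten:  # dummy stand-in for the real detector class
    pass

# Old format interpolates the class object itself:
print(f"{ReentrancyReadBeforeWritten}: 0.7.6/reentrancy.sol")
# -> <class '__main__.ReentrancyReadBeforeWritten'>: 0.7.6/reentrancy.sol

# New format uses the bare class name and dash separators:
print(f"{ReentrancyReadBeforeWritten.__name__}-0.7.6-reentrancy.sol")
# -> ReentrancyReadBeforeWritten-0.7.6-reentrancy.sol
```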
