update CONTRIBUTING.md to explain compiling and adding snapshot tests

Branch: pull/1844/head
Author: alpharush (2 years ago)
Parent: d3e189fb80
Commit: f8794a3958
Changed files:
- CONTRIBUTING.md (10 lines changed)
- tests/e2e/detectors/test_detectors.py (66 lines changed)

CONTRIBUTING.md
@@ -79,10 +79,12 @@ How do I know what kind of test(s) to write?
For each new detector, at least one regression test must be present.
1. Create a test in `tests/e2e/detectors`
2. Update `ALL_TEST` in `tests/e2e/detectors/test_detectors.py`
3. Run `python tests/e2e/detectors/test_detectors.py --generate`. This will generate the json artifacts in `tests/expected_json`. Add the generated files to git. If updating an existing detector, identify the respective json artifacts and then delete them, or run `python ./tests/test_detectors.py --overwrite` instead.
4. Run `pytest tests/e2e/detectors/test_detectors.py` and check that everything worked.
1. Create a folder in `tests/e2e/detectors/test_data` with the detector's argument name.
2. Create a test contract in `tests/e2e/detectors/test_data/<detector_name>/`.
3. Update `ALL_TEST` in `tests/e2e/detectors/test_detectors.py`
4. Run `python tests/e2e/detectors/test_detectors.py --compile` to create a zip file of the compilation artifacts.
5. Run `pytest tests/e2e/detectors/test_detectors.py --insta update-new`. This will generate a snapshot of the detector output in `tests/e2e/detectors/snapshots/`. If updating an existing detector, run `pytest tests/e2e/detectors/test_detectors.py --insta review` and accept or reject the updates.
6. Run `pytest tests/e2e/detectors/test_detectors.py` to ensure everything worked. Then, add and commit the files to git.
> ##### Helpful commands for detector tests
>
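
To make step 3 above concrete, a new detector is registered by appending a `Test` entry to `ALL_TEST` (the `ALL_TEST_OBJECTS` list in `tests/e2e/detectors/test_detectors.py`). A minimal sketch follows; the detector class `MyDetector`, the file name, and the solc version are placeholders, and the constructor arguments mirror the `Test` class shown in the diff below.

```python
# Hypothetical ALL_TEST entry in tests/e2e/detectors/test_detectors.py.
# Test is the helper class defined in that file, and all_detectors is
# already imported there; MyDetector, my_detector.sol, and 0.8.15 are
# placeholders for illustration only.
ALL_TEST_OBJECTS = [
    # ... existing entries ...
    Test(
        all_detectors.MyDetector,  # detector class under test (hypothetical)
        "my_detector.sol",         # contract in tests/e2e/detectors/test_data/<argument-name>/0.8.15/
        "0.8.15",                  # solc version used to compile the contract
    ),
]
```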

tests/e2e/detectors/test_detectors.py
@@ -33,7 +33,6 @@ class Test: # pylint: disable=too-few-public-methods
"""
self.detector = detector
self.test_file = test_file
self.expected_result = test_file + "." + solc_ver + "." + detector.__name__ + ".json"
self.solc_ver = solc_ver
if additional_files is None:
self.additional_files = []
@@ -44,6 +43,11 @@ class Test: # pylint: disable=too-few-public-methods
def set_solc(test_item: Test):  # pylint: disable=too-many-lines
    # hacky hack hack to pick the solc version we want
    env = dict(os.environ)

    from solc_select import solc_select

    if not solc_select.artifact_path(test_item.solc_ver).exists():
        print("Installing solc version", test_item.solc_ver)
        solc_select.install_artifacts([test_item.solc_ver])
    env["SOLC_VERSION"] = test_item.solc_ver
    os.environ.clear()
    os.environ.update(env)
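
If you prefer to pre-install the compiler versions instead of relying on the lazy install in `set_solc` above, the same solc-select Python API can be called directly. A minimal sketch, assuming the version list below (a placeholder, not the repo's actual matrix):

```python
# Pre-install solc binaries with solc-select's Python API so test runs do
# not download compilers on the fly; this reuses the artifact_path() /
# install_artifacts() calls from set_solc above. Versions are placeholders.
from solc_select import solc_select

REQUIRED_SOLCS = ["0.4.25", "0.6.12", "0.8.15"]  # placeholder versions

for version in REQUIRED_SOLCS:
    if not solc_select.artifact_path(version).exists():
        print("Installing solc version", version)
        solc_select.install_artifacts([version])
```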
@@ -1638,26 +1642,12 @@ ALL_TEST_OBJECTS = [
    ),
]

def get_all_tests() -> List[Test]:
    # installed_solcs = set(get_installed_solc_versions())
    # required_solcs = {test.solc_ver for test in ALL_TEST_OBJECTS}
    # missing_solcs = list(required_solcs - installed_solcs)
    # if missing_solcs:
    #     install_solc_versions(missing_solcs)
    return ALL_TEST_OBJECTS

ALL_TESTS = get_all_tests()

GENERIC_PATH = "/GENERIC_PATH"

TEST_DATA_DIR = Path(__file__).resolve().parent / "test_data"
# pylint: disable=too-many-locals
@pytest.mark.parametrize("test_item", ALL_TESTS, ids=id_test)
@pytest.mark.parametrize("test_item", ALL_TEST_OBJECTS, ids=id_test)
def test_detector(test_item: Test, snapshot):
    test_dir_path = Path(
        TEST_DATA_DIR,
@@ -1681,38 +1671,6 @@ def test_detector(test_item: Test, snapshot):
    assert snapshot() == actual_output
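
The assertion above is pytest-insta's snapshot pattern: `pytest --insta update-new` records the value under `tests/e2e/detectors/snapshots/`, `--insta review` walks through changed snapshots, and a plain `pytest` run fails if the detector output drifts. A minimal standalone illustration, with a made-up test name and output string:

```python
# Minimal pytest-insta example mirroring the assertion in test_detector above.
# `pytest --insta update-new` records the value as a snapshot file; later plain
# `pytest` runs compare against it and fail on any drift. The test name and the
# output string here are illustrative only.
def test_detector_output_sketch(snapshot):
    actual_output = "MyDetector: finding in Contract.f() (example output)"
    assert snapshot() == actual_output
```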
def _generate_test(test_item: Test, skip_existing=False):
    test_dir_path = Path(
        TEST_DATA_DIR,
        test_item.detector.ARGUMENT,
        test_item.solc_ver,
    ).as_posix()
    test_file_path = Path(test_dir_path, test_item.test_file).as_posix()
    expected_result_path = Path(test_dir_path, test_item.expected_result).absolute().as_posix()

    if skip_existing:
        if os.path.isfile(expected_result_path):
            return

    set_solc(test_item)
    sl = Slither(test_file_path)
    sl.register_detector(test_item.detector)
    results = sl.run_detectors()

    results_as_string = json.dumps(results)
    test_file_path = test_file_path.replace("\\", "\\\\")
    results_as_string = results_as_string.replace(test_file_path, GENERIC_PATH)

    for additional_file in test_item.additional_files:
        additional_path = Path(test_dir_path, additional_file).absolute().as_posix()
        additional_path = additional_path.replace("\\", "\\\\")
        results_as_string = results_as_string.replace(additional_path, GENERIC_PATH)
    results = json.loads(results_as_string)

    with open(expected_result_path, "w", encoding="utf8") as f:
        f.write(json.dumps(results, indent=4))

def _generate_compile(test_item: Test, skip_existing=False):
    test_dir_path = Path(
        TEST_DATA_DIR,
@@ -1733,13 +1691,9 @@ def _generate_compile(test_item: Test, skip_existing=False):
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("To generate the json artifacts run\n\tpython tests/test_detectors.py --generate")
    elif sys.argv[1] == "--generate":
        for next_test in ALL_TESTS:
            _generate_test(next_test, skip_existing=True)
    elif sys.argv[1] == "--overwrite":
        for next_test in ALL_TESTS:
            _generate_test(next_test)
        print(
            "To generate the zip artifacts run\n\tpython tests/e2e/tests/test_detectors.py --compile"
        )
    elif sys.argv[1] == "--compile":
        for next_test in ALL_TESTS:
        for next_test in ALL_TEST_OBJECTS:
            _generate_compile(next_test, skip_existing=True)
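
`_generate_compile`, invoked by the `--compile` branch above, compiles each test contract and saves the compilation artifacts as a zip alongside it (its body is truncated in this view). A rough sketch of that idea, assuming crytic-compile exposes `save_to_zip` in `crytic_compile.utils.zip`; the function name, zip naming scheme, and example path are assumptions, not the repo's code:

```python
# Hedged sketch of the --compile step: compile a test contract with
# crytic-compile and archive the artifacts as a zip so the test run does
# not need a local solc. Names and the zip naming scheme are assumptions.
import os

from crytic_compile import CryticCompile
from crytic_compile.utils.zip import save_to_zip


def generate_compile_sketch(test_file_path: str, solc_ver: str, skip_existing: bool = False) -> None:
    zip_artifact_path = f"{test_file_path}-{solc_ver}.zip"  # assumed naming
    if skip_existing and os.path.isfile(zip_artifact_path):
        return
    compilation = CryticCompile(test_file_path)  # compiles with the solc selected via SOLC_VERSION
    save_to_zip([compilation], zip_artifact_path)


# Placeholder path for illustration only.
generate_compile_sketch("tests/e2e/detectors/test_data/my-detector/0.8.15/my_detector.sol", "0.8.15")
```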
