Use temp files to store test output, making it easy to compare results when output changes

pull/101/head
freewind 7 years ago
parent 7f3d848ce0
commit 2d4fe3956b
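
Every test touched by this commit follows the same write-then-compare pattern; below is a minimal sketch of it (the path constants and the helper mirror the ones added to `tests/__init__.py` further down, while `produce_output` and the artifact name are placeholders, not part of the commit):

```python
from pathlib import Path
from unittest import TestCase

# These mirror the constants added to tests/__init__.py in this commit.
TESTDATA = Path("tests") / "testdata"
TESTDATA_OUTPUTS_EXPECTED = TESTDATA / "outputs_expected"   # committed reference outputs
TESTDATA_OUTPUTS_CURRENT = TESTDATA / "outputs_current"     # git-ignored, recreated on every run


def compare_files_error_message(expected, current):
    # On failure, point the developer at the two files to diff.
    return "Expected output changes, compare the following files to see differences: \n- {}\n- {}\n".format(
        str(expected), str(current))


def produce_output():
    # Placeholder for the real work (disassembly, report rendering, graph generation).
    return "...generated output..."


class OutputComparisonSketch(TestCase):
    def test_artifact_matches_expected(self):
        # "example.sol.easm" is a placeholder name; the real tests derive one
        # per input contract in tests/testdata/inputs.
        output_expected = TESTDATA_OUTPUTS_EXPECTED / "example.sol.easm"
        output_current = TESTDATA_OUTPUTS_CURRENT / "example.sol.easm"

        # Always write the current output to disk, so it can be inspected or
        # promoted to outputs_expected when a change is intentional.
        output_current.write_text(produce_output())

        self.assertEqual(output_expected.read_text(), output_current.read_text(),
                         compare_files_error_message(output_expected, output_current))
```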
Changed files:

   1  .gitignore
   6  README_DEV.md
   2  all_tests.sh
   3  coverage_report.sh
   6  tests/__init__.py
  10  tests/disassembler_test.py
  20  tests/graph_test.py
  54  tests/report_test.py
   0  tests/testdata/outputs_expected/calls.sol.easm
   0  tests/testdata/outputs_expected/calls.sol.graph.html
   0  tests/testdata/outputs_expected/calls.sol.json
   0  tests/testdata/outputs_expected/calls.sol.markdown
   0  tests/testdata/outputs_expected/calls.sol.text
   0  tests/testdata/outputs_expected/ether_send.sol.easm
   0  tests/testdata/outputs_expected/ether_send.sol.graph.html
   0  tests/testdata/outputs_expected/ether_send.sol.json
   0  tests/testdata/outputs_expected/ether_send.sol.markdown
   0  tests/testdata/outputs_expected/ether_send.sol.text
   0  tests/testdata/outputs_expected/exceptions.sol.easm
   0  tests/testdata/outputs_expected/exceptions.sol.graph.html
   0  tests/testdata/outputs_expected/exceptions.sol.json
   0  tests/testdata/outputs_expected/exceptions.sol.markdown
   0  tests/testdata/outputs_expected/exceptions.sol.text
   0  tests/testdata/outputs_expected/kinds_of_calls.sol.easm
   0  tests/testdata/outputs_expected/kinds_of_calls.sol.graph.html
   0  tests/testdata/outputs_expected/kinds_of_calls.sol.json
   0  tests/testdata/outputs_expected/kinds_of_calls.sol.markdown
   0  tests/testdata/outputs_expected/kinds_of_calls.sol.text
   0  tests/testdata/outputs_expected/metacoin.sol.easm
   0  tests/testdata/outputs_expected/metacoin.sol.graph.html
   0  tests/testdata/outputs_expected/metacoin.sol.json
   0  tests/testdata/outputs_expected/metacoin.sol.markdown
   0  tests/testdata/outputs_expected/metacoin.sol.text
   0  tests/testdata/outputs_expected/multi_contracts.sol.easm
   0  tests/testdata/outputs_expected/multi_contracts.sol.graph.html
   0  tests/testdata/outputs_expected/multi_contracts.sol.json
   0  tests/testdata/outputs_expected/multi_contracts.sol.markdown
   0  tests/testdata/outputs_expected/multi_contracts.sol.text
   0  tests/testdata/outputs_expected/origin.sol.easm
   0  tests/testdata/outputs_expected/origin.sol.graph.html
   0  tests/testdata/outputs_expected/origin.sol.json
   0  tests/testdata/outputs_expected/origin.sol.markdown
   0  tests/testdata/outputs_expected/origin.sol.text
   0  tests/testdata/outputs_expected/returnvalue.sol.easm
   0  tests/testdata/outputs_expected/returnvalue.sol.graph.html
   0  tests/testdata/outputs_expected/returnvalue.sol.json
   0  tests/testdata/outputs_expected/returnvalue.sol.markdown
   0  tests/testdata/outputs_expected/returnvalue.sol.text
   0  tests/testdata/outputs_expected/rubixi.sol.easm
   0  tests/testdata/outputs_expected/rubixi.sol.graph.html
   0  tests/testdata/outputs_expected/rubixi.sol.json
   0  tests/testdata/outputs_expected/rubixi.sol.markdown
   0  tests/testdata/outputs_expected/rubixi.sol.text
   0  tests/testdata/outputs_expected/suicide.sol.easm
   0  tests/testdata/outputs_expected/suicide.sol.graph.html
   0  tests/testdata/outputs_expected/suicide.sol.json
   0  tests/testdata/outputs_expected/suicide.sol.markdown
   0  tests/testdata/outputs_expected/suicide.sol.text
   0  tests/testdata/outputs_expected/underflow.sol.easm
   0  tests/testdata/outputs_expected/underflow.sol.graph.html
   0  tests/testdata/outputs_expected/underflow.sol.json
   0  tests/testdata/outputs_expected/underflow.sol.markdown
   0  tests/testdata/outputs_expected/underflow.sol.text
   0  tests/testdata/outputs_expected/weak_random.sol.easm
   0  tests/testdata/outputs_expected/weak_random.sol.graph.html
   0  tests/testdata/outputs_expected/weak_random.sol.json
   0  tests/testdata/outputs_expected/weak_random.sol.markdown
   0  tests/testdata/outputs_expected/weak_random.sol.text

.gitignore

@@ -14,3 +14,4 @@ lol*
.idea*
coverage_html_report/
.coverage
tests/testdata/outputs_current/

README_DEV.md

@@ -52,6 +52,12 @@ pip3 install -r requirements.txt
It may take about 3 minutes to run all the tests.
The tests save their output to `./tests/testdata/outputs_current/`; if anything changes, compare these files with the ones in `./tests/testdata/outputs_expected/` to see the differences.
If you think the changes are intended, just copy the files to `outputs_expected` and commit them as the new expected outputs.
The `./tests/testdata/outputs_current/` directory is deleted and recreated by `all_tests.sh` and `coverage_report.sh` on each run.
### Generating test coverage report
```bash

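As a sketch of that comparison step (assuming only Python's standard `filecmp` module; any recursive diff tool such as `diff -r` over the two directories would do just as well):

```python
import filecmp
from pathlib import Path

TESTDATA = Path("tests") / "testdata"

# Report which artifacts differ between the committed expectations and the latest run.
comparison = filecmp.dircmp(TESTDATA / "outputs_expected", TESTDATA / "outputs_current")
for name in sorted(comparison.diff_files):
    print("changed:", name)
for name in sorted(comparison.left_only):
    print("only in outputs_expected:", name)
for name in sorted(comparison.right_only):
    print("only in outputs_current:", name)
```

For line-level detail, diff any flagged pair directly.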
all_tests.sh

@@ -1,3 +1,5 @@
#!/bin/sh
rm -rf ./tests/testdata/outputs_current/
mkdir -p ./tests/testdata/outputs_current/
python3 -m unittest discover -p "*_test.py"

coverage_report.sh

@@ -1,6 +1,9 @@
#!/bin/sh
rm -rf ./tests/testdata/outputs_current/
mkdir -p ./tests/testdata/outputs_current/
rm -rf coverage_html_report
coverage run -m unittest discover -p "*_test.py"
coverage html
open coverage_html_report/index.html

tests/__init__.py

@@ -5,6 +5,10 @@ TESTS_DIR = Path(__file__).parent
PROJECT_DIR = TESTS_DIR.parent
TESTDATA = TESTS_DIR / "testdata"
TESTDATA_INPUTS = TESTDATA / "inputs"
TESTDATA_OUTPUTS = TESTDATA / "outputs"
TESTDATA_OUTPUTS_EXPECTED = TESTDATA / "outputs_expected"
TESTDATA_OUTPUTS_CURRENT = TESTDATA / "outputs_current"
os.environ['MYTHRIL_DIR'] = str(TESTS_DIR / "mythril_dir")

def compare_files_error_message(expected, current):
    return "Expected output changes, compare the following files to see differences: \n- {}\n- {}\n".format(str(expected), str(current))

tests/disassembler_test.py

@@ -18,12 +18,12 @@ class DisassemblerTestCase(TestCase):
    def test_easm_from_solidity_files(self):
        for input_file in TESTDATA_INPUTS.iterdir():
            code = _compile_to_code(input_file)
            output_expected = TESTDATA_OUTPUTS_EXPECTED / (input_file.name + ".easm")
            output_current = TESTDATA_OUTPUTS_CURRENT / (input_file.name + ".easm")
            code = _compile_to_code(input_file)
            disassembly = Disassembly(code)
            # Useful for generating output file
            # (TESTDATA_OUTPUTS / (input_file.name + ".easm")).write_text(disassembly.get_easm())
            output_current.write_text(disassembly.get_easm())
            expected_easm = (TESTDATA_OUTPUTS / (input_file.name + ".easm")).read_text()
            self.assertEqual(disassembly.get_easm(), expected_easm, "{} returns invalid easm".format(str(input_file)))
            self.assertEqual(output_expected.read_text(), output_current.read_text(), compare_files_error_message(output_expected, output_current))

tests/graph_test.py

@@ -1,8 +1,6 @@
from unittest import TestCase
from mythril.analysis.callgraph import generate_graph
from mythril.analysis.report import Report
from mythril.analysis.security import fire_lasers
from mythril.analysis.symbolic import SymExecWrapper
from mythril.ether import util
from mythril.ether.soliditycontract import SolidityContract

@@ -12,21 +10,13 @@ class GraphTest(TestCase):
    def test_generate_graph(self):
        for input_file in TESTDATA_INPUTS.iterdir():
            output_expected = TESTDATA_OUTPUTS_EXPECTED / (input_file.name + ".graph.html")
            output_current = TESTDATA_OUTPUTS_CURRENT / (input_file.name + ".graph.html")
            contract = SolidityContract(str(input_file), name=None, solc_args=None)
            sym = SymExecWrapper(contract, address=(util.get_indexed_address(0)))
            issues = fire_lasers(sym)
            for issue in issues:
                issue.add_code_info(contract)
            report = Report()
            for issue in issues:
                report.append_issue(issue)
            html = generate_graph(sym)
            output_current.write_text(html)
            # Useful for generating output file
            # (TESTDATA_OUTPUTS / (input_file.name + ".graph.html")).write_text(html)
            expected = (TESTDATA_OUTPUTS / (input_file.name + ".graph.html")).read_text()
            self.assertEqual(html, expected, "{}: graph html is changed".format(str(input_file)))
            self.assertEqual(output_expected.read_text(), output_current.read_text(), compare_files_error_message(output_expected, output_current))

tests/report_test.py

@@ -5,6 +5,7 @@ from mythril.analysis.security import fire_lasers
from mythril.analysis.symbolic import SymExecWrapper
from mythril.ether import util
from mythril.ether.soliditycontract import SolidityContract
import json
from tests import *

@@ -17,30 +18,49 @@ def _fix_debug_data(json_str):
        issue["debug"] = "<DEBUG-DATA>"
    return json.dumps(read_json, indent=4)
class AnalysisReportTest(TestCase):
    def test_reports(self):
        for input_file in (TESTDATA / "inputs").iterdir():
def _generate_report(input_file):
    contract = SolidityContract(str(input_file), name=None, solc_args=None)
    sym = SymExecWrapper(contract, address=(util.get_indexed_address(0)))
    issues = fire_lasers(sym)
    for issue in issues:
        issue.add_code_info(contract)
    report = Report()
    for issue in issues:
        issue.add_code_info(contract)
        report.append_issue(issue)
    # Useful for generating output file
    # (TESTDATA_OUTPUTS / (input_file.name + ".text")).write_text(_fix_path(report.as_text()))
    # (TESTDATA_OUTPUTS / (input_file.name + ".json")).write_text(_fix_path(_fix_debug_data(report.as_json())))
    # (TESTDATA_OUTPUTS / (input_file.name + ".markdown")).write_text(_fix_path(report.as_markdown()))
    return report
class AnalysisReportTest(TestCase):
    def test_json_reports(self):
        for input_file in TESTDATA_INPUTS.iterdir():
            output_expected = TESTDATA_OUTPUTS_EXPECTED / (input_file.name + ".json")
            output_current = TESTDATA_OUTPUTS_CURRENT / (input_file.name + ".json")
            report = _generate_report(input_file)
            output_current.write_text(_fix_path(_fix_debug_data(report.as_json())).strip())
            self.assertEqual(output_expected.read_text(), output_current.read_text(), compare_files_error_message(output_expected, output_current))
    def test_markdown_reports(self):
        for input_file in TESTDATA_INPUTS.iterdir():
            output_expected = TESTDATA_OUTPUTS_EXPECTED / (input_file.name + ".markdown")
            output_current = TESTDATA_OUTPUTS_CURRENT / (input_file.name + ".markdown")
            report = _generate_report(input_file)
            output_current.write_text(_fix_path(report.as_markdown()))
            self.assertEqual(output_expected.read_text(), output_current.read_text(), compare_files_error_message(output_expected, output_current))
    def test_text_reports(self):
        for input_file in TESTDATA_INPUTS.iterdir():
            output_expected = TESTDATA_OUTPUTS_EXPECTED / (input_file.name + ".text")
            output_current = TESTDATA_OUTPUTS_CURRENT / (input_file.name + ".text")
            report = _generate_report(input_file)
            text = (TESTDATA / "outputs" / (input_file.name + ".text")).read_text()
            json_report = (TESTDATA / "outputs" / (input_file.name + ".json")).read_text()
            markdown = (TESTDATA / "outputs" / (input_file.name + ".markdown")).read_text()
            output_current.write_text(_fix_path(report.as_text()))
            self.assertEqual(_fix_path(report.as_text()), text, "{}: text report is changed".format(str(input_file)))
            self.assertEqual(_fix_path(report.as_markdown()), markdown, "{}: markdown report is changed".format(str(input_file)))
            self.assertEqual(_fix_path(_fix_debug_data(report.as_json())).strip(), json_report.strip(), "{}: json report is changed".format(str(input_file)))
            self.assertEqual(output_expected.read_text(), output_current.read_text(), compare_files_error_message(output_expected, output_current))
