Merge pull request #1251 from LeoHChen/r3.1-merge-revert

R3.1 merge revert
Leo Chen committed 5 years ago via GitHub
commit 4d49a072b7
Changed files (per-file change counts in parentheses):

  1. .hmy/02c8ff0b88f313717bc3a627d2f8bb172ba3ad3bb9ba3ecb8eed4b7c878653d3d4faf769876c528b73f343967f74a917.key (1)
  2. .hmy/16513c487a6bb76f37219f3c2927a4f281f9dd3fd6ed2e3a64e500de6545cf391dd973cc228d24f9bd01efe94912e714.key (1)
  3. .hmy/1c1fb28d2de96e82c3d9b4917eb54412517e2763112a3164862a6ed627ac62e87ce274bb4ea36e6a61fb66a15c263a06.key (1)
  4. .hmy/2d3d4347c5a7398fbfa74e01514f9d0fcd98606ea7d245d9c7cfc011d472c2edc36c738b78bd115dda9d25533ddfe301.key (1)
  5. .hmy/2d61379e44a772e5757e27ee2b3874254f56073e6bd226eb8b160371cc3c18b8c4977bd3dcb71fd57dc62bf0e143fd08.key (1)
  6. .hmy/40379eed79ed82bebfb4310894fd33b6a3f8413a78dc4d43b98d0adc9ef69f3285df05eaab9f2ce5f7227f8cb920e809.key (1)
  7. .hmy/4235d4ae2219093632c61db4f71ff0c32bdb56463845f8477c2086af1fe643194d3709575707148cad4f835f2fc4ea05.key (1)
  8. .hmy/49d15743b36334399f9985feb0753430a2b287b2d68b84495bbb15381854cbf01bca9d1d9f4c9c8f18509b2bfa6bd40f.key (1)
  9. .hmy/52ecce5f64db21cbe374c9268188f5d2cdd5bec1a3112276a350349860e35fb81f8cfe447a311e0550d961cf25cb988d.key (1)
  10. .hmy/576d3c48294e00d6be4a22b07b66a870ddee03052fe48a5abbd180222e5d5a1f8946a78d55b025de21635fd743bbad90.key (1)
  11. .hmy/63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512.key (1)
  12. .hmy/65f55eb3052f9e9f632b2923be594ba77c55543f5c58ee1454b9cfd658d25e06373b0f7d42a19c84768139ea294f6204.key (1)
  13. .hmy/678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c.key (1)
  14. .hmy/68ae289d73332872ec8d04ac256ca0f5453c88ad392730c5741b6055bc3ec3d086ab03637713a29f459177aaa8340615.key (1)
  15. .hmy/776f3b8704f4e1092a302a60e84f81e476c212d6f458092b696df420ea19ff84a6179e8e23d090b9297dc041600bc100.key (1)
  16. .hmy/86dc2fdc2ceec18f6923b99fd86a68405c132e1005cf1df72dca75db0adfaeb53d201d66af37916d61f079f34f21fb96.key (1)
  17. .hmy/95117937cd8c09acd2dfae847d74041a67834ea88662a7cbed1e170350bc329e53db151e5a0ef3e712e35287ae954818.key (1)
  18. .hmy/a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100.key (1)
  19. .hmy/b179c4fdc0bee7bd0b6698b792837dd13404d3f985b59d4a9b1cd0641a76651e271518b61abbb6fbebd4acf963358604.key (1)
  20. .hmy/blspass.txt (0)
  21. .hmy/c4e4708b6cf2a2ceeb59981677e9821eebafc5cf483fb5364a28fa604cc0ce69beeed40f3f03815c9e196fdaec5f1097.key (1)
  22. .hmy/ca86e551ee42adaaa6477322d7db869d3e203c00d7b86c82ebee629ad79cb6d57b8f3db28336778ec2180e56a8e07296.key (1)
  23. .hmy/e751ec995defe4931273aaebcb2cd14bf37e629c554a57d3f334c37881a34a6188a93e76113c55ef3481da23b7d7ab09.key (1)
  24. .hmy/eca09c1808b729ca56f1b5a6a287c6e1c3ae09e29ccf7efa35453471fcab07d9f73cee249e2b91f5ee44eb9618be3904.key (1)
  25. .hmy/ee2474f93cba9241562efc7475ac2721ab0899edf8f7f115a656c0c1f9ef8203add678064878d174bb478fa2e6630502.key (1)
  26. .hmy/f47238daef97d60deedbde5302d05dea5de67608f11f406576e363661f7dcbc4a1385948549b31a6c70f6fde8a391486.key (1)
  27. .hmy/fc4b9c535ee91f015efff3f32fbb9d32cdd9bfc8a837bb3eee89b8fff653c7af2050a4e147ebe5c7233dc2d5df06ee0a.key (1)
  28. .hmy/keystore/one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7.key (1)
  29. .hmy/keystore/one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k.key (1)
  30. .hmy/keystore/one12fuf7x9rgtdgqg7vgq0962c556m3p7afsxgvll.key (1)
  31. .hmy/keystore/one1658znfwf40epvy7e46cqrmzyy54h4n0qa73nep.key (1)
  32. .hmy/keystore/one16qsd5ant9v94jrs89mruzx62h7ekcfxmduh2rx.key (1)
  33. .hmy/keystore/one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9.key (1)
  34. .hmy/keystore/one1a50tun737ulcvwy0yvve0pvu5skq0kjargvhwe.key (1)
  35. .hmy/keystore/one1d2rngmem4x2c6zxsjjz29dlah0jzkr0k2n88wc.key (1)
  36. .hmy/keystore/one1d7jfnr6yraxnrycgaemyktkmhmajhp8kl0yahv.key (1)
  37. .hmy/keystore/one1est2gxcvavmtnzc7mhd73gzadm3xxcv5zczdtw.key (1)
  38. .hmy/keystore/one1ghkz3frhske7emk79p7v2afmj4a5t0kmjyt4s5.key (1)
  39. .hmy/keystore/one1ljznytjyn269azvszjlcqvpcj6hjm822yrcp2e.key (1)
  40. .hmy/keystore/one1m6m0ll3q7ljdqgmth2t5j7dfe6stykucpj2nr5.key (1)
  41. .hmy/keystore/one1p7ht2d4kl8ve7a8jxw746yfnx4wnfxtp8jqxwe.key (1)
  42. .hmy/keystore/one1pdv9lrdwl0rg5vglh4xtyrv3wjk3wsqket7zxy.key (1)
  43. .hmy/keystore/one1pf75h0t4am90z8uv3y0dgunfqp4lj8wr3t5rsp.key (1)
  44. .hmy/keystore/one1r4zyyjqrulf935a479sgqlpa78kz7zlcg2jfen.key (1)
  45. .hmy/keystore/one1spshr72utf6rwxseaz339j09ed8p6f8ke370zj.key (1)
  46. .hmy/keystore/one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg.key (1)
  47. .hmy/keystore/one1z05g55zamqzfw9qs432n33gycdmyvs38xjemyl.key (1)
  48. .hmy/wallet.ini (8)
  49. README.md (36)
  50. api/client/client.go (6)
  51. api/client/service/server.go (3)
  52. api/proto/discovery/pingpong.go (8)
  53. api/proto/message/server.go (9)
  54. api/proto/node/node.go (9)
  55. api/service/blockproposal/service.go (4)
  56. api/service/config.go (1)
  57. api/service/consensus/service.go (6)
  58. api/service/discovery/service.go (22)
  59. api/service/explorer/service.go (41)
  60. api/service/explorer/storage.go (25)
  61. api/service/explorer/structs.go (2)
  62. api/service/manager.go (8)
  63. api/service/networkinfo/service.go (42)
  64. api/service/randomness/service.go (4)
  65. api/service/resharding/service.go (6)
  66. api/service/staking/service.go (31)
  67. api/service/syncing/downloader/client.go (16)
  68. api/service/syncing/downloader/server.go (3)
  69. api/service/syncing/syncing.go (115)
  70. cmd/bootnode/main.go (5)
  71. cmd/client/txgen/main.go (4)
  72. cmd/client/wallet/generated_wallet.ini.go (8)
  73. cmd/client/wallet/main.go (22)
  74. cmd/harmony/main.go (212)
  75. cmd/hmyclient/main.go (65)
  76. consensus/consensus.go (36)
  77. consensus/consensus_leader_msg.go (6)
  78. consensus/consensus_service.go (108)
  79. consensus/consensus_test.go (5)
  80. consensus/consensus_v2.go (520)
  81. consensus/consensus_validator_msg.go (4)
  82. consensus/consensus_viewchange_msg.go (17)
  83. consensus/pbft_log.go (18)
  84. consensus/view_change.go (179)
  85. core/blockchain.go (215)
  86. core/chain_indexer.go (25)
  87. core/genesis.go (12)
  88. core/headerchain.go (32)
  89. core/rawdb/accessors_chain.go (53)
  90. core/rawdb/accessors_indexes.go (20)
  91. core/rawdb/accessors_metadata.go (10)
  92. core/resharding.go (89)
  93. core/state_transition.go (2)
  94. core/tx_journal.go (12)
  95. core/tx_list.go (2)
  96. core/tx_pool.go (113)
  97. core/types/block.go (31)
  98. core/types/transaction.go (2)
  99. core/types/transaction_signing.go (1)
  100. core/vm/evm.go (5)
Some files were not shown because too many files have changed in this diff.

@@ -0,0 +1 @@
e859f90f6cb272e078c5601ba6f921e53a881422928b10240aa50c5868af3db6f4fbbb30d91b232a8c0a1000355b8da9b510c0bb6eea36ba17a5ed8df875656e6e3467728c830e51ddfee1e736f986438e9d79ed56bc8f3d0e7734ec

@@ -0,0 +1 @@
dd11eabd17b257220f9523efd6524f12b7c1356f21667a87fe1b8c630bdf9f7d247087f85a8f661a7ef8ccde6e6287d54361aa2287fdf258321a1cc1d885b45a3b636a23e40aec0357dbd699c82a36e07fff9ca6fd85f79652320113

@@ -0,0 +1 @@
ef98515555a78fd211906a3eeaf2aca18452c48665b258b5e6a1600693ff65e8a3b6d6a3b1efd0775a297824d42d0454f4ae49789a8bc05bc2ff89b3eaa768754db6d2290fa013a6b6e42eeca8fa45a14c73192081fdd0820673ad17

@@ -0,0 +1 @@
8ec21daae97cf60451250b08307df26f9d453860f3e98b4b8244d06846018a4f02c1183373bdf21d64c7ed203821df8e6026975c886ec3f3451127011696ffd8f591457ff12a1fa169b202944156408744374f89654d0d9367984dc0

@@ -0,0 +1 @@
b9a18c5f14b14ac6a648a792265d28f81dab207b7367573b83c4a206e01e1bd238f3a0416c66554254a77a473dec0e68bd8bf30185c91b12b07d634e53fd3f7a059eaeca39ff2b67bc848eb05869fc05255121d53c1f0a11bf4f511d

@@ -0,0 +1 @@
cdaa7ee86f59aa4bd326af635a899afb21b204882b1e911f5e5a250c76c044d1ccfbb15772d26e36ee9ffe6c290bdebfe49ac1cf1989c913f3d06c76d4c20ebf8ba4a00644405064e51ce2fbdc14c35892393c84e4cc98ca5f2780e6

@@ -0,0 +1 @@
c3e2375e1f3d09399c96215861fd4f231e2ec9fd6ec9a8e1143efaff862893542610eacfcf93a76b359fc0a4644a41ffbae7e3d3f580d816231252e28f41633e297796baa3ddd94b9ef2b183ab2b678c675950ef3fbe64dc80e2a680

@@ -0,0 +1 @@
2feddeb82936401a582b4d48aa7bc62f1ccabcad430ee9e70ca682392767a8a162d17f691cc22089f434c76c790e57ad0ce93a61aa070b87f802f42d84ac02d3d33cc4baa4a643eeb53b19c4105ba01e2dd7a3cccb75b0678cc71d21

@@ -0,0 +1 @@
10f1173239fc903360055a2ffe37948cddbb2b199f66572af56e623b6681c100f2fc71b08cef8fd59c0a784047f321543d81aee9a2d493214c31e72852c5d1c8f350baf4d8667fec16f520680fc59371a0f874ffcc9cd5c824291ff5

@@ -0,0 +1 @@
837cdffcc049ba89931d570e4be38bfef513df65d8dd277b0da8e62adf8c23b3c5229a2a4b36aa2c90c17d490dd5f95869365ebc4429d1efc784d25a64db246870d4bb4831d98ec02317ea88b76491a7fdfd2fa5e10f267e8f3989d9

@@ -0,0 +1 @@
3866b51e59955446f625063edc6a6f352662e92224ce78a2921e7699d478ace9d1c7a3c7c1932a81c5d7fc677e9c7620fe414a0b8e852bc075d600455e8b07b0918296e2914a656764e438e6978623f23ae63207ef4b377aac478048

@@ -0,0 +1 @@
56e7156cfa4151f7beb62d94f92bf08f0544cc35aa65eec1c68f0db99fdef68dd0cc354ba61e949dd962100216155e86052272965b7063d8ebb9a6c7be9c35ab421415f12d05dc0c719b6ac2b3ec20fedc267fbf6a4f022925bc45bf

@@ -0,0 +1 @@
926454b15914a0d0a18686173dcee027bd6cb3911866f360a3b383b240431c75130e2f3b3697f5117e072a6c7fc39dcdeb8b1e790034e844b070867740d5eb2d918b649701490e56aecdb7ef22c335d6300a103719c98bb094aec530

@@ -0,0 +1 @@
d2e561f8184c1e2ab98a77c82cec7aa6ee2d729ce7664c35784299245c5a2f98afeda45049ff55ac19ad7fe8c9a13d57fd2f250b4a0bcba9146f01d789ce8fbab4433ca09fbf3f6d476f7832a62539c4c12ac7fa5ce37d535c1d21e9

@@ -0,0 +1 @@
dbbf69ebbe21d0da28bbf96294171b17644f40bce2bd38470b9345c75811b9685587b0acf9b1806b48a28e7cc56e5e4daad281d2950612beaeef607e8aa71bbc33ed91f76c170b1efe6691a4038ab3b149ac771279ee9ea90b501444

@@ -0,0 +1 @@
2d531f231ea393b00d1e74887135dbc6144ccbb683438b7f1b7b9a217ca890ef82ba0a2fad7c103f31ac46cc6cbf073d4489726b3069af3414c930234d284453292cd1a5c14baf884a1f074fe4f70856c2e454e0da1f6acd1d745671

@@ -0,0 +1 @@
5ea6f866b075e1c42d2a0aa72862f5161b45e4e5064f87f12c241d43425cdce6f785697d3a7dc40a7b17222543390dac8e4908e71c44a7180f86bda6930b1df96d59adb70f24756c4c4430325b6a2ba36364870dd768d55a4a0c86e1

@@ -0,0 +1 @@
764e55fb6b84617d188846e274152ca564b8b013cfe661661bd7c6681fdae3b000ef96c0654cdd7ff8755d32768e3770c4137e9d08c5ea7c8083705f41aae761bc03737cf316382f51c63d9e335a4c6a2d770e190ce5306cdca7821a

@@ -0,0 +1 @@
4f10ae42aefd0acfc50cc2a5874eff7ecd98c8a2a6bb9fa7f35f7e23a71211a24d8428b13472558000193d03077afdc5d534e0a91e0113114d141783db621499f7866bcf56300befd2a60408e56d9908069cc51e2216113f050c3d3e

@@ -0,0 +1 @@
b3dae95cb93d7081949f19fa14b682254b52034de91020394a7df5bc87c0629ff73e3fc8ad760d911ea9fb488587d3bc36520e936d85e29bfa303db66dacc56728191906b686fc5ea73a63efb83c423f01e150b6737bd07c4763155c

@@ -0,0 +1 @@
f7255c1a11961a9dd4f7b0776c6113fc479829880abe05a16322099b651d5b31b829f63794d9c89fe7e9fc9729f7a722b566b77ca3f50824895b4ce26779ecfcfe6e50908f7fd26b101d34f0addb0b6cab5d2d48e82c7db9964dd09d

@@ -0,0 +1 @@
8e1c2ffc67eeb241e00f5a87a280cb1705f38dd5e21a727aaccf1f82f60256ff9b7e409e2a059fcaae2f7b41a76ef58149d25fe9c47626ca1df012661d344d9f8a25d817183c55e8be840f5a155e7374cf1044e0abbaa23e6ac0c041

@@ -0,0 +1 @@
e32f49038a6eddd0734bc444789ee24cee3ca53f10f9fab44c103099ce0cb3d6978f524cdacbf5b1d4d99e8cca7c3ffe0ebc7f9bc256b9a47b0b3b5a09f821718ee948e28bd3e6392f113a79f9a1a1bfc7b743b2dc06a8a1dc9dd1e9

@@ -0,0 +1 @@
04708dbccd097c7f1278b51f6e72a8dead4f913c55dbc412f5d4519ed67d8fb56944c719a544fe724f0648ac9a74875d5458855e7bb434eaae0249f3b6fafbf311147de2f1e64e0394043e55c6cec92ce28af61dda93da6bf65523d3

@@ -0,0 +1 @@
2ddd1d3d707da7a1a790aa1c3712c7457eaf63430cdefbd8988602369b112a4b2bde7d1e9769a6e980dc168ed337021bec96ba72607272ba6efdac603bda0d601e0e757384ed92b83f7a99efe412d08d13a9540e654e57aa9f702553

@@ -0,0 +1 @@
0eb52e17542aac608b63b714b4f00e620c47743524c9367c9d0c2cb770c727c81167f0b5e94c59c9c0e68864407ca4c64922f6b23a22cb93fa260486d17d67faab495acc567ee41adadffe36632e00d36a903f26cbc60055c5c681ad

@@ -0,0 +1 @@
{"address":"7c41e0668b551f4f902cfaec05b5bdca68b124ce","crypto":{"cipher":"aes-128-ctr","ciphertext":"79c01b348e247ced69c60419504201b05e4cb0a0c13e7553800f24d3a6adad18","cipherparams":{"iv":"3a237dfa5d7a061794af8b31d96ce403"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"11b96c6d74f8a1c2518fa7a0c703c0c87755d165d2a52cc866ea18b366505606"},"mac":"7e3abf588d1735e6b54e1903d710fe13bd0c019f9348f078d7228e4ca76f4872"},"id":"b2ac0bab-6bdb-46d1-8c43-56829dc94631","version":3}

@@ -0,0 +1 @@
{"address":"514650ca30b3c79f693e14220115434236d44aeb","crypto":{"cipher":"aes-128-ctr","ciphertext":"0a2720560f336d9fe97cb2a64f252efe516bca298e0b26368b05fb0b4a9a2acf","cipherparams":{"iv":"d55a4720d1d3a06eddfbadda834002aa"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"64664051f661b74e44f92c9020fb68fe229131aba70b7d0acbcb3482c57d50e4"},"mac":"238dbfccc29d99c645059bd6d2089a40e1ecc452d005dc6d3282dce1f8d351f8"},"id":"1c717ad7-2911-4fda-9c6a-acd550fe80eb","version":3}

@@ -0,0 +1 @@
{"address":"52789f18a342da8023cc401e5d2b14a6b710fba9","crypto":{"cipher":"aes-128-ctr","ciphertext":"24f6c820536b43b40ab22b186bf8699eea1d7a50a44f7b2bfc4eae32aa98dd46","cipherparams":{"iv":"6c99649be303bd1624e8c84ca0510977"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"57a48fb838b517d66cef2a6676e31312ee77a43d6e3d6c53c83a38bb6fbcfc71"},"mac":"ce6701dd969ee7ef0bcaf2ba17b846d4bcf7029ce70b0bba422003445e4418f1"},"id":"899cdaaa-2d2f-4012-850f-bbbf83d028dc","version":3}

@@ -0,0 +1 @@
{"address":"d50e29a5c9abf21613d9aeb001ec44252b7acde0","crypto":{"cipher":"aes-128-ctr","ciphertext":"3b217e377a14a88c7130a55429c4b0002c726c24bcf5489054677f60e0814df9","cipherparams":{"iv":"9d4e1c1ab09df4a0dca430ae71d0d31b"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"3d13946010c0fe14227651cfa3d403f37622b8e4237a064e428e114d012a6d3b"},"mac":"f60c48c710169fc7049661c4835797f04bc520c47e8636b6313c35fbd0f16624"},"id":"9be4b1ae-2e66-4bfc-a9a6-57b62e5c518e","version":3}

@@ -0,0 +1 @@
{"address":"d020da766b2b0b590e072ec7c11b4abfb36c24db","crypto":{"cipher":"aes-128-ctr","ciphertext":"69afd94411d350fe17a182063d633753eac63fcc894bdc06a383da2684220405","cipherparams":{"iv":"84c9cc99b6c788f09c5b367fa7312c3f"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"d9a8670a5bacc3697ea4d75f549ed88997263abb2811e61bc7b1584a432e9c78"},"mac":"d511c9fcc18814411f2ff4b276e3dd85b30842e8931b8603ebf3f086b369d8b7"},"id":"7579047b-803b-4ad8-8de5-3f3e2a2b3875","version":3}

@@ -0,0 +1 @@
{"address":"ebcd16e8c1d8f493ba04e99a56474122d81a9c58","crypto":{"cipher":"aes-128-ctr","ciphertext":"3c025fbb64ab0e76ef45e51995a286d2e867a37c60f4592962d90ab6ff93d597","cipherparams":{"iv":"7ec4278813d1e5e6e3c25203758bb666"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"5fee1817a7e15e8c7f03776573337c12826f3d5ee63258a544a98dbacf661eb9"},"mac":"71058b6e4edcd3dfb23a73e965ba29eebc50bfe186b259923cf3f3f6d7d0e318"},"id":"b029e616-9264-4408-9cf5-991796b81115","version":3}

@@ -0,0 +1 @@
{"address":"ed1ebe4fd1f73f86388f231997859ca42c07da5d","crypto":{"cipher":"aes-128-ctr","ciphertext":"35d3f8a387ebb5afa840a6cff8266f3cad95076ab5e5e0d86478245bd9752832","cipherparams":{"iv":"1ddd2682b0f73968b038fc7c4a68533a"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"7abb28f154862fba917551aadb472a9a2f81d26e4b5cb526432bee7a95f4f32e"},"mac":"85c8467dc2c48faf24aaf6487158644022d9802de5cff310603539b186bc8dc7"},"id":"7f9f263b-3c79-4ec1-ba41-05dc6330d66b","version":3}

@@ -0,0 +1 @@
{"address":"6a87346f3ba9958d08d09484a2b7fdbbe42b0df6","crypto":{"cipher":"aes-128-ctr","ciphertext":"83d2056e2fec434980d431fbb7b0b30802cebfa6e8db50d645574e9027c8ccac","cipherparams":{"iv":"116ae1984a3b739a90c77e7c402ab120"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"41cdc2deb8c9febe1fd20ff3de284049c30b9dc888ff4510d08db7fb43577e01"},"mac":"3a56d6522d54c473b0a7de5cfc8104184ed5c1e29d24aeca5b4c6dbf4929f7e0"},"id":"a2394479-a0a0-4deb-b7ca-9393b555c008","version":3}

@@ -0,0 +1 @@
{"address":"6fa4998f441f4d319308ee764b2edbbefb2b84f6","crypto":{"cipher":"aes-128-ctr","ciphertext":"ed887b19ad9303ff7ee2d14906f808fc2cf3562d2b3e72ffa490b47a70ed1359","cipherparams":{"iv":"981b0640c5131c65afc2b66b54d180d5"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"96e51a7764ae05648e4690686b8b4e547fc11640d274f95ae4b3778cf84bde15"},"mac":"5d0d28d5f2907a17c6d228431b1fca42ff2976939d7d9e8a3d77b963df91e0e4"},"id":"7afbda0d-f126-4914-b157-c1de9d9d737e","version":3}

@@ -0,0 +1 @@
{"address":"cc16a41b0ceb36b98b1edddbe8a05d6ee2636194","crypto":{"cipher":"aes-128-ctr","ciphertext":"6d7230bf642757bef746e720a9598803748ce99bc7052ee502dfd992eb160f59","cipherparams":{"iv":"081dec8badd657b1cf0d8c1c748379ad"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"254cecddedc1e238d83c4064fac06ae92467d0ddf882b2c506c951f494967b36"},"mac":"73e67e5781943ee42e73892ca3c3e1e9624a2d1ec6881716fcf3f16f249d9d75"},"id":"c87bc580-938a-43d9-b700-af6360fbd6aa","version":3}

@@ -0,0 +1 @@
{"address":"45ec28a47785b3eceede287cc5753b957b45bedb","crypto":{"cipher":"aes-128-ctr","ciphertext":"59c1afb834b16c43dbf5a4835b1d8b6f582117a57063549140fbe4e72cc74429","cipherparams":{"iv":"93b78a71d41f1f7aba1c94aecf19f805"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"efb906bfb11889ddd6515f95f344bfdf8c0b037d9157dd875ca4ffb4906ecc47"},"mac":"af60f7e0c4f6129ce1fbc62007123071468d053c05c97936a74525a6ffb379ff"},"id":"952d11bc-c497-4b64-9446-a69471e9852e","version":3}

@@ -0,0 +1 @@
{"address":"fc85322e449ab45e899014bf80303896af2d9d4a","crypto":{"cipher":"aes-128-ctr","ciphertext":"ef0bb981004178d80d281df4436aedaea7604cc53ab9d6b1c36f9ee2ba07b3cd","cipherparams":{"iv":"1937fbc73f2e2d33fd3857b99f20aa5a"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"f80f9e88a62798bc509fc96ebfe5969c817296f17cc0550c43ffab55bd328cbe"},"mac":"f18b6803eac0f1cb92fef62c3792ce05ea2f4b773a57ab90897ea81ef273dd67"},"id":"c3724130-3461-47a9-9fa7-1109e5e49375","version":3}

@@ -0,0 +1 @@
{"address":"deb6fffe20f7e4d0236bba974979a9cea0b25b98","crypto":{"cipher":"aes-128-ctr","ciphertext":"a4db960531c5f6a12d6ecde28c24d5f16ddf0e2c37e55ee83720273df60ecfab","cipherparams":{"iv":"09488c65d1e7d8bc8429933b293f5c39"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"dcae38d68672588326801538d772362efce0c24d327a247f35cbdca998608c9b"},"mac":"f2d49678a3e8e0efecd30aa277f6337a9d22d21efcbc5f621c3e7da5e13fd67d"},"id":"1ee3efb9-db6b-446a-8745-23fe595e7ad0","version":3}

@@ -0,0 +1 @@
{"address":"0faeb536b6f9d99f74f233bd5d1133355d349961","crypto":{"cipher":"aes-128-ctr","ciphertext":"4633b6fea4bdefd32ad6342c97c720d6de4209b9a5a941465758238d6925baee","cipherparams":{"iv":"54e8120f6e24993639135ce4c8c17b90"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"5968c0feb65d7e124c675bdc6781917a5f60a6bbe1f3d9b52baadf3d0b4815a9"},"mac":"32d312829ac7b130467422610a7b2f705724338972ba26421b7bac4cf9801802"},"id":"24427081-a7b9-4fde-94f9-afdde2f7d693","version":3}

@@ -0,0 +1 @@
{"address":"0b585f8daefbc68a311fbd4cb20d9174ad174016","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d5212df709bc0c3788d16d6eb382a0efe09d6375b46193ca150de57e13a63fc","cipherparams":{"iv":"ef0021e70de7b9c5e1fffab85aa4e767"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"74cb54fc249fc554a8e9eccc2249f5d8cb68480e958af8793507e5551cbe28e7"},"mac":"da6ee1a747cdab9205d9f7471c34e98c6a9ee2b07965d3290bf7e8ef6e7064b1"},"id":"f80c88e5-b36e-4204-8bc9-4a0a3b296acf","version":3}

@@ -0,0 +1 @@
{"address":"0a7d4bbd75eecaf11f8c891ed47269006bf91dc3","crypto":{"cipher":"aes-128-ctr","ciphertext":"1341024ae7d676370a05a3598d79adc103779d172f16d89a72cc4b449fd6de80","cipherparams":{"iv":"29285ac7513225b04f265f60a11da619"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"1ae76b7984e5f7baa0e67fdd24721025b7e6e2530d79c42b3a2afc8bac8c7410"},"mac":"4460558fe35cd6c2be1ba51d305da021ccef0b7297b8b26d4e33e9f2e46d2904"},"id":"19506dea-0c8d-40ff-97ca-81ccac068c1b","version":3}

@@ -0,0 +1 @@
{"address":"1d44424803e7d258d3b5f160807c3df1ec2f0bf8","crypto":{"cipher":"aes-128-ctr","ciphertext":"2d8133e01e3a3566c0cc3b16b6b4d40437d9a13c0c969a10cd1e0dc40a59f6df","cipherparams":{"iv":"692ec015f202a9cf8f3951e3f7771ab2"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"faf7cf29d64ea042b29b8220c9910d8d8ee989fb36e92e533f9fc61f43c8f8d7"},"mac":"1f7a64fbbb1f0dd7732771e52bd775989e627e15c2bd0317215c01d826110fd7"},"id":"b067e90d-5e97-410e-a20c-91aee87ad4e1","version":3}

@@ -0,0 +1 @@
{"address":"806171f95c5a74371a19e8a312c9e5cb4e1d24f6","crypto":{"cipher":"aes-128-ctr","ciphertext":"7ab03cb710ac9b623756f90ffedd1953ec588040fef688e2273d0dfd895ead63","cipherparams":{"iv":"f898400903878543a2643cf50b1c0c59"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"09b87b6de8b3c5628ae20629b571f76afcee209b1658cad1eb51a0439d8e83a4"},"mac":"77fbd8d3244caa6bed5026b88fbdd6c49a88dbe0de572b3e54512e4a73fd28ab"},"id":"8730054b-72d5-4bfe-9235-ee9b671eeca3","version":3}

@@ -0,0 +1 @@
{"address":"e1217e2a4861dd5d50983dad32474bbfd6a7333f","crypto":{"cipher":"aes-128-ctr","ciphertext":"e72fbeb5d7eb21753a37ed234cf75b2072c7affa949c854b5ef8c24ab232b731","cipherparams":{"iv":"9df55b283ed837430a947786b66e69aa"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"3111c6b914ab658c97edef7db0069f4381f88089f930fad45f8cc28bccbc3579"},"mac":"e4fa036d3d708eb2aee5461baf462b46f6594e6510c076fb33f15876e7585052"},"id":"f759e357-61a6-4b00-b3a5-dfeb4b388663","version":3}

@@ -0,0 +1 @@
{"address":"13e88a505dd804971410ac5538c504c376464227","crypto":{"cipher":"aes-128-ctr","ciphertext":"694b24ef862aa81153ef15aae2de5bff545dccda73fae1ead8c25d0bcfb79e4c","cipherparams":{"iv":"960d32ebf3fde022d5eb5a591a0e92f6"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"2a13a84c729472772170eac13d580578bb40c95456dc1863eac6495c54f95b96"},"mac":"8720e6860217fb91fd10c9e8f4c2f44de6f9034e3e9fe8dd14be9b2ecd80528a"},"id":"c4ee8751-112f-4538-b010-07163775bdf3","version":3}

@@ -21,11 +21,17 @@ rpc = s3.t.hmny.io:14555
 [local]
 bootnode = /ip4/127.0.0.1/tcp/19876/p2p/Qmc1V6W7BwX8Ugb42Ti8RnXF1rY5PF7nnZ6bKBryCgi6cv
-shards = 1
+shards = 2
 [local.shard0.rpc]
 rpc = 127.0.0.1:14555
+rpc = 127.0.0.1:14557
+rpc = 127.0.0.1:14559
+[local.shard1.rpc]
 rpc = 127.0.0.1:14556
+rpc = 127.0.0.1:14558
+rpc = 127.0.0.1:14560
 [devnet]
 bootnode = /ip4/100.26.90.187/tcp/9871/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
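
For orientation, the `[local]` profile after this change would read as follows (reconstructed from the hunk above; surrounding sections elided, blank lines added for readability):

```ini
[local]
bootnode = /ip4/127.0.0.1/tcp/19876/p2p/Qmc1V6W7BwX8Ugb42Ti8RnXF1rY5PF7nnZ6bKBryCgi6cv
shards = 2

[local.shard0.rpc]
rpc = 127.0.0.1:14555
rpc = 127.0.0.1:14557
rpc = 127.0.0.1:14559

[local.shard1.rpc]
rpc = 127.0.0.1:14556
rpc = 127.0.0.1:14558
rpc = 127.0.0.1:14560
```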

@@ -60,27 +60,23 @@ You can run the script `./scripts/go_executable_build.sh` to build all the exec
 ### Build individual executables
-Initialize BLS
-```
-source scripts/setup_bls_build_flags.sh
-```
 Harmony server / main node:
-```
-go build -o bin/harmony cmd/harmony/main.go
+```bash
+./scripts/go_executable_build.sh harmony
 ```
 Wallet:
-```
-go build -o bin/wallet cmd/client/wallet/main.go
+```bash
+./scripts/go_executable_build.sh wallet
 ```
 Tx Generator:
-```
-go build -o bin/txgen cmd/client/txgen/main.go
+```bash
+./scripts/go_executable_build.sh txgen
 ```
 ## Usage
@@ -89,12 +85,24 @@ You may build the src/harmony.go locally and run local test.
 ### Running local test
-The deploy.sh script creates a local environment of Harmony blockchain devnet based on the configuration file.
+The debug.sh script calls test/deploy.sh script to create a local environment of Harmony blockchain devnet based on the configuration file.
 The configuration file configures number of nodes and their IP/Port.
-The script starts one local beacon chain node, the blockchain nodes, and run a transactional generator program which generates and sends simulated transactions to the local blockchain.
+The script starts 2 shards and 7 nodes in each shard.
+```bash
+./test/debug.sh
+```
+### Test local blockchain
+```bash
+source scripts/setup_bls_build_flags.sh
+./bin/wallet list
+./bin/wallet -p local balances
+```
+### Terminate the local blockchain
 ```bash
-./test/deploy.sh ./test/configs/beaconchain40.txt
+./test/kill_nodes.sh
 ```
 ## Testing
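
Taken together, the revised README describes this local workflow (a sketch assembled from the commands in the hunks above, run from the repo root):

```bash
# Build the executables via the build script (replaces the direct `go build` invocations).
./scripts/go_executable_build.sh harmony
./scripts/go_executable_build.sh wallet
./scripts/go_executable_build.sh txgen

# Start a local devnet: 2 shards, 7 nodes per shard.
./test/debug.sh

# Inspect accounts on the local chain.
source scripts/setup_bls_build_flags.sh
./bin/wallet list
./bin/wallet -p local balances

# Tear everything down.
./test/kill_nodes.sh
```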

@@ -1,9 +1,7 @@
 package client
 import (
-    "github.com/ethereum/go-ethereum/log"
     "github.com/harmony-one/harmony/core/types"
-    "github.com/harmony-one/harmony/internal/utils"
     "github.com/harmony-one/harmony/p2p"
 )
@@ -12,8 +10,6 @@ type Client struct {
     ShardID uint32 // ShardID
     UpdateBlocks func([]*types.Block) // Closure function used to sync new block with the leader. Once the leader finishes the consensus on a new block, it will send it to the clients. Clients use this method to update their blockchain
-    log log.Logger // Log utility
     // The p2p host used to send/receive p2p messages
     host p2p.Host
 }
@@ -23,7 +19,5 @@ func NewClient(host p2p.Host, shardID uint32) *Client {
     client := Client{}
     client.host = host
     client.ShardID = shardID
-    // Logger
-    client.log = utils.GetLogInstance()
     return &client
 }

@@ -9,7 +9,6 @@ import (
     proto "github.com/harmony-one/harmony/api/client/service/proto"
     "github.com/harmony-one/harmony/core/state"
     common2 "github.com/harmony-one/harmony/internal/common"
-    "github.com/harmony-one/harmony/internal/ctxerror"
     "github.com/harmony-one/harmony/internal/utils"
     "google.golang.org/grpc"
@@ -70,7 +69,7 @@ func (s *Server) Start(ip, port string) (*grpc.Server, error) {
     proto.RegisterClientServiceServer(grpcServer, s)
     go func() {
         if err := grpcServer.Serve(lis); err != nil {
-            ctxerror.Warn(utils.GetLogger(), err, "grpcServer.Serve() failed")
+            utils.Logger().Warn().Err(err).Msg("grpcServer.Serve() failed")
         }
     }()
     return grpcServer, nil
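
The Start method above listens, registers the proto service, and serves gRPC on a background goroutine. A bare-bones sketch of that start-up shape (assuming google.golang.org/grpc; the proto registration is omitted, so this server only illustrates the lifecycle):

```go
package main

import (
	"fmt"
	"net"

	"google.golang.org/grpc"
)

// start mirrors the Server.Start shape from the hunk above.
func start(ip, port string) (*grpc.Server, error) {
	lis, err := net.Listen("tcp", net.JoinHostPort(ip, port))
	if err != nil {
		return nil, err
	}
	grpcServer := grpc.NewServer()
	// proto.RegisterClientServiceServer(grpcServer, s) would go here.
	go func() {
		// Serve blocks until the server stops; log any unexpected error.
		if err := grpcServer.Serve(lis); err != nil {
			fmt.Println("grpcServer.Serve() failed:", err)
		}
	}()
	return grpcServer, nil
}

func main() {
	server, err := start("127.0.0.1", "50051")
	if err != nil {
		panic(err)
	}
	defer server.GracefulStop() // drain in-flight RPCs before exiting
}
```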

@@ -111,7 +111,7 @@ func GetPingMessage(payload []byte) (*PingMessageType, error) {
     err := decoder.Decode(ping)
     if err != nil {
-        utils.GetLogInstance().Error("[GetPingMessage] Decode", "error", err)
+        utils.Logger().Error().Err(err).Msg("[GetPingMessage] Decode")
         return nil, fmt.Errorf("Decode Ping Error")
     }
@@ -129,7 +129,7 @@ func GetPongMessage(payload []byte) (*PongMessageType, error) {
     err := decoder.Decode(pong)
     if err != nil {
-        utils.GetLogInstance().Error("[GetPongMessage] Decode", "error", err)
+        utils.Logger().Error().Err(err).Msg("[GetPongMessage] Decode")
         return nil, fmt.Errorf("Decode Pong Error")
     }
@@ -144,7 +144,7 @@ func (p PingMessageType) ConstructPingMessage() []byte {
     encoder := gob.NewEncoder(byteBuffer)
     err := encoder.Encode(p)
     if err != nil {
-        utils.GetLogInstance().Error("[ConstructPingMessage] Encode", "error", err)
+        utils.Logger().Error().Err(err).Msg("[ConstructPingMessage] Encode")
         return nil
     }
     return byteBuffer.Bytes()
@@ -158,7 +158,7 @@ func (p PongMessageType) ConstructPongMessage() []byte {
     encoder := gob.NewEncoder(byteBuffer)
     err := encoder.Encode(p)
     if err != nil {
-        utils.GetLogInstance().Error("[ConstructPongMessage] Encode", "error", err)
+        utils.Logger().Error().Err(err).Msg("[ConstructPongMessage] Encode")
         return nil
     }
     return byteBuffer.Bytes()
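
The pattern in this and the surrounding hunks swaps the old key-value logger (`utils.GetLogInstance()`, log15-style) for a fluent, typed API (`utils.Logger()`). A minimal self-contained sketch of the new style, assuming github.com/rs/zerolog as the backing library (the harmony utils wrapper itself is not shown here):

```go
package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// A console logger comparable to what a utils.Logger() wrapper might return.
	logger := zerolog.New(os.Stderr).With().Timestamp().Logger()

	err := errors.New("gob: type mismatch")

	// Old style (log15-ish): logger.Error("[GetPingMessage] Decode", "error", err)
	// New style: an event with typed fields, terminated by Msg().
	logger.Error().Err(err).Msg("[GetPingMessage] Decode")

	// Typed field helpers replace loosely-typed key-value pairs.
	logger.Info().Str("group", "shard0").Int("try", 2).Msg("Failed to send ping message")
}
```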

@@ -10,7 +10,6 @@ import (
     "net"
     "github.com/ethereum/go-ethereum/crypto"
-    "github.com/harmony-one/harmony/internal/ctxerror"
     "github.com/harmony-one/harmony/internal/utils"
     "google.golang.org/grpc"
@@ -45,11 +44,11 @@ func (s *Server) Process(ctx context.Context, message *Message) (*Response, erro
     key, err := crypto.HexToECDSA(priKey)
     if err != nil {
-        utils.GetLogInstance().Error("Error when HexToECDSA")
+        utils.Logger().Error().Msg("Error when HexToECDSA")
     }
     address := crypto.PubkeyToAddress(key.PublicKey)
-    utils.GetLogInstance().Info("Enter:", "amount", amount, "for address", address)
+    utils.Logger().Info().Int64("amount", amount).Bytes("address", address[:]).Msg("Enter")
     if err := s.CreateTransactionForEnterMethod(amount, priKey); err != nil {
         return nil, ErrEnterMethod
     }
@@ -60,7 +59,7 @@ func (s *Server) Process(ctx context.Context, message *Message) (*Response, erro
     for _, balance := range balances {
         stringBalances = append(stringBalances, balance.String())
     }
-    utils.GetLogInstance().Info("getPlayers", "players", players, "balances", stringBalances)
+    utils.Logger().Info().Strs("players", players).Strs("balances", stringBalances).Msg("getPlayers")
     ret := &Response{
         Response: &Response_LotteryResponse{
             LotteryResponse: &LotteryResponse{
@@ -91,7 +90,7 @@ func (s *Server) Start() (*grpc.Server, error) {
     RegisterClientServiceServer(s.server, s)
     go func() {
         if err := s.server.Serve(lis); err != nil {
-            ctxerror.Warn(utils.GetLogger(), err, "server.Serve() failed")
+            utils.Logger().Warn().Err(err).Msg("server.Serve() failed")
         }
     }()
     return s.server, nil

@@ -104,8 +104,7 @@ func SerializeBlockchainSyncMessage(blockchainSyncMessage *BlockchainSyncMessage
     encoder := gob.NewEncoder(&result)
     err := encoder.Encode(blockchainSyncMessage)
     if err != nil {
-        utils.GetLogger().Crit("Error", err)
-        panic(err)
+        utils.Logger().Error().Err(err).Msg("Failed to serialize blockchain sync message")
     }
     return result.Bytes()
 }
@@ -116,7 +115,7 @@ func DeserializeBlockchainSyncMessage(d []byte) (*BlockchainSyncMessage, error)
     decoder := gob.NewDecoder(bytes.NewReader(d))
     err := decoder.Decode(&blockchainSyncMessage)
     if err != nil {
-        utils.GetLogger().Crit("Error", err)
+        utils.Logger().Error().Err(err).Msg("Failed to deserialize blockchain sync message")
     }
     return &blockchainSyncMessage, err
 }
@@ -166,7 +165,7 @@ func ConstructEpochShardStateMessage(epochShardState types.EpochShardState) []by
     encoder := gob.NewEncoder(byteBuffer)
     err := encoder.Encode(epochShardState)
     if err != nil {
-        utils.GetLogInstance().Error("[ConstructEpochShardStateMessage] Encode", "error", err)
+        utils.Logger().Error().Err(err).Msg("[ConstructEpochShardStateMessage] Encode")
         return nil
     }
     return byteBuffer.Bytes()
@@ -181,7 +180,7 @@ func DeserializeEpochShardStateFromMessage(payload []byte) (*types.EpochShardSta
     err := decoder.Decode(epochShardState)
     if err != nil {
-        utils.GetLogInstance().Error("[GetEpochShardStateFromMessage] Decode", "error", err)
+        utils.Logger().Error().Err(err).Msg("[GetEpochShardStateFromMessage] Decode")
         return nil, fmt.Errorf("Decode epoch shard state Error")
     }
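
Several hunks here serialize messages with encoding/gob. A minimal round-trip sketch (standard library only; the message type is a stand-in, not the harmony one) that returns the error instead of logging it, one way to avoid the swallowed failure the old `panic(err)` papered over:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// syncMessage stands in for a message struct such as BlockchainSyncMessage.
type syncMessage struct {
	BlockHeight int
	BlockHashes [][32]byte
}

// serialize encodes the message into a byte slice with gob.
func serialize(msg *syncMessage) ([]byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(msg); err != nil {
		return nil, fmt.Errorf("encode sync message: %w", err)
	}
	return buf.Bytes(), nil
}

// deserialize decodes a byte slice produced by serialize.
func deserialize(d []byte) (*syncMessage, error) {
	var msg syncMessage
	if err := gob.NewDecoder(bytes.NewReader(d)).Decode(&msg); err != nil {
		return nil, fmt.Errorf("decode sync message: %w", err)
	}
	return &msg, nil
}

func main() {
	data, _ := serialize(&syncMessage{BlockHeight: 42})
	msg, _ := deserialize(data)
	fmt.Println(msg.BlockHeight) // 42
}
```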

@@ -40,10 +40,10 @@ func (s *Service) Run(stopChan chan struct{}, stoppedChan chan struct{}) {
 // StopService stops block proposal service.
 func (s *Service) StopService() {
-    utils.GetLogInstance().Info("Stopping block proposal service.")
+    utils.Logger().Info().Msg("Stopping block proposal service.")
     s.stopChan <- struct{}{}
     <-s.stoppedChan
-    utils.GetLogInstance().Info("Role conversion stopped.")
+    utils.Logger().Info().Msg("Role conversion stopped.")
 }
 // NotifyService notify service

@@ -16,7 +16,6 @@ type NodeConfig struct {
     Client p2p.GroupID // the client group ID of the shard
     IsClient bool // whether this node is a client node, such as wallet/txgen
     IsBeacon bool // whether this node is a beacon node or not
-    IsLeader bool // whether this node is a leader or not
     ShardID uint32 // shardID of this node
     Actions map[p2p.GroupID]p2p.ActionType // actions on the groups
 }

@@ -25,7 +25,7 @@ func New(blockChannel chan *types.Block, consensus *consensus.Consensus, startCh
 // StartService starts consensus service.
 func (s *Service) StartService() {
-    utils.GetLogInstance().Info("[consensus/service] Starting consensus service.")
+    utils.Logger().Info().Msg("[consensus/service] Starting consensus service.")
     s.stopChan = make(chan struct{})
     s.stoppedChan = make(chan struct{})
     s.consensus.Start(s.blockChannel, s.stopChan, s.stoppedChan, s.startChan)
@@ -34,10 +34,10 @@ func (s *Service) StartService() {
 // StopService stops consensus service.
 func (s *Service) StopService() {
-    utils.GetLogInstance().Info("Stopping consensus service.")
+    utils.Logger().Info().Msg("Stopping consensus service.")
     s.stopChan <- struct{}{}
     <-s.stoppedChan
-    utils.GetLogInstance().Info("Consensus service stopped.")
+    utils.Logger().Info().Msg("Consensus service stopped.")
 }
 // NotifyService notify service
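
Several services in this PR share the same stop handshake: StopService sends on stopChan, then blocks on stoppedChan until the worker confirms it has exited. A minimal sketch of that handshake (stdlib only; the work loop is a placeholder):

```go
package main

import (
	"fmt"
	"time"
)

type service struct {
	stopChan    chan struct{}
	stoppedChan chan struct{}
}

// run is the worker goroutine: do work until told to stop, then acknowledge.
func (s *service) run() {
	for {
		select {
		case <-s.stopChan:
			// Acknowledge shutdown so StopService can return.
			s.stoppedChan <- struct{}{}
			return
		default:
			time.Sleep(10 * time.Millisecond) // stand-in for one unit of work
		}
	}
}

// StopService mirrors the pattern in the hunks above.
func (s *service) StopService() {
	fmt.Println("Stopping service.")
	s.stopChan <- struct{}{}
	<-s.stoppedChan
	fmt.Println("Service stopped.")
}

func main() {
	s := &service{stopChan: make(chan struct{}), stoppedChan: make(chan struct{})}
	go s.run()
	s.StopService()
}
```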

@@ -42,16 +42,16 @@ func New(h p2p.Host, config service.NodeConfig, peerChan chan p2p.Peer, addPeer
 // StartService starts discovery service.
 func (s *Service) StartService() {
-    utils.GetLogInstance().Info("Starting discovery service.")
+    utils.Logger().Info().Msg("Starting discovery service")
     s.Init()
     s.Run()
 }
 // StopService shutdowns discovery service.
 func (s *Service) StopService() {
-    utils.GetLogInstance().Info("Shutting down discovery service.")
+    utils.Logger().Info().Msg("Shutting down discovery service")
     s.stopChan <- struct{}{}
-    utils.GetLogInstance().Info("discovery service stopped.")
+    utils.Logger().Info().Msg("discovery service stopped")
 }
 // NotifyService receives notification from service manager
@@ -59,11 +59,11 @@ func (s *Service) NotifyService(params map[string]interface{}) {
     data := params["peer"]
     action, ok := data.(p2p.GroupAction)
     if !ok {
-        utils.GetLogInstance().Error("Wrong data type passed to NotifyService")
+        utils.Logger().Error().Msg("Wrong data type passed to NotifyService")
         return
     }
-    utils.GetLogInstance().Info("[DISCOVERY]", "got notified", action)
+    utils.Logger().Info().Interface("got notified", action).Msg("[DISCOVERY]")
     s.actionChan <- action
 }
@@ -77,7 +77,7 @@ func (s *Service) contactP2pPeers() {
     pingMsg := proto_discovery.NewPingMessage(s.host.GetSelfPeer(), s.config.IsClient)
-    utils.GetLogInstance().Info("Constructing Ping Message", "myPing", pingMsg)
+    utils.Logger().Info().Interface("myPing", pingMsg).Msg("Constructing Ping Message")
     msgBuf := host.ConstructP2pMessage(byte(0), pingMsg.ConstructPingMessage())
     s.sentPingMessage(s.config.ShardGroupID, msgBuf)
@@ -85,7 +85,7 @@ func (s *Service) contactP2pPeers() {
     select {
     case peer, ok := <-s.peerChan:
         if !ok {
-            utils.GetLogInstance().Debug("[DISCOVERY] No More Peer!")
+            utils.Logger().Debug().Msg("[DISCOVERY] No More Peer!")
             break
         }
         // TODO (leo) this one assumes all peers received in the channel are beacon chain node
@@ -97,9 +97,9 @@ func (s *Service) contactP2pPeers() {
         }
         // Add to outgoing peer list
         // s.host.AddOutgoingPeer(peer)
-        // utils.GetLogInstance().Debug("[DISCOVERY]", "add outgoing peer", peer)
+        // utils.Logger().Debug().Interface("add outgoing peer", peer).Msg("[DISCOVERY]")
     case <-s.stopChan:
-        utils.GetLogInstance().Debug("[DISCOVERY] stop pinging ...")
+        utils.Logger().Debug().Msg("[DISCOVERY] stop pinging ...")
         return
     case action := <-s.actionChan:
         s.config.Actions[action.Name] = action.Action
@@ -135,13 +135,13 @@ func (s *Service) sentPingMessage(g p2p.GroupID, msgBuf []byte) {
         err = s.host.SendMessageToGroups([]p2p.GroupID{s.config.ShardGroupID}, msgBuf)
     }
     if err != nil {
-        utils.GetLogInstance().Error("Failed to send ping message", "group", g)
+        utils.Logger().Error().Str("group", string(g)).Msg("Failed to send ping message")
     }
 }
 // Init is to initialize for discoveryService.
 func (s *Service) Init() {
-    utils.GetLogInstance().Info("Init discovery service")
+    utils.Logger().Info().Msg("Init discovery service")
 }
 // SetMessageChan sets up message channel to service.

@@ -59,18 +59,18 @@ func New(selfPeer *p2p.Peer, GetNodeIDs func() []libp2p_peer.ID, GetAccountBalan
 // StartService starts explorer service.
 func (s *Service) StartService() {
-    utils.GetLogInstance().Info("Starting explorer service.")
+    utils.Logger().Info().Msg("Starting explorer service.")
     s.Init(true)
     s.server = s.Run()
 }
 // StopService shutdowns explorer service.
 func (s *Service) StopService() {
-    utils.GetLogInstance().Info("Shutting down explorer service.")
+    utils.Logger().Info().Msg("Shutting down explorer service.")
     if err := s.server.Shutdown(context.Background()); err != nil {
-        utils.GetLogInstance().Error("Error when shutting down explorer server", "error", err)
+        utils.Logger().Error().Err(err).Msg("Error when shutting down explorer server")
     } else {
-        utils.GetLogInstance().Info("Shutting down explorer server successufully")
+        utils.Logger().Info().Msg("Shutting down explorer server successufully")
     }
 }
@@ -79,7 +79,7 @@ func GetExplorerPort(nodePort string) string {
     if port, err := strconv.Atoi(nodePort); err == nil {
         return fmt.Sprintf("%d", port-explorerPortDifference)
     }
-    utils.GetLogInstance().Error("error on parsing.")
+    utils.Logger().Error().Msg("error on parsing.")
     return ""
 }
@@ -115,11 +115,11 @@ func (s *Service) Run() *http.Server {
     s.router.Path("/shard").HandlerFunc(s.GetExplorerShard)
     // Do serving now.
-    utils.GetLogInstance().Info("Listening on ", "port: ", GetExplorerPort(s.Port))
+    utils.Logger().Info().Str("port", GetExplorerPort(s.Port)).Msg("Listening")
     server := &http.Server{Addr: addr, Handler: s.router}
     go func() {
         if err := server.ListenAndServe(); err != nil {
-            ctxerror.Warn(utils.GetLogger(), err, "server.ListenAndServe()")
+            utils.Logger().Warn().Err(err).Msg("server.ListenAndServe()")
         }
     }()
     return server
@@ -141,7 +141,7 @@ func (s *Service) ReadBlocksFromDB(from, to int) []*types.Block {
     }
     block := new(types.Block)
     if rlp.DecodeBytes(data, block) != nil {
-        utils.GetLogInstance().Error("Error on getting from db")
+        utils.Logger().Error().Msg("Error on getting from db")
         os.Exit(1)
     }
     blocks = append(blocks, block)
@@ -160,8 +160,7 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
     }
     defer func() {
         if err := json.NewEncoder(w).Encode(data.Blocks); err != nil {
-            ctxerror.Warn(utils.WithCallerSkip(utils.GetLogInstance(), 1), err,
-                "cannot JSON-encode blocks")
+            utils.Logger().Warn().Err(err).Msg("cannot JSON-encode blocks")
         }
     }()
@@ -171,8 +170,7 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
     db := s.storage.GetDB()
     fromInt, err := strconv.Atoi(from)
     if err != nil {
-        ctxerror.Warn(utils.GetLogger(), err, "invalid from parameter",
-            "from", from)
+        utils.Logger().Warn().Err(err).Str("from", from).Msg("invalid from parameter")
         return
     }
     var toInt int
@@ -188,7 +186,7 @@ func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {
         toInt, err = strconv.Atoi(to)
     }
     if err != nil {
-        ctxerror.Warn(utils.GetLogger(), err, "invalid to parameter", "to", to)
+        utils.Logger().Warn().Err(err).Str("to", to).Msg("invalid to parameter")
         return
     }
@@ -240,8 +238,7 @@ func (s *Service) GetExplorerTransaction(w http.ResponseWriter, r *http.Request)
     data := &Data{}
     defer func() {
         if err := json.NewEncoder(w).Encode(data.TX); err != nil {
-            ctxerror.Warn(utils.WithCallerSkip(utils.GetLogInstance(), 1), err,
-                "cannot JSON-encode TX")
+            utils.Logger().Warn().Err(err).Msg("cannot JSON-encode TX")
         }
     }()
     if id == "" {
@@ -250,12 +247,12 @@ func (s *Service) GetExplorerTransaction(w http.ResponseWriter, r *http.Request)
     db := s.storage.GetDB()
     bytes, err := db.Get([]byte(GetTXKey(id)))
     if err != nil {
-        ctxerror.Warn(utils.GetLogger(), err, "cannot read TX", "id", id)
+        utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read TX")
        return
     }
     tx := new(Transaction)
     if rlp.DecodeBytes(bytes, tx) != nil {
-        utils.GetLogger().Warn("cannot convert data from DB", "id", id)
+        utils.Logger().Warn().Str("id", id).Msg("cannot convert data from DB")
         return
     }
     data.TX = *tx
@@ -267,7 +264,7 @@ func (s *Service) GetExplorerAddress(w http.ResponseWriter, r *http.Request) {
     id := r.FormValue("id")
     key := GetAddressKey(id)
-    utils.GetLogInstance().Info("Querying address", "address", id)
+    utils.Logger().Info().Str("address", id).Msg("Querying address")
     data := &Data{}
     defer func() {
         if err := json.NewEncoder(w).Encode(data.Address); err != nil {
@@ -292,11 +289,11 @@ func (s *Service) GetExplorerAddress(w http.ResponseWriter, r *http.Request) {
     db := s.storage.GetDB()
     bytes, err := db.Get([]byte(key))
     if err != nil {
-        ctxerror.Warn(utils.GetLogger(), err, "cannot read address from db", "id", id)
+        utils.Logger().Warn().Err(err).Str("id", id).Msg("cannot read address from db")
         return
     }
     if err = rlp.DecodeBytes(bytes, &data.Address); err != nil {
-        utils.GetLogger().Warn("cannot convert data from DB", "id", id)
+        utils.Logger().Warn().Str("id", id).Msg("cannot convert data from DB")
         return
     }
 }
@@ -305,7 +302,7 @@ func (s *Service) GetExplorerAddress(w http.ResponseWriter, r *http.Request) {
 func (s *Service) GetExplorerNodeCount(w http.ResponseWriter, r *http.Request) {
     w.Header().Set("Content-Type", "application/json")
     if err := json.NewEncoder(w).Encode(len(s.GetNodeIDs())); err != nil {
-        ctxerror.Warn(utils.GetLogger(), err, "cannot JSON-encode node count")
+        utils.Logger().Warn().Msg("cannot JSON-encode node count")
     }
 }
@@ -319,7 +316,7 @@ func (s *Service) GetExplorerShard(w http.ResponseWriter, r *http.Request) {
     })
     }
     if err := json.NewEncoder(w).Encode(Shard{Nodes: nodes}); err != nil {
-        ctxerror.Warn(utils.GetLogger(), err, "cannot JSON-encode shard info")
+        utils.Logger().Warn().Msg("cannot JSON-encode shard info")
     }
 }
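
The StartService/StopService pair above follows the standard net/http lifecycle: serve in a goroutine, then shut down gracefully via Shutdown. A stripped-down sketch of that lifecycle (stdlib only; the handler is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/blocks", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "[]") // placeholder for the explorer's block JSON
	})

	server := &http.Server{Addr: "127.0.0.1:8080", Handler: mux}

	// Serve in the background; ListenAndServe returns http.ErrServerClosed
	// after a clean Shutdown, so only other errors are worth logging.
	go func() {
		if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			fmt.Println("server.ListenAndServe():", err)
		}
	}()

	time.Sleep(100 * time.Millisecond) // stand-in for the service's lifetime

	// Graceful shutdown, as in StopService above.
	if err := server.Shutdown(context.Background()); err != nil {
		fmt.Println("error when shutting down explorer server:", err)
	}
}
```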

@@ -67,11 +67,11 @@ func (storage *Storage) Init(ip, port string, remove bool) {
     if remove {
         var err = os.RemoveAll(dbFileName)
         if err != nil {
-            utils.GetLogInstance().Error(err.Error())
+            utils.Logger().Error().Err(err).Msg("Failed to remove existing database files")
         }
     }
     if storage.db, err = ethdb.NewLDBDatabase(dbFileName, 0, 0); err != nil {
-        utils.GetLogInstance().Error(err.Error())
+        utils.Logger().Error().Err(err).Msg("Failed to create new database")
     }
 }
@@ -82,7 +82,7 @@ func (storage *Storage) GetDB() *ethdb.LDBDatabase {
 // Dump extracts information from block and index them into lvdb for explorer.
 func (storage *Storage) Dump(block *types.Block, height uint64) {
-    utils.GetLogInstance().Info("Dumping block ", "block height", height)
+    utils.Logger().Info().Uint64("block height", height).Msg("Dumping block")
     if block == nil {
         return
     }
@@ -90,17 +90,17 @@ func (storage *Storage) Dump(block *types.Block, height uint64) {
     batch := storage.db.NewBatch()
     // Update block height.
     if err := batch.Put([]byte(BlockHeightKey), []byte(strconv.Itoa(int(height)))); err != nil {
-        ctxerror.Warn(utils.GetLogger(), err, "cannot batch block height")
+        utils.Logger().Warn().Err(err).Msg("cannot batch block height")
     }
     // Store block.
     blockData, err := rlp.EncodeToBytes(block)
     if err == nil {
         if err := batch.Put([]byte(GetBlockKey(int(height))), blockData); err != nil {
-            ctxerror.Warn(utils.GetLogger(), err, "cannot batch block data")
+            utils.Logger().Warn().Err(err).Msg("cannot batch block data")
         }
     } else {
-        utils.GetLogInstance().Debug("Failed to serialize block ", "error", err)
+        utils.Logger().Error().Err(err).Msg("Failed to serialize block")
     }
     // Store txs
@@ -123,10 +123,10 @@ func (storage *Storage) UpdateTXStorage(batch ethdb.Batch, explorerTransaction *
     if data, err := rlp.EncodeToBytes(explorerTransaction); err == nil {
         key := GetTXKey(tx.Hash().Hex())
         if err := batch.Put([]byte(key), data); err != nil {
-            ctxerror.Warn(utils.GetLogger(), err, "cannot batch TX")
+            utils.Logger().Warn().Err(err).Msg("cannot batch TX")
         }
     } else {
-        utils.GetLogInstance().Error("EncodeRLP transaction error")
+        utils.Logger().Error().Msg("EncodeRLP transaction error")
     }
 }
@@ -146,18 +146,19 @@ func (storage *Storage) UpdateAddressStorage(batch ethdb.Batch, adr string, expl
     if err == nil {
         address.Balance.Add(address.Balance, tx.Value())
     } else {
-        utils.GetLogInstance().Error("Failed to error", "err", err)
+        utils.Logger().Error().Err(err).Msg("Failed to error")
     }
     } else {
         address.Balance = tx.Value()
     }
     address.ID = adr
     address.TXs = append(address.TXs, explorerTransaction)
-    if encoded, err := rlp.EncodeToBytes(address); err == nil {
+    encoded, err := rlp.EncodeToBytes(address)
+    if err == nil {
         if err := batch.Put([]byte(key), encoded); err != nil {
-            ctxerror.Warn(utils.GetLogger(), err, "cannot batch address")
+            utils.Logger().Warn().Err(err).Msg("cannot batch address")
        }
     } else {
-        utils.GetLogInstance().Error("Can not encode address account.")
+        utils.Logger().Error().Err(err).Msg("cannot encode address account")
     }
 }
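
Dump and UpdateAddressStorage rely on go-ethereum's RLP codec; the if/else shape in the last hunk is the usual encode-then-Put flow. A small round-trip sketch (assuming github.com/ethereum/go-ethereum/rlp; the record type is a stand-in for the explorer's Address):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// address stands in for the explorer's Address record.
type address struct {
	ID  string
	TXs []string
}

func main() {
	in := address{ID: "one1example", TXs: []string{"0xabc"}}

	// Encode before writing to the batch/DB.
	encoded, err := rlp.EncodeToBytes(&in)
	if err != nil {
		fmt.Println("cannot encode address account:", err)
		return
	}

	// Decode what was read back from the DB.
	var out address
	if err := rlp.DecodeBytes(encoded, &out); err != nil {
		fmt.Println("cannot convert data from DB:", err)
		return
	}
	fmt.Println(out.ID, out.TXs)
}
```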

@@ -92,7 +92,7 @@ func GetTransaction(tx *types.Transaction, accountBlock *types.Block) *Transacti
     }
     msg, err := tx.AsMessage(types.HomesteadSigner{})
     if err != nil {
-        utils.GetLogger().Error("Error when parsing tx into message", "err", err)
+        utils.Logger().Error().Err(err).Msg("Error when parsing tx into message")
     }
     return &Transaction{
         ID: tx.Hash().Hex(),

@@ -105,12 +105,12 @@ func (m *Manager) GetServices() map[Type]Interface {
 // Register registers new service to service store.
 func (m *Manager) Register(t Type, service Interface) {
-    utils.GetLogInstance().Info("Register Service", "service", t)
+    utils.Logger().Info().Int("service", int(t)).Msg("Register Service")
     if m.services == nil {
         m.services = make(map[Type]Interface)
     }
     if _, ok := m.services[t]; ok {
-        utils.GetLogInstance().Error("This service is already included: ", "servie", t)
+        utils.Logger().Error().Int("servie", int(t)).Msg("This service is already included")
         return
     }
     m.services[t] = service
@@ -140,7 +140,7 @@ func (m *Manager) SendAction(action *Action) {
 // TakeAction is how service manager handles the action.
 func (m *Manager) TakeAction(action *Action) {
     if m.services == nil {
-        utils.GetLogInstance().Error("Service store is not initialized.")
+        utils.Logger().Error().Msg("Service store is not initialized")
         return
     }
     if service, ok := m.services[action.ServiceType]; ok {
@@ -167,7 +167,7 @@ func (m *Manager) StartServiceManager() chan *Action {
             return
         }
     case <-time.After(WaitForStatusUpdate):
-        utils.GetLogInstance().Info("Waiting for new action.")
+        utils.Logger().Info().Msg("Waiting for new action")
     }
     }
 }()
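
Register/TakeAction above implement a simple keyed service registry with lazy map initialization and a duplicate check. A compact sketch of that pattern (the types are stand-ins for the service manager's own):

```go
package main

import "fmt"

// Type and Interface stand in for the service manager's types.
type Type int

type Interface interface {
	StartService()
	StopService()
}

type Manager struct {
	services map[Type]Interface
}

// Register adds a service, initializing the map on first use and
// refusing duplicate registrations, as in the hunk above.
func (m *Manager) Register(t Type, service Interface) {
	if m.services == nil {
		m.services = make(map[Type]Interface)
	}
	if _, ok := m.services[t]; ok {
		fmt.Printf("service %d is already included\n", t)
		return
	}
	m.services[t] = service
}

func main() {
	m := &Manager{}
	m.Register(Type(1), nil) // nil stands in for a real service
	m.Register(Type(1), nil) // duplicate: rejected with a log line
}
```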

@@ -79,7 +79,7 @@ func New(h p2p.Host, rendezvous p2p.GroupID, peerChan chan p2p.Peer, bootnodes u
 func (s *Service) StartService() {
     err := s.Init()
     if err != nil {
-        utils.GetLogInstance().Error("Service Init Failed", "error", err)
+        utils.Logger().Error().Err(err).Msg("Service Init Failed")
         return
     }
     s.Run()
@@ -88,11 +88,11 @@ func (s *Service) StartService() {
 // Init initializes role conversion service.
 func (s *Service) Init() error {
-    utils.GetLogInstance().Info("Init networkinfo service")
+    utils.Logger().Info().Msg("Init networkinfo service")
     // Bootstrap the DHT. In the default configuration, this spawns a Background
     // thread that will refresh the peer table every five minutes.
-    utils.GetLogInstance().Debug("Bootstrapping the DHT")
+    utils.Logger().Debug().Msg("Bootstrapping the DHT")
     if err := s.dht.Bootstrap(ctx); err != nil {
         return fmt.Errorf("error bootstrap dht: %s", err)
     }
@@ -111,10 +111,10 @@ func (s *Service) Init() error {
     defer wg.Done()
     for i := 0; i < ConnectionRetry; i++ {
         if err := s.Host.GetP2PHost().Connect(ctx, *peerinfo); err != nil {
-            utils.GetLogInstance().Warn("can't connect to bootnode", "error", err, "try", i)
+            utils.Logger().Warn().Err(err).Int("try", i).Msg("can't connect to bootnode")
             time.Sleep(waitInRetry)
         } else {
-            utils.GetLogInstance().Info("connected to bootnode", "node", *peerinfo, "try", i)
+            utils.Logger().Info().Int("try", i).Interface("node", *peerinfo).Msg("connected to bootnode")
             // it is okay if any bootnode is connected
             connected = true
             break
@@ -129,10 +129,10 @@ func (s *Service) Init() error {
     }
     // We use a rendezvous point "shardID" to announce our location.
-    utils.GetLogInstance().Info("Announcing ourselves...", "Rendezvous", string(s.Rendezvous))
+    utils.Logger().Info().Str("Rendezvous", string(s.Rendezvous)).Msg("Announcing ourselves...")
     s.discovery = libp2pdis.NewRoutingDiscovery(s.dht)
     libp2pdis.Advertise(ctx, s.discovery, string(s.Rendezvous))
-    utils.GetLogInstance().Info("Successfully announced!")
+    utils.Logger().Info().Msg("Successfully announced!")
     return nil
 }
@@ -141,7 +141,7 @@ func (s *Service) Init() error {
 func (s *Service) Run() {
     defer close(s.stoppedChan)
     if s.discovery == nil {
-        utils.GetLogInstance().Error("discovery is not initialized")
+        utils.Logger().Error().Msg("discovery is not initialized")
         return
     }
@@ -157,12 +157,12 @@ func (s *Service) DoService() {
         return
     case <-tick.C:
         libp2pdis.Advertise(ctx, s.discovery, string(s.Rendezvous))
-        utils.GetLogInstance().Info("Successfully announced!", "Rendezvous", string(s.Rendezvous))
+        utils.Logger().Info().Str("Rendezvous", string(s.Rendezvous)).Msg("Successfully announced!")
     default:
         var err error
         s.peerInfo, err = s.discovery.FindPeers(ctx, string(s.Rendezvous))
         if err != nil {
-            utils.GetLogInstance().Error("FindPeers", "error", err)
+            utils.Logger().Error().Err(err).Msg("FindPeers")
             return
         }
@@ -175,18 +175,22 @@ func (s *Service) DoService() {
 func (s *Service) findPeers() {
     _, cgnPrefix, err := net.ParseCIDR("100.64.0.0/10")
     if err != nil {
-        utils.GetLogInstance().Error("can't parse CIDR", "error", err)
+        utils.Logger().Error().Err(err).Msg("can't parse CIDR")
         return
     }
     for peer := range s.peerInfo {
         if peer.ID != s.Host.GetP2PHost().ID() && len(peer.ID) > 0 {
-            // utils.GetLogInstance().Info("Found Peer", "peer", peer.ID, "addr", peer.Addrs, "my ID", s.Host.GetP2PHost().ID())
+            // utils.Logger().Info().
+            //     Interface("peer", peer.ID).
+            //     Interface("addr", peer.Addrs).
+            //     Interface("my ID", s.Host.GetP2PHost().ID()).
+            //     Msg("Found Peer")
             if err := s.Host.GetP2PHost().Connect(ctx, peer); err != nil {
-                utils.GetLogInstance().Warn("can't connect to peer node", "error", err, "peer", peer)
+                utils.Logger().Warn().Err(err).Interface("peer", peer).Msg("can't connect to peer node")
                 // break if the node can't connect to peers, waiting for another peer
                 break
             } else {
-                utils.GetLogInstance().Info("connected to peer node", "peer", peer)
+                utils.Logger().Info().Interface("peer", peer).Msg("connected to peer node")
             }
             // figure out the public ip/port
             var ip, port string
@@ -204,30 +208,30 @@
     }
     }
     p := p2p.Peer{IP: ip, Port: port, PeerID: peer.ID, Addrs: peer.Addrs}
-    utils.GetLogInstance().Info("Notify peerChan", "peer", p)
+    utils.Logger().Info().Interface("peer", p).Msg("Notify peerChan")
     if s.peerChan != nil {
         s.peerChan <- p
     }
     }
     }
-    utils.GetLogInstance().Info("PeerInfo Channel Closed.")
+    utils.Logger().Info().Msg("PeerInfo Channel Closed")
     return
 }
 // StopService stops network info service.
 func (s *Service) StopService() {
-    utils.GetLogInstance().Info("Stopping network info service.")
+    utils.Logger().Info().Msg("Stopping network info service")
     defer s.cancel()
     if !s.started {
-        utils.GetLogInstance().Info("Service didn't started. Exit.")
+        utils.Logger().Info().Msg("Service didn't started. Exit")
        return
     }
     s.stopChan <- struct{}{}
     <-s.stoppedChan
-    utils.GetLogInstance().Info("Network info service stopped.")
+    utils.Logger().Info().Msg("Network info service stopped")
 }
 // NotifyService notify service

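The recurring change in the hunks above (and in most files below) swaps the old log15-style variadic logger, utils.GetLogInstance().Info("msg", "key", val), for a zerolog-style chained builder with typed fields. A minimal, self-contained sketch of the new pattern, using the github.com/rs/zerolog package directly rather than the repo's internal utils wrapper (an assumption for illustration; utils.Logger() returns a preconfigured zerolog.Logger of roughly this shape):

    package main

    import (
        "errors"
        "os"

        "github.com/rs/zerolog"
    )

    func main() {
        // A bare zerolog logger writing JSON events to stderr.
        logger := zerolog.New(os.Stderr).With().Timestamp().Logger()

        err := errors.New("connection refused")
        // Typed fields (Err, Int, Str, Interface) replace the old untyped
        // "key", value pairs; Msg() terminates the chain and emits the event.
        logger.Warn().Err(err).Int("try", 3).Msg("can't connect to bootnode")
    }

Besides type safety, the builder allocates nothing until the event is actually enabled at the current level, which is why the migration can afford Debug calls on hot paths.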
@@ -29,10 +29,10 @@ func (s *Service) StartService() {
 // StopService stops randomness generation service.
 func (s *Service) StopService() {
-    utils.GetLogInstance().Info("Stopping random generation service.")
+    utils.Logger().Info().Msg("Stopping random generation service")
     s.stopChan <- struct{}{}
     <-s.stoppedChan
-    utils.GetLogInstance().Info("Random generation stopped.")
+    utils.Logger().Info().Msg("Random generation stopped")
 }

 // NotifyService notify service

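Every StopService in these service files uses the same two-channel handshake: the caller pushes into stopChan and then blocks on stoppedChan until the service loop confirms it has exited. A standalone sketch of that handshake under hypothetical names (not the repo's actual Service struct; closing stoppedChan is one of several equivalent ways the loop can signal back):

    package main

    import (
        "fmt"
        "time"
    )

    type service struct {
        stopChan    chan struct{}
        stoppedChan chan struct{}
    }

    func (s *service) run() {
        for {
            select {
            case <-s.stopChan:
                close(s.stoppedChan) // confirm the loop has exited
                return
            default:
                time.Sleep(100 * time.Millisecond) // one unit of work
            }
        }
    }

    func main() {
        s := &service{make(chan struct{}), make(chan struct{})}
        go s.run()
        s.stopChan <- struct{}{} // request stop
        <-s.stoppedChan          // wait for confirmation
        fmt.Println("service stopped")
    }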
@@ -47,7 +47,7 @@ func (s *Service) Run(stopChan chan struct{}, stoppedChan chan struct{}) {
     for {
         select {
         default:
-            utils.GetLogInstance().Info("Running role conversion")
+            utils.Logger().Info().Msg("Running role conversion")
             // TODO: Write some logic here.
             s.DoService()
         case <-stopChan:
@@ -75,10 +75,10 @@ func (s *Service) DoService() {
 // StopService stops role conversion service.
 func (s *Service) StopService() {
-    utils.GetLogInstance().Info("Stopping role conversion service.")
+    utils.Logger().Info().Msg("Stopping role conversion service")
     s.stopChan <- struct{}{}
     <-s.stoppedChan
-    utils.GetLogInstance().Info("Role conversion stopped.")
+    utils.Logger().Info().Msg("Role conversion stopped")
 }

 // NotifyService notify service

@@ -24,7 +24,6 @@ import (
     "github.com/harmony-one/harmony/core"
     "github.com/harmony-one/harmony/core/types"
     common2 "github.com/harmony-one/harmony/internal/common"
-    "github.com/harmony-one/harmony/internal/ctxerror"
     "github.com/harmony-one/harmony/internal/genesis"
     hmykey "github.com/harmony-one/harmony/internal/keystore"
     "github.com/harmony-one/harmony/internal/utils"
@@ -73,7 +72,7 @@ func New(host p2p.Host, account accounts.Account, beaconChain *core.BlockChain,
 // StartService starts staking service.
 func (s *Service) StartService() {
-    utils.GetLogger().Info("Start Staking Service")
+    utils.Logger().Info().Msg("Start Staking Service")
     s.Run()
 }
@@ -108,20 +107,20 @@ func (s *Service) IsStaked() bool {
 // DoService does staking.
 func (s *Service) DoService() {
-    utils.GetLogInstance().Info("Trying to send a staking transaction.")
+    utils.Logger().Info().Msg("Trying to send a staking transaction.")
     // TODO: no need to sync beacon chain to stake
     //if s.beaconChain == nil {
-    //    utils.GetLogInstance().Info("Can not send a staking transaction because of nil beacon chain.")
+    //    utils.Logger().Info().Msg("Can not send a staking transaction because of nil beacon chain.")
     //    return
     //}
     if msg := s.createStakingMessage(); msg == nil {
-        utils.GetLogInstance().Error("Can not create staking transaction")
+        utils.Logger().Error().Msg("Can not create staking transaction")
     } else if err := s.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, host.ConstructP2pMessage(byte(17), msg)); err != nil {
-        ctxerror.Warn(utils.GetLogger(), err, "cannot send staking message")
+        utils.Logger().Warn().Err(err).Msg("cannot send staking message")
     } else {
-        utils.GetLogInstance().Info("Sent staking transaction to the network.")
+        utils.Logger().Info().Msg("Sent staking transaction to the network.")
     }
 }
@@ -129,17 +128,17 @@ func (s *Service) getStakingInfo() *proto.StakingContractInfoResponse {
     address := s.account.Address
     state, err := s.beaconChain.State()
     if err != nil {
-        utils.GetLogInstance().Error("error to get beacon chain state when getting staking info")
+        utils.Logger().Error().Msg("error to get beacon chain state when getting staking info")
         return nil
     }
     balance := state.GetBalance(address)
     if balance == common.Big0 {
-        utils.GetLogInstance().Error("account balance empty when getting staking info")
+        utils.Logger().Error().Msg("account balance empty when getting staking info")
         return nil
     }
     nonce := state.GetNonce(address)
     if nonce == 0 {
-        utils.GetLogInstance().Error("nonce zero when getting staking info")
+        utils.Logger().Error().Msg("nonce zero when getting staking info")
         return nil
     }
     return &proto.StakingContractInfoResponse{
@@ -181,7 +180,7 @@ func constructStakingMessage(ts types.Transactions) []byte {
             return data
         }
     }
-    utils.GetLogInstance().Error("Error when creating staking message", "error", err)
+    utils.Logger().Error().Err(err).Msg("Error when creating staking message")
     return nil
 }
@@ -192,12 +191,12 @@ func (s *Service) createRawStakingMessage() []byte {
     abi, err := abi.JSON(strings.NewReader(contracts.StakeLockContractABI))
     if err != nil {
-        utils.GetLogInstance().Error("Failed to generate staking contract's ABI", "error", err)
+        utils.Logger().Error().Err(err).Msg("Failed to generate staking contract's ABI")
     }
     // TODO: the bls address should be signed by the bls private key
     blsPubKeyBytes := s.blsPublicKey.Serialize()
     if len(blsPubKeyBytes) != 96 {
-        utils.GetLogInstance().Error("Wrong bls pubkey size", "size", len(blsPubKeyBytes))
+        utils.Logger().Error().Int("size", len(blsPubKeyBytes)).Msg("Wrong bls pubkey size")
         return []byte{}
     }
     blsPubKeyPart1 := [32]byte{}
@@ -209,7 +208,7 @@ func (s *Service) createRawStakingMessage() []byte {
     bytesData, err := abi.Pack("lock", blsPubKeyPart1, blsPubKeyPart2, blsPubKeyPart3)
     if err != nil {
-        utils.GetLogInstance().Error("Failed to generate ABI function bytes data", "error", err)
+        utils.Logger().Error().Err(err).Msg("Failed to generate ABI function bytes data")
     }
     tx := types.NewTransaction(
@@ -240,10 +239,10 @@ func (s *Service) createStakingMessage() []byte {
 // StopService stops staking service.
 func (s *Service) StopService() {
-    utils.GetLogInstance().Info("Stopping staking service.")
+    utils.Logger().Info().Msg("Stopping staking service.")
     s.stopChan <- struct{}{}
     <-s.stoppedChan
-    utils.GetLogInstance().Info("Role conversion stopped.")
+    utils.Logger().Info().Msg("Role conversion stopped.")
 }

 // NotifyService notify service

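Context for the createRawStakingMessage hunk: the 96-byte serialized BLS public key is split into three [32]byte values before abi.Pack("lock", ...), evidently because bytes32 is the largest fixed-size bytes type in the Solidity ABI, so the lock function takes three bytes32 arguments. A small sketch of that split, with a dummy key standing in for the real s.blsPublicKey.Serialize() output:

    package main

    import "fmt"

    func main() {
        // Stand-in for s.blsPublicKey.Serialize(): a BLS public key is 96 bytes.
        blsPubKeyBytes := make([]byte, 96)
        for i := range blsPubKeyBytes {
            blsPubKeyBytes[i] = byte(i)
        }

        // Three bytes32 words, matching a lock(bytes32,bytes32,bytes32) signature.
        var part1, part2, part3 [32]byte
        copy(part1[:], blsPubKeyBytes[0:32])
        copy(part2[:], blsPubKeyBytes[32:64])
        copy(part3[:], blsPubKeyBytes[64:96])

        fmt.Println(part1[0], part2[0], part3[0]) // 0 32 64
    }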
@@ -24,10 +24,10 @@ func ClientSetup(ip, port string) *Client {
     var err error
     client.conn, err = grpc.Dial(fmt.Sprintf(ip+":"+port), client.opts...)
     if err != nil {
-        utils.GetLogInstance().Info("[SYNC] client.go:ClientSetup fail to dial: ", "IP", ip, "error", err)
+        utils.Logger().Error().Err(err).Str("ip", ip).Msg("[SYNC] client.go:ClientSetup fail to dial")
         return nil
     }
-    utils.GetLogInstance().Info("[SYNC] grpc connect successfully", "IP", ip)
+    utils.Logger().Info().Str("ip", ip).Msg("[SYNC] grpc connect successfully")
     client.dlClient = pb.NewDownloaderClient(client.conn)
     return &client
 }
@@ -36,7 +36,7 @@ func ClientSetup(ip, port string) *Client {
 func (client *Client) Close() {
     err := client.conn.Close()
     if err != nil {
-        utils.GetLogInstance().Info("[SYNC] unable to close connection ")
+        utils.Logger().Info().Msg("[SYNC] unable to close connection")
     }
 }
@@ -47,7 +47,7 @@ func (client *Client) GetBlockHashes(startHash []byte, size uint32) *pb.Download
     request := &pb.DownloaderRequest{Type: pb.DownloaderRequest_HEADER, BlockHash: startHash, Size: size}
     response, err := client.dlClient.Query(ctx, request)
     if err != nil {
-        utils.GetLogInstance().Info("[SYNC] GetBlockHashes query failed", "error", err)
+        utils.Logger().Error().Err(err).Msg("[SYNC] GetBlockHashes query failed")
     }
     return response
 }
@@ -64,7 +64,7 @@ func (client *Client) GetBlocks(hashes [][]byte) *pb.DownloaderResponse {
     }
     response, err := client.dlClient.Query(ctx, request)
     if err != nil {
-        utils.GetLogInstance().Info("[SYNC] downloader/client.go:GetBlocks query failed.", "error", err)
+        utils.Logger().Error().Err(err).Msg("[SYNC] downloader/client.go:GetBlocks query failed")
     }
     return response
 }
@@ -81,7 +81,7 @@ func (client *Client) Register(hash []byte, ip, port string) *pb.DownloaderRespo
     request.Port = port
     response, err := client.dlClient.Query(ctx, request)
     if err != nil || response == nil {
-        utils.GetLogInstance().Info("[SYNC] client.go:Register failed.", "error", err, "response", response)
+        utils.Logger().Error().Err(err).Interface("response", response).Msg("[SYNC] client.go:Register failed")
     }
     return response
 }
@@ -103,7 +103,7 @@ func (client *Client) PushNewBlock(selfPeerHash [20]byte, blockHash []byte, time
     response, err := client.dlClient.Query(ctx, request)
     if err != nil {
-        utils.GetLogInstance().Info("[SYNC] unable to send new block to unsync node", "error", err)
+        utils.Logger().Error().Err(err).Msg("[SYNC] unable to send new block to unsync node")
     }
     return response
 }
@@ -115,7 +115,7 @@ func (client *Client) GetBlockChainHeight() *pb.DownloaderResponse {
     request := &pb.DownloaderRequest{Type: pb.DownloaderRequest_BLOCKHEIGHT}
     response, err := client.dlClient.Query(ctx, request)
     if err != nil {
-        utils.GetLogInstance().Info("[SYNC] unable to get blockchain height", "error", err)
+        utils.Logger().Error().Err(err).Msg("[SYNC] unable to get blockchain height")
     }
     return response
 }

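Incidentally, fmt.Sprintf(ip+":"+port) in ClientSetup passes a pre-concatenated string as the format with no arguments, so the Sprintf is a no-op wrapper and would misformat an address that ever contained a %. A sketch of the conventional form, with net.JoinHostPort and an illustrative target (grpc.WithInsecure matches the plaintext transport the sync client presumably uses; Dial is non-blocking by default, so errors here are mostly address errors):

    package main

    import (
        "fmt"
        "net"

        "google.golang.org/grpc"
    )

    func main() {
        // JoinHostPort also handles IPv6 addresses, unlike naive ip+":"+port.
        target := net.JoinHostPort("127.0.0.1", "6000")

        conn, err := grpc.Dial(target, grpc.WithInsecure())
        if err != nil {
            fmt.Println("fail to dial:", err)
            return
        }
        defer conn.Close()
        fmt.Println("connection state:", conn.GetState())
    }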
@@ -6,7 +6,6 @@ import (
     "net"

     pb "github.com/harmony-one/harmony/api/service/syncing/downloader/proto"
-    "github.com/harmony-one/harmony/internal/ctxerror"
     "github.com/harmony-one/harmony/internal/utils"

     "google.golang.org/grpc"
@@ -44,7 +43,7 @@ func (s *Server) Start(ip, port string) (*grpc.Server, error) {
     pb.RegisterDownloaderServer(grpcServer, s)
     go func() {
         if err := grpcServer.Serve(lis); err != nil {
-            ctxerror.Warn(utils.GetLogger(), err, "[SYNC] (*grpc.Server).Serve failed")
+            utils.Logger().Warn().Err(err).Msg("[SYNC] (*grpc.Server).Serve failed")
         }
     }()

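Server.Start runs grpcServer.Serve(lis) in a goroutine because Serve blocks until the server stops; the error branch fires on listener failure rather than normal shutdown. A self-contained sketch of the same start-in-background shape (the port and the omitted service registration are illustrative, not the repo's):

    package main

    import (
        "log"
        "net"
        "time"

        "google.golang.org/grpc"
    )

    func main() {
        lis, err := net.Listen("tcp", "127.0.0.1:0") // any free port
        if err != nil {
            log.Fatal(err)
        }

        grpcServer := grpc.NewServer()
        // Real code registers a service here, e.g.
        // pb.RegisterDownloaderServer(grpcServer, s).

        go func() {
            // Serve blocks; run it off the main goroutine and surface its error.
            if err := grpcServer.Serve(lis); err != nil {
                log.Printf("Serve failed: %v", err)
            }
        }()

        time.Sleep(100 * time.Millisecond) // let Serve start (demo only)
        grpcServer.GracefulStop()          // Serve returns nil after GracefulStop
    }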
@@ -169,7 +169,10 @@ func (ss *StateSync) AddNewBlock(peerHash []byte, block *types.Block) {
     pc.mux.Lock()
     defer pc.mux.Unlock()
     pc.newBlocks = append(pc.newBlocks, block)
-    utils.GetLogInstance().Debug("[SYNC] new block received", "total", len(pc.newBlocks), "blockHeight", block.NumberU64())
+    utils.Logger().Debug().
+        Int("total", len(pc.newBlocks)).
+        Uint64("blockHeight", block.NumberU64()).
+        Msg("[SYNC] new block received")
 }

 // CreateTestSyncPeerConfig used for testing.
@@ -207,7 +210,11 @@ func (peerConfig *SyncPeerConfig) GetBlocks(hashes [][]byte) ([][]byte, error) {
 // CreateSyncConfig creates SyncConfig for StateSync object.
 func (ss *StateSync) CreateSyncConfig(peers []p2p.Peer, isBeacon bool) error {
-    utils.GetLogInstance().Debug("[SYNC] CreateSyncConfig: len of peers", "len", len(peers), "isBeacon", isBeacon)
+    utils.Logger().Debug().
+        Int("len", len(peers)).
+        Bool("isBeacon", isBeacon).
+        Msg("[SYNC] CreateSyncConfig: len of peers")
     if len(peers) == 0 {
         return ctxerror.New("[SYNC] no peers to connect to")
     }
@@ -230,7 +237,10 @@ func (ss *StateSync) CreateSyncConfig(peers []p2p.Peer, isBeacon bool) error {
         }(peer)
     }
     wg.Wait()
-    utils.GetLogInstance().Info("[SYNC] Finished making connection to peers.", "len", len(ss.syncConfig.peers), "isBeacon", isBeacon)
+    utils.Logger().Info().
+        Int("len", len(ss.syncConfig.peers)).
+        Bool("isBeacon", isBeacon).
+        Msg("[SYNC] Finished making connection to peers")
     return nil
 }
@@ -304,7 +314,10 @@ func (sc *SyncConfig) GetBlockHashesConsensusAndCleanUp() bool {
         return CompareSyncPeerConfigByblockHashes(sc.peers[i], sc.peers[j]) == -1
     })
     maxFirstID, maxCount := sc.getHowManyMaxConsensus()
-    utils.GetLogInstance().Info("[SYNC] block consensus hashes", "maxFirstID", maxFirstID, "maxCount", maxCount)
+    utils.Logger().Info().
+        Int("maxFirstID", maxFirstID).
+        Int("maxCount", maxCount).
+        Msg("[SYNC] block consensus hashes")
     if float64(maxCount) >= ConsensusRatio*float64(len(sc.peers)) {
         sc.cleanUpPeers(maxFirstID)
         return true
@@ -326,7 +339,10 @@ func (ss *StateSync) GetConsensusHashes(startHash []byte, size uint32) bool {
             return
         }
         if len(response.Payload) > int(size+1) {
-            utils.GetLogInstance().Warn("[SYNC] GetConsensusHashes: receive more blockHahses than request!", "requestSize", size, "respondSize", len(response.Payload))
+            utils.Logger().Warn().
+                Uint32("requestSize", size).
+                Int("respondSize", len(response.Payload)).
+                Msg("[SYNC] GetConsensusHashes: receive more blockHahses than request!")
             peerConfig.blockHashes = response.Payload[:size+1]
         } else {
             peerConfig.blockHashes = response.Payload
@@ -339,13 +355,13 @@ func (ss *StateSync) GetConsensusHashes(startHash []byte, size uint32) bool {
             break
         }
         if count > TimesToFail {
-            utils.GetLogInstance().Info("[SYNC] GetConsensusHashes: reached retry limit")
+            utils.Logger().Info().Msg("[SYNC] GetConsensusHashes: reached retry limit")
             return false
         }
         count++
         time.Sleep(SleepTimeAfterNonConsensusBlockHashes)
     }
-    utils.GetLogInstance().Info("[SYNC] Finished getting consensus block hashes.")
+    utils.Logger().Info().Msg("[SYNC] Finished getting consensus block hashes")
     return true
 }
@@ -354,14 +370,17 @@ func (ss *StateSync) generateStateSyncTaskQueue(bc *core.BlockChain) {
     ss.syncConfig.ForEachPeer(func(configPeer *SyncPeerConfig) (brk bool) {
         for id, blockHash := range configPeer.blockHashes {
             if err := ss.stateSyncTaskQueue.Put(SyncBlockTask{index: id, blockHash: blockHash}); err != nil {
-                ctxerror.Warn(utils.GetLogger(), err, "cannot add task",
-                    "taskIndex", id, "taskBlock", hex.EncodeToString(blockHash))
+                utils.Logger().Warn().
+                    Err(err).
+                    Int("taskIndex", id).
+                    Str("taskBlock", hex.EncodeToString(blockHash)).
+                    Msg("cannot add task")
             }
         }
         brk = true
         return
     })
-    utils.GetLogInstance().Info("[SYNC] Finished generateStateSyncTaskQueue", "length", ss.stateSyncTaskQueue.Len())
+    utils.Logger().Info().Int64("length", ss.stateSyncTaskQueue.Len()).Msg("[SYNC] Finished generateStateSyncTaskQueue")
 }
 // downloadBlocks downloads blocks from state sync task queue.
@@ -376,7 +395,7 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
     for !stateSyncTaskQueue.Empty() {
         task, err := ss.stateSyncTaskQueue.Poll(1, time.Millisecond)
         if err == queue.ErrTimeout || len(task) == 0 {
-            utils.GetLogInstance().Debug("[SYNC] ss.stateSyncTaskQueue poll timeout", "error", err)
+            utils.Logger().Error().Err(err).Msg("[SYNC] ss.stateSyncTaskQueue poll timeout")
             break
         }
         syncTask := task[0].(SyncBlockTask)
@@ -384,14 +403,16 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
         payload, err := peerConfig.GetBlocks([][]byte{syncTask.blockHash})
         if err != nil || len(payload) == 0 {
             count++
-            utils.GetLogInstance().Debug("[SYNC] GetBlocks failed", "failNumber", count)
+            utils.Logger().Error().Err(err).Int("failNumber", count).Msg("[SYNC] GetBlocks failed")
             if count > TimesToFail {
                 break
             }
             if err := ss.stateSyncTaskQueue.Put(syncTask); err != nil {
-                ctxerror.Warn(utils.GetLogger(), err, "cannot add task",
-                    "taskIndex", syncTask.index,
-                    "taskBlock", hex.EncodeToString(syncTask.blockHash))
+                utils.Logger().Warn().
+                    Err(err).
+                    Int("taskIndex", syncTask.index).
+                    Str("taskBlock", hex.EncodeToString(syncTask.blockHash)).
+                    Msg("cannot add task")
             }
             continue
         }
@@ -402,14 +423,16 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
         if err != nil {
             count++
-            utils.GetLogInstance().Debug("[SYNC] downloadBlocks: failed to DecodeBytes from received new block")
+            utils.Logger().Error().Err(err).Msg("[SYNC] downloadBlocks: failed to DecodeBytes from received new block")
             if count > TimesToFail {
                 break
             }
             if err := ss.stateSyncTaskQueue.Put(syncTask); err != nil {
-                ctxerror.Warn(utils.GetLogger(), err, "cannot add task",
-                    "taskIndex", syncTask.index,
-                    "taskBlock", hex.EncodeToString(syncTask.blockHash))
+                utils.Logger().Warn().
+                    Err(err).
+                    Int("taskIndex", syncTask.index).
+                    Str("taskBlock", hex.EncodeToString(syncTask.blockHash)).
+                    Msg("cannot add task")
             }
             continue
         }
@@ -421,7 +444,7 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
         return
     })
     wg.Wait()
-    utils.GetLogInstance().Info("[SYNC] Finished downloadBlocks.")
+    utils.Logger().Info().Msg("[SYNC] Finished downloadBlocks")
 }

 // CompareBlockByHash compares two block by hash, it will be used in sort the blocks
@@ -475,7 +498,12 @@ func (ss *StateSync) getMaxConsensusBlockFromParentHash(parentHash common.Hash)
         return CompareBlockByHash(candidateBlocks[i], candidateBlocks[j]) == -1
     })
     maxFirstID, maxCount := GetHowManyMaxConsensus(candidateBlocks)
-    utils.GetLogInstance().Debug("[SYNC] Find block with matching parenthash", "parentHash", parentHash, "hash", candidateBlocks[maxFirstID].Hash(), "maxCount", maxCount)
+    hash := candidateBlocks[maxFirstID].Hash()
+    utils.Logger().Debug().
+        Bytes("parentHash", parentHash[:]).
+        Bytes("hash", hash[:]).
+        Int("maxCount", maxCount).
+        Msg("[SYNC] Find block with matching parenthash")
     return candidateBlocks[maxFirstID]
 }
@@ -500,21 +528,24 @@ func (ss *StateSync) getBlockFromLastMileBlocksByParentHash(parentHash common.Ha
 }

 func (ss *StateSync) updateBlockAndStatus(block *types.Block, bc *core.BlockChain, worker *worker.Worker) bool {
-    utils.GetLogInstance().Info("[SYNC] Current Block", "blockHex", bc.CurrentBlock().Hash().Hex())
+    utils.Logger().Info().Str("blockHex", bc.CurrentBlock().Hash().Hex()).Msg("[SYNC] Current Block")
     _, err := bc.InsertChain([]*types.Block{block})
     if err != nil {
-        utils.GetLogInstance().Debug("[SYNC] Error adding new block to blockchain", "Error", err)
-        utils.GetLogInstance().Debug("[SYNC] Rolling back current block!", "block", bc.CurrentBlock())
+        utils.Logger().Error().Err(err).Msg("[SYNC] Error adding new block to blockchain")
+        utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] Rolling back current block!")
         bc.Rollback([]common.Hash{bc.CurrentBlock().Hash()})
         return false
     }
     ss.syncMux.Lock()
     if err := worker.UpdateCurrent(block.Header().Coinbase); err != nil {
-        ctxerror.Warn(utils.GetLogger(), err, "[SYNC] (*Worker).UpdateCurrent failed")
+        utils.Logger().Warn().Err(err).Msg("[SYNC] (*Worker).UpdateCurrent failed")
     }
     ss.syncMux.Unlock()
-    utils.GetLogInstance().Info("[SYNC] new block added to blockchain", "blockHeight", bc.CurrentBlock().NumberU64(), "blockHex", bc.CurrentBlock().Hash().Hex())
+    utils.Logger().Info().
+        Uint64("blockHeight", bc.CurrentBlock().NumberU64()).
+        Str("blockHex", bc.CurrentBlock().Hash().Hex()).
+        Msg("[SYNC] new block added to blockchain")
     return true
 }
@@ -578,7 +609,7 @@ func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker
 func (ss *StateSync) ProcessStateSync(startHash []byte, size uint32, bc *core.BlockChain, worker *worker.Worker) {
     // Gets consensus hashes.
     if !ss.GetConsensusHashes(startHash, size) {
-        utils.GetLogInstance().Debug("[SYNC] ProcessStateSync unable to reach consensus on ss.GetConsensusHashes")
+        utils.Logger().Debug().Msg("[SYNC] ProcessStateSync unable to reach consensus on ss.GetConsensusHashes")
         return
     }
     ss.generateStateSyncTaskQueue(bc)
@@ -603,26 +634,34 @@ func (peerConfig *SyncPeerConfig) registerToBroadcast(peerHash []byte, ip, port
 // return number of successful registration
 func (ss *StateSync) RegisterNodeInfo() int {
     registrationNumber := RegistrationNumber
-    utils.GetLogInstance().Debug("[SYNC] node registration to peers",
-        "registrationNumber", registrationNumber,
-        "activePeerNumber", len(ss.syncConfig.peers))
+    utils.Logger().Debug().
+        Int("registrationNumber", registrationNumber).
+        Int("activePeerNumber", len(ss.syncConfig.peers)).
+        Msg("[SYNC] node registration to peers")
     count := 0
     ss.syncConfig.ForEachPeer(func(peerConfig *SyncPeerConfig) (brk bool) {
+        logger := utils.Logger().With().Str("peerPort", peerConfig.port).Str("peerIP", peerConfig.ip).Logger()
         if count >= registrationNumber {
             brk = true
             return
         }
         if peerConfig.ip == ss.selfip && peerConfig.port == GetSyncingPort(ss.selfport) {
-            utils.GetLogInstance().Debug("[SYNC] skip self", "peerport", peerConfig.port, "selfport", ss.selfport, "selfsyncport", GetSyncingPort(ss.selfport))
+            logger.Debug().
+                Str("selfport", ss.selfport).
+                Str("selfsyncport", GetSyncingPort(ss.selfport)).
+                Msg("[SYNC] skip self")
             return
         }
         err := peerConfig.registerToBroadcast(ss.selfPeerHash[:], ss.selfip, ss.selfport)
         if err != nil {
-            utils.GetLogInstance().Debug("[SYNC] register failed to peer", "ip", peerConfig.ip, "port", peerConfig.port, "selfPeerHash", ss.selfPeerHash)
+            logger.Debug().
+                Bytes("selfPeerHash", ss.selfPeerHash[:]).
+                Msg("[SYNC] register failed to peer")
             return
         }
-        utils.GetLogInstance().Debug("[SYNC] register success", "ip", peerConfig.ip, "port", peerConfig.port)
+        logger.Debug().Msg("[SYNC] register success")
         count++
         return
     })
@@ -638,7 +677,7 @@ func (ss *StateSync) getMaxPeerHeight() uint64 {
         go func() {
             defer wg.Done()
             //debug
-            //utils.GetLogInstance().Warn("[Sync] getMaxPeerHeight", "IP", peerConfig.ip, "Port", peerConfig.port)
+            // utils.Logger().Warn().Str("IP", peerConfig.ip).Str("Port", peerConfig.port).Msg("[Sync] getMaxPeerHeight")
             response := peerConfig.client.GetBlockChainHeight()
             ss.syncMux.Lock()
             if response != nil && maxHeight < response.BlockHeight {
@@ -663,7 +702,11 @@ func (ss *StateSync) IsSameBlockchainHeight(bc *core.BlockChain) (uint64, bool)
 func (ss *StateSync) IsOutOfSync(bc *core.BlockChain) bool {
     otherHeight := ss.getMaxPeerHeight()
     currentHeight := bc.CurrentBlock().NumberU64()
-    utils.GetLogInstance().Debug("[SYNC] Checking sync status", "OtherHeight", otherHeight, "MyHeight", currentHeight, "IsOutOfSync", currentHeight+inSyncThreshold < otherHeight)
+    utils.Logger().Debug().
+        Uint64("OtherHeight", otherHeight).
+        Uint64("MyHeight", currentHeight).
+        Bool("IsOutOfSync", currentHeight+inSyncThreshold < otherHeight).
+        Msg("[SYNC] Checking sync status")
     return currentHeight+inSyncThreshold < otherHeight
 }
@@ -676,7 +719,7 @@ func (ss *StateSync) SyncLoop(bc *core.BlockChain, worker *worker.Worker, willJo
     otherHeight := ss.getMaxPeerHeight()
     currentHeight := bc.CurrentBlock().NumberU64()
     if currentHeight >= otherHeight {
-        utils.GetLogInstance().Info("[SYNC] Node is now IN SYNC!")
+        utils.Logger().Info().Msg("[SYNC] Node is now IN SYNC!")
         break
     }
     startHash := bc.CurrentBlock().Hash()

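RegisterNodeInfo introduces a second zerolog idiom worth noting: a per-peer sub-logger built once with With(), so the peer fields do not have to be repeated on every event inside the callback. A standalone sketch of that idiom using zerolog directly (field names mirror the diff; the peer values are made up):

    package main

    import (
        "os"

        "github.com/rs/zerolog"
    )

    func main() {
        base := zerolog.New(os.Stderr)

        // Bind the per-peer context once; every event below inherits the fields.
        logger := base.With().Str("peerPort", "6000").Str("peerIP", "192.0.2.1").Logger()

        logger.Debug().Msg("[SYNC] register success")
        logger.Debug().Str("selfport", "9000").Msg("[SYNC] skip self")
    }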
@@ -51,10 +51,7 @@ func main() {
     // Logging setup
     utils.SetLogContext(*port, *ip)
     utils.SetLogVerbosity(log.Lvl(*verbosity))
-    filename := fmt.Sprintf("%v/bootnode-%v-%v.log", *logFolder, *ip, *port)
-    if err := utils.AddLogFile(filename, *logMaxSize); err != nil {
-        panic(err)
-    }
+    utils.AddLogFile(fmt.Sprintf("%v/bootnode-%v-%v.log", *logFolder, *ip, *port), *logMaxSize)

     privKey, _, err := utils.LoadKeyFromFile(*keyFile)
     if err != nil {

@@ -106,8 +106,9 @@ func setUpTXGen() *node.Node {
     consensusObj.SetStakeInfoFinder(gsif)
     consensusObj.ChainReader = txGen.Blockchain()
     consensusObj.PublicKeys = nil
+    genesisShardingConfig := core.ShardingSchedule.InstanceForEpoch(big.NewInt(core.GenesisEpoch))
     startIdx := 0
-    endIdx := startIdx + core.GenesisShardSize
+    endIdx := startIdx + genesisShardingConfig.NumNodesPerShard()
     for _, acct := range genesis.HarmonyAccounts[startIdx:endIdx] {
         pub := &bls2.PublicKey{}
         if err := pub.DeserializeHexStr(acct.BlsPublicKey); err != nil {
@@ -128,6 +129,7 @@ func setUpTXGen() *node.Node {
     return txGen
 }

 func main() {
+    flag.Var(&utils.BootNodes, "bootnodes", "a list of bootnode multiaddress")
     flag.Parse()

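The fixed core.GenesisShardSize constant gives way here to an epoch-indexed sharding schedule: callers ask the schedule for the instance in force at a given epoch and read shard sizes off that instance. A toy sketch of the same interface shape (the Schedule/Instance types below are stand-ins for illustration, not the repo's shardingconfig API):

    package main

    import (
        "fmt"
        "math/big"
    )

    // Instance describes one sharding configuration.
    type Instance struct{ numShards, numNodesPerShard int }

    func (i Instance) NumNodesPerShard() int { return i.numNodesPerShard }

    // Schedule maps epochs to instances; real code switches configs at
    // predefined resharding epochs.
    type Schedule struct{ byEpoch map[int64]Instance }

    func (s Schedule) InstanceForEpoch(epoch *big.Int) Instance {
        if inst, ok := s.byEpoch[epoch.Int64()]; ok {
            return inst
        }
        return s.byEpoch[0] // fall back to the genesis instance
    }

    func main() {
        sched := Schedule{byEpoch: map[int64]Instance{
            0: {numShards: 4, numNodesPerShard: 150},
        }}
        genesis := sched.InstanceForEpoch(big.NewInt(0))
        fmt.Println("nodes per shard at genesis:", genesis.NumNodesPerShard())
    }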
@@ -24,11 +24,17 @@ rpc = s3.t.hmny.io:14555
 [local]
 bootnode = /ip4/127.0.0.1/tcp/19876/p2p/Qmc1V6W7BwX8Ugb42Ti8RnXF1rY5PF7nnZ6bKBryCgi6cv
-shards = 1
+shards = 2

 [local.shard0.rpc]
 rpc = 127.0.0.1:14555
+rpc = 127.0.0.1:14557
+rpc = 127.0.0.1:14559
+
+[local.shard1.rpc]
 rpc = 127.0.0.1:14556
+rpc = 127.0.0.1:14558
+rpc = 127.0.0.1:14560

 [devnet]
 bootnode = /ip4/100.26.90.187/tcp/9871/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv

@@ -794,13 +794,18 @@ func FetchBalance(address common.Address) []*AccountState {
     defer wg.Done()

     balance := big.NewInt(0)
     var nonce uint64

     result[uint32(shardID)] = &AccountState{balance, 0}

-LOOP:
-    for j := 0; j < len(walletProfile.RPCServer[shardID]); j++ {
+    var wgShard sync.WaitGroup
+    wgShard.Add(len(walletProfile.RPCServer[shardID]))
+
+    var mutexAccountState = &sync.Mutex{}
+
+    for rpcServerID := 0; rpcServerID < len(walletProfile.RPCServer[shardID]); rpcServerID++ {
+        go func(rpcServerID int) {
             for retry := 0; retry < rpcRetry; retry++ {
-                server := walletProfile.RPCServer[shardID][j]
+                server := walletProfile.RPCServer[shardID][rpcServerID]
                 client, err := clientService.NewClient(server.IP, server.Port)
                 if err != nil {
                     continue
@@ -816,13 +821,20 @@ func FetchBalance(address common.Address) []*AccountState {
                 log.Debug("FetchBalance", "response", response)
                 respBalance := big.NewInt(0)
                 respBalance.SetBytes(response.Balance)
+
+                mutexAccountState.Lock()
                 if balance.Cmp(respBalance) < 0 {
                     balance.SetBytes(response.Balance)
                     nonce = response.Nonce
                 }
-                break LOOP
+                mutexAccountState.Unlock()
+                break
             }
+            wgShard.Done()
+        }(rpcServerID)
     }
+    wgShard.Wait()
+
     result[shardID] = &AccountState{balance, nonce}
 }(shardID)
 }

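The FetchBalance change replaces a labeled break over RPC servers with a fan-out: one goroutine per server, a WaitGroup to join them, and a mutex so concurrent replies can safely race to update the best-seen balance. The shape in isolation, with invented data standing in for per-server replies (the read-compare-write is the part the mutex protects):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        responses := []int64{40, 75, 60} // stand-ins for per-server balance replies

        var (
            wg   sync.WaitGroup
            mu   sync.Mutex
            best int64
        )

        wg.Add(len(responses))
        for _, resp := range responses {
            go func(resp int64) {
                defer wg.Done()
                mu.Lock() // guard the read-compare-write against concurrent writers
                if resp > best {
                    best = resp
                }
                mu.Unlock()
            }(resp)
        }
        wg.Wait()

        fmt.Println("best balance seen:", best) // 75
    }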
@@ -4,6 +4,7 @@ import (
     "encoding/hex"
     "flag"
     "fmt"
+    "math/big"
     "math/rand"
     "os"
     "path"
@@ -14,17 +15,16 @@ import (
     "github.com/ethereum/go-ethereum/log"
     "github.com/harmony-one/bls/ffi/go/bls"

-    "github.com/harmony-one/harmony/accounts/keystore"
     "github.com/harmony-one/harmony/consensus"
     "github.com/harmony-one/harmony/core"
     "github.com/harmony-one/harmony/internal/blsgen"
     "github.com/harmony-one/harmony/internal/common"
     nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
+    shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
     "github.com/harmony-one/harmony/internal/ctxerror"
     "github.com/harmony-one/harmony/internal/genesis"
     hmykey "github.com/harmony-one/harmony/internal/keystore"
     "github.com/harmony-one/harmony/internal/memprofiling"
-    "github.com/harmony-one/harmony/internal/profiler"
     "github.com/harmony-one/harmony/internal/shardchain"
     "github.com/harmony-one/harmony/internal/utils"
     "github.com/harmony-one/harmony/node"
@@ -86,7 +86,7 @@ var (
     // isExplorer indicates this node is a node to serve explorer
     isExplorer = flag.Bool("is_explorer", false, "true means this node is a node to serve explorer")
     // networkType indicates the type of the network
-    networkType = flag.String("network_type", "mainnet", "type of the network: mainnet, testnet, devnet...")
+    networkType = flag.String("network_type", "mainnet", "type of the network: mainnet, testnet, devnet, localnet")
     // blockPeriod indicates the how long the leader waits to propose a new block.
     blockPeriod = flag.Int("block_period", 8, "how long in second the leader waits to propose a new block.")
     // isNewNode indicates this node is a new node
@@ -100,21 +100,17 @@ var (
     blsPass = flag.String("blspass", "", "The file containing passphrase to decrypt the encrypted bls file.")
     blsPassphrase string

+    // Sharding configuration parameters for devnet
+    devnetNumShards = flag.Uint("dn_num_shards", 2, "number of shards for -network_type=devnet (default: 2)")
+    devnetShardSize = flag.Int("dn_shard_size", 10, "number of nodes per shard for -network_type=devnet (default 10)")
+    devnetHarmonySize = flag.Int("dn_hmy_size", -1, "number of Harmony-operated nodes per shard for -network_type=devnet; negative (default) means equal to -dn_shard_size")
+
     // logConn logs incoming/outgoing connections
     logConn = flag.Bool("log_conn", false, "log incoming/outgoing connections")

     keystoreDir = flag.String("keystore", hmykey.DefaultKeyStoreDir, "The default keystore directory")

-    // -nopass is false by default. The keyfile must be encrypted.
-    hmyNoPass = flag.Bool("nopass", false, "No passphrase for the key (testing only)")
-    // -pass takes on "pass:password", "env:var", "file:pathname",
-    // "fd:number", or "stdin" form.
-    // See “PASS PHRASE ARGUMENTS” section of openssl(1) for details.
-    hmyPass = flag.String("pass", "", "how to get passphrase for the key")
-
-    ks *keystore.KeyStore
-    genesisAccount *genesis.DeployAccount
-    accountIndex int
+    genesisAccount = &genesis.DeployAccount{}

     // logging verbosity
     verbosity = flag.Int("verbosity", 5, "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail (default: 5)")
@@ -123,18 +119,10 @@ var (
     dbDir = flag.String("db_dir", "", "blockchain database directory")
     // Disable view change.
-    disableViewChange = flag.Bool("disable_view_change", false,
-        "Do not propose view change (testing only)")
+    disableViewChange = flag.Bool("disable_view_change", false, "Do not propose view change (testing only)")
 )

 func initSetup() {
-    flag.Var(&utils.BootNodes, "bootnodes", "a list of bootnode multiaddress (delimited by ,)")
-    flag.Parse()
-
-    nodeconfig.SetVersion(fmt.Sprintf("Harmony (C) 2019. %v, version %v-%v (%v %v)", path.Base(os.Args[0]), version, commit, builtBy, builtAt))
-    if *versionFlag {
-        printVersion()
-    }
-
     // maybe request passphrase for bls key.
     passphraseForBls()
@@ -142,10 +130,8 @@ func initSetup() {
     // Configure log parameters
     utils.SetLogContext(*port, *ip)
     utils.SetLogVerbosity(log.Lvl(*verbosity))
-    filename := fmt.Sprintf("%v/validator-%v-%v.log", *logFolder, *ip, *port)
-    if err := utils.AddLogFile(filename, *logMaxSize); err != nil {
-        panic(err)
-    }
+    utils.AddLogFile(fmt.Sprintf("%v/validator-%v-%v.log", *logFolder, *ip, *port), *logMaxSize)

     if *onlyLogTps {
         matchFilterHandler := log.MatchFilterHandler("msg", "TPS Report", utils.GetLogInstance().GetHandler())
         utils.GetLogInstance().SetHandler(matchFilterHandler)
@@ -175,13 +161,6 @@ func initSetup() {
         utils.BootNodes = bootNodeAddrs
     }

-    if !*isExplorer { // Explorer node doesn't need the following setup
-        setupECDSAKeys()
-    } else {
-        genesisAccount = &genesis.DeployAccount{}
-        genesisAccount.ShardID = uint32(*shardID)
-    }
-
     // Set up manual call for garbage collection.
     if *enableGC {
         memprofiling.MaybeCallGCPeriodically()
@@ -202,34 +181,40 @@ func passphraseForBls() {
     blsPassphrase = passphrase
 }

-func setupECDSAKeys() {
-    ks = hmykey.GetHmyKeyStore()
-    // TODO: lc try to enable multiple staking accounts per node
-    accountIndex, genesisAccount = setUpConsensusKeyAndReturnIndex(nodeconfig.GetDefaultConfig())
-    genesisAccount.ShardID = uint32(accountIndex % core.GenesisShardNum)
+func setupGenesisAccount() (isLeader bool) {
+    genesisShardingConfig := core.ShardingSchedule.InstanceForEpoch(big.NewInt(core.GenesisEpoch))
+    pubKey := setUpConsensusKey(nodeconfig.GetDefaultConfig())
+
+    reshardingEpoch := genesisShardingConfig.ReshardingEpoch()
+    if reshardingEpoch != nil && len(reshardingEpoch) > 0 {
+        for _, epoch := range reshardingEpoch {
+            config := core.ShardingSchedule.InstanceForEpoch(epoch)
+            isLeader, genesisAccount = config.FindAccount(pubKey.SerializeToHexStr())
+            if genesisAccount != nil {
+                break
+            }
+        }
+    } else {
+        isLeader, genesisAccount = genesisShardingConfig.FindAccount(pubKey.SerializeToHexStr())
+    }
+
+    if genesisAccount == nil {
+        fmt.Printf("cannot find your BLS key in the genesis/FN tables: %s\n", pubKey.SerializeToHexStr())
+        os.Exit(100)
+    }

     fmt.Printf("My Genesis Account: %v\n", *genesisAccount)

-    // Set up manual call for garbage collection.
-    if *enableGC {
-        memprofiling.MaybeCallGCPeriodically()
-    }
+    return isLeader
 }

-func setUpConsensusKeyAndReturnIndex(nodeConfig *nodeconfig.ConfigType) (int, *genesis.DeployAccount) {
+func setUpConsensusKey(nodeConfig *nodeconfig.ConfigType) *bls.PublicKey {
     consensusPriKey, err := blsgen.LoadBlsKeyWithPassPhrase(*blsKeyFile, blsPassphrase)
     if err != nil {
         fmt.Printf("error when loading bls key, err :%v\n", err)
         os.Exit(100)
     }
     pubKey := consensusPriKey.GetPublicKey()
-    index, acc := genesis.IsBlsPublicKeyIndex(pubKey.SerializeToHexStr())
-    if index < 0 {
-        fmt.Printf("cannot find your BLS key in the genesis/FN tables: %s\n", pubKey.SerializeToHexStr())
-        os.Exit(100)
-    }

     // Consensus keys are the BLS12-381 keys used to sign consensus messages
     nodeConfig.ConsensusPriKey, nodeConfig.ConsensusPubKey = consensusPriKey, consensusPriKey.GetPublicKey()
@@ -237,43 +222,17 @@ func setUpConsensusKeyAndReturnIndex(nodeConfig *nodeconfig.ConfigType) (int, *g
         fmt.Println("error to get consensus keys.")
         os.Exit(100)
     }
-    return index, acc
+    return pubKey
 }

-func createGlobalConfig() *nodeconfig.ConfigType {
+func createGlobalConfig(isLeader bool) *nodeconfig.ConfigType {
     var err error
-    var myShardID uint32
-
-    nodeConfig := nodeconfig.GetDefaultConfig()
+    nodeConfig := nodeconfig.GetShardConfig(genesisAccount.ShardID)
     if !*isExplorer {
-        // Specified Shard ID override calculated Shard ID
-        if *shardID >= 0 {
-            utils.GetLogInstance().Info("ShardID Override", "original", genesisAccount.ShardID, "override", *shardID)
-            genesisAccount.ShardID = uint32(*shardID)
-        }
-
-        if !*isNewNode {
-            nodeConfig = nodeconfig.GetShardConfig(uint32(genesisAccount.ShardID))
-        } else {
-            myShardID = 0 // This should be default value as new node doesn't belong to any shard.
-            if *shardID >= 0 {
-                utils.GetLogInstance().Info("ShardID Override", "original", myShardID, "override", *shardID)
-                myShardID = uint32(*shardID)
-                nodeConfig = nodeconfig.GetShardConfig(myShardID)
-            }
-        }
-
         // Set up consensus keys.
-        setUpConsensusKeyAndReturnIndex(nodeConfig)
-
-        // P2p private key is used for secure message transfer between p2p nodes.
-        nodeConfig.P2pPriKey, _, err = utils.LoadKeyFromFile(*keyFile)
-        if err != nil {
-            panic(err)
-        }
+        setUpConsensusKey(nodeConfig)
     } else {
-        nodeConfig = nodeconfig.GetShardConfig(uint32(*shardID))
         nodeConfig.ConsensusPriKey = &bls.SecretKey{} // set dummy bls key for consensus object
     }
@@ -283,27 +242,28 @@ func createGlobalConfig() *nodeconfig.ConfigType {
         nodeConfig.SetNetworkType(nodeconfig.Mainnet)
     case nodeconfig.Testnet:
         nodeConfig.SetNetworkType(nodeconfig.Testnet)
+    case nodeconfig.Localnet:
+        nodeConfig.SetNetworkType(nodeconfig.Localnet)
     case nodeconfig.Devnet:
         nodeConfig.SetNetworkType(nodeconfig.Devnet)
     default:
         panic(fmt.Sprintf("invalid network type: %s", *networkType))
     }

-    // P2p private key is used for secure message transfer between p2p nodes.
-    nodeConfig.P2pPriKey, _, err = utils.LoadKeyFromFile(*keyFile)
-    if err != nil {
-        panic(err)
-    }
-
     nodeConfig.SelfPeer = p2p.Peer{IP: *ip, Port: *port, ConsensusPubKey: nodeConfig.ConsensusPubKey}

-    if accountIndex < core.GenesisShardNum && !*isExplorer && !*leaderOverride { // The first node in a shard is the leader at genesis
+    if isLeader && !*isExplorer && !*leaderOverride { // The first node in a shard is the leader at genesis
         nodeConfig.Leader = nodeConfig.SelfPeer
         nodeConfig.StringRole = "leader"
     } else {
         nodeConfig.StringRole = "validator"
     }

+    // P2p private key is used for secure message transfer between p2p nodes.
+    nodeConfig.P2pPriKey, _, err = utils.LoadKeyFromFile(*keyFile)
+    if err != nil {
+        panic(err)
+    }
+
     nodeConfig.Host, err = p2pimpl.NewHost(&nodeConfig.SelfPeer, nodeConfig.P2pPriKey)
     if *logConn && nodeConfig.GetNetworkType() != nodeconfig.Mainnet {
         nodeConfig.Host.GetP2PHost().Network().Notify(utils.NewConnLogger(utils.GetLogInstance()))
@@ -340,6 +300,11 @@ func setUpConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
     }
     currentConsensus.SetCommitDelay(commitDelay)
     currentConsensus.MinPeers = *minPeers
+
+    if *isNewNode {
+        currentConsensus.SetMode(consensus.Listening)
+    }
+
     if *disableViewChange {
         currentConsensus.DisableViewChangeForTestingOnly()
     }
@@ -368,49 +333,37 @@ func setUpConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
         nodeConfig.SetIsBeacon(true)
         if nodeConfig.StringRole == "leader" {
             currentNode.NodeConfig.SetRole(nodeconfig.BeaconLeader)
-            currentNode.NodeConfig.SetIsLeader(true)
         } else {
             currentNode.NodeConfig.SetRole(nodeconfig.BeaconValidator)
-            currentNode.NodeConfig.SetIsLeader(false)
         }
         currentNode.NodeConfig.SetShardGroupID(p2p.GroupIDBeacon)
         currentNode.NodeConfig.SetClientGroupID(p2p.GroupIDBeaconClient)
     } else {
         if nodeConfig.StringRole == "leader" {
             currentNode.NodeConfig.SetRole(nodeconfig.ShardLeader)
-            currentNode.NodeConfig.SetIsLeader(true)
         } else {
             currentNode.NodeConfig.SetRole(nodeconfig.ShardValidator)
-            currentNode.NodeConfig.SetIsLeader(false)
         }
         currentNode.NodeConfig.SetShardGroupID(p2p.NewGroupIDByShardID(p2p.ShardID(nodeConfig.ShardID)))
         currentNode.NodeConfig.SetClientGroupID(p2p.NewClientGroupIDByShardID(p2p.ShardID(nodeConfig.ShardID)))
     }
 } else {
     if *isNewNode {
-        currentNode.NodeConfig.SetRole(nodeconfig.NewNode)
-        currentNode.NodeConfig.SetClientGroupID(p2p.GroupIDBeaconClient)
-        currentNode.NodeConfig.SetBeaconGroupID(p2p.GroupIDBeacon)
-        if *shardID > -1 {
-            // I will be a validator (single leader is fixed for now)
+        if nodeConfig.ShardID == 0 { // Beacon chain
+            nodeConfig.SetIsBeacon(true)
+            currentNode.NodeConfig.SetRole(nodeconfig.BeaconValidator)
+            currentNode.NodeConfig.SetShardGroupID(p2p.GroupIDBeacon)
+            currentNode.NodeConfig.SetClientGroupID(p2p.GroupIDBeaconClient)
+        } else {
             currentNode.NodeConfig.SetRole(nodeconfig.ShardValidator)
-            currentNode.NodeConfig.SetIsLeader(false)
             currentNode.NodeConfig.SetShardGroupID(p2p.NewGroupIDByShardID(p2p.ShardID(nodeConfig.ShardID)))
             currentNode.NodeConfig.SetClientGroupID(p2p.NewClientGroupIDByShardID(p2p.ShardID(nodeConfig.ShardID)))
         }
-    } else if *isExplorer {
+    }
+    if *isExplorer {
         currentNode.NodeConfig.SetRole(nodeconfig.ExplorerNode)
-        currentNode.NodeConfig.SetIsLeader(false)
         currentNode.NodeConfig.SetShardGroupID(p2p.NewGroupIDByShardID(p2p.ShardID(*shardID)))
         currentNode.NodeConfig.SetClientGroupID(p2p.NewClientGroupIDByShardID(p2p.ShardID(*shardID)))
-    } else if nodeConfig.StringRole == "leader" {
-        currentNode.NodeConfig.SetRole(nodeconfig.ShardLeader)
-        currentNode.NodeConfig.SetIsLeader(true)
-        currentNode.NodeConfig.SetShardGroupID(p2p.GroupIDUnknown)
-    } else {
-        currentNode.NodeConfig.SetRole(nodeconfig.ShardValidator)
-        currentNode.NodeConfig.SetIsLeader(false)
-        currentNode.NodeConfig.SetShardGroupID(p2p.GroupIDUnknown)
     }
 }
 currentNode.NodeConfig.ConsensusPubKey = nodeConfig.ConsensusPubKey
@@ -453,18 +406,55 @@ func setUpConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
 }

 func main() {
+    flag.Var(&utils.BootNodes, "bootnodes", "a list of bootnode multiaddress (delimited by ,)")
+    flag.Parse()
+
+    nodeconfig.SetVersion(fmt.Sprintf("Harmony (C) 2019. %v, version %v-%v (%v %v)", path.Base(os.Args[0]), version, commit, builtBy, builtAt))
+    if *versionFlag {
+        printVersion()
+    }
+
+    switch *networkType {
+    case nodeconfig.Mainnet:
+        core.ShardingSchedule = shardingconfig.MainnetSchedule
+    case nodeconfig.Testnet:
+        core.ShardingSchedule = shardingconfig.TestnetSchedule
+    case nodeconfig.Localnet:
+        core.ShardingSchedule = shardingconfig.LocalnetSchedule
+    case nodeconfig.Devnet:
+        if *devnetHarmonySize < 0 {
+            *devnetHarmonySize = *devnetShardSize
+        }
+        // TODO (leo): use a passing list of accounts here
+        devnetConfig, err := shardingconfig.NewInstance(
+            uint32(*devnetNumShards), *devnetShardSize, *devnetHarmonySize, genesis.HarmonyAccounts, genesis.FoundationalNodeAccounts, nil)
+        if err != nil {
+            _, _ = fmt.Fprintf(os.Stderr, "invalid devnet sharding config: %s",
+                err)
+            os.Exit(1)
+        }
+        core.ShardingSchedule = shardingconfig.NewFixedSchedule(devnetConfig)
+    }
+
     initSetup()
-    nodeConfig := createGlobalConfig()

-    // Start Profiler for leader if profile argument is on
-    if nodeConfig.StringRole == "leader" && (*profile || *metricsReportURL != "") {
-        prof := profiler.GetProfiler()
-        prof.Config(nodeConfig.ShardID, *metricsReportURL)
-        if *profile {
-            prof.Start()
-        }
+    // Set up manual call for garbage collection.
+    if *enableGC {
+        memprofiling.MaybeCallGCPeriodically()
     }

+    isLeader := false
+    if !*isExplorer { // Explorer node doesn't need the following setup
+        isLeader = setupGenesisAccount()
+    }
+
+    if *shardID >= 0 {
+        utils.GetLogInstance().Info("ShardID Override", "original", genesisAccount.ShardID, "override", *shardID)
+        genesisAccount.ShardID = uint32(*shardID)
+    }
+
+    nodeConfig := createGlobalConfig(isLeader)
     currentNode := setUpConsensusAndNode(nodeConfig)
     //if consensus.ShardID != 0 {
     //    go currentNode.SupportBeaconSyncing()
     //}

@@ -0,0 +1,65 @@
+package main
+
+import (
+    "context"
+    "fmt"
+    "math/big"
+    "time"
+
+    "github.com/ethereum/go-ethereum/rpc"
+
+    "github.com/harmony-one/harmony/hmyclient"
+)
+
+// newRPCClient creates a rpc client with specified node URL.
+func newRPCClient(url string) *rpc.Client {
+    client, err := rpc.Dial(url)
+    if err != nil {
+        fmt.Errorf("Failed to connect to Ethereum node: %v", err)
+    }
+    return client
+}
+
+func main() {
+    ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
+    defer cancelFn()
+
+    rpcClient := newRPCClient("http://localhost:9500")
+    if rpcClient == nil {
+        fmt.Errorf("Failed to create rpc client")
+    }
+    client := hmyclient.NewClient(rpcClient)
+    if client == nil {
+        fmt.Errorf("Failed to create client")
+    }
+
+    networkID, err := client.NetworkID(ctx)
+    if err != nil {
+        fmt.Errorf("Failed to get net_version: %v", err)
+    }
+    fmt.Printf("net_version: %v\n", networkID)
+
+    blockNumber, err := client.BlockNumber(ctx)
+    if err != nil {
+        fmt.Errorf("Failed to get hmy_blockNumber: %v", err)
+    }
+    fmt.Printf("hmy_blockNumber: %v\n", blockNumber)
+
+    block, err := client.BlockByNumber(ctx, new(big.Int).SetUint64(uint64(blockNumber)))
+    if err != nil {
+        fmt.Errorf("Failed to get hmy_getBlockByNumber %v: %v", blockNumber, err)
+    }
+    fmt.Printf("hmy_getBlockByNumber(%v):\n", blockNumber)
+    fmt.Printf("number: %v\n", block.Number().Text(16))
+    fmt.Printf("hash: %v\n", block.Hash().String())
+    fmt.Printf("parentHash: %v\n", block.ParentHash().String())
+    fmt.Printf("timestamp: %v\n", block.Time().Text(16))
+    fmt.Printf("size: %v\n", block.Size())
+    fmt.Printf("miner: %v\n", block.Coinbase().String())
+    fmt.Printf("receiptsRoot: %v\n", block.ReceiptHash().String())
+    fmt.Printf("transactionsRoot: %v\n", block.TxHash().String())
+
+    block, err = client.BlockByNumber(ctx, nil)
+    if err != nil {
+        fmt.Errorf("Failed to get block: %v", err)
+    }
+    fmt.Printf("hmy_getBlockByNumber(latest): %v", block)
+}

@@ -11,7 +11,6 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
-	"github.com/ethereum/go-ethereum/log"
	"github.com/harmony-one/bls/ffi/go/bls"
	"github.com/harmony-one/harmony/common/denominations"
@@ -21,7 +20,6 @@ import (
	"github.com/harmony-one/harmony/core/types"
	bls_cosi "github.com/harmony-one/harmony/crypto/bls"
	common2 "github.com/harmony-one/harmony/internal/common"
-	nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
	"github.com/harmony-one/harmony/internal/ctxerror"
	"github.com/harmony-one/harmony/internal/genesis"
	"github.com/harmony-one/harmony/internal/memprofiling"
@@ -99,6 +97,9 @@ type Consensus struct {
	// the publickey of leader
	LeaderPubKey *bls.PublicKey

+	// number of publickeys of previous epoch
+	numPrevPubKeys int
+
	viewID uint64

	// Blockhash - 32 byte
@@ -203,6 +204,11 @@ func (consensus *Consensus) Quorum() int {
	return len(consensus.PublicKeys)*2/3 + 1
}

+// PreviousQuorum returns the quorum size of previous epoch
+func (consensus *Consensus) PreviousQuorum() int {
+	return consensus.numPrevPubKeys*2/3 + 1
+}
+
// RewardThreshold returns the threshold to stop accepting commit messages
// when leader receives enough signatures for block reward
func (consensus *Consensus) RewardThreshold() int {
@@ -236,13 +242,6 @@ func New(host p2p.Host, ShardID uint32, leader p2p.Peer, blsPriKey *bls.SecretKey)
	// pbft timeout
	consensus.consensusTimeout = createTimeout()

-	selfPeer := host.GetSelfPeer()
-	if leader.Port == selfPeer.Port && leader.IP == selfPeer.IP {
-		nodeconfig.GetDefaultConfig().SetIsLeader(true)
-	} else {
-		nodeconfig.GetDefaultConfig().SetIsLeader(false)
-	}
-
	consensus.prepareSigs = map[string]*bls.Sign{}
	consensus.commitSigs = map[string]*bls.Sign{}
@@ -253,9 +252,9 @@ func New(host p2p.Host, ShardID uint32, leader p2p.Peer, blsPriKey *bls.SecretKey)
	if blsPriKey != nil {
		consensus.priKey = blsPriKey
		consensus.PubKey = blsPriKey.GetPublicKey()
-		utils.GetLogInstance().Info("my pubkey is", "pubkey", consensus.PubKey.SerializeToHexStr())
+		utils.Logger().Info().Str("publicKey", consensus.PubKey.SerializeToHexStr()).Msg("My Public Key")
	} else {
-		utils.GetLogInstance().Error("the bls key is nil")
+		utils.Logger().Error().Msg("the bls key is nil")
		return nil, fmt.Errorf("nil bls key, aborting")
	}
@@ -283,8 +282,6 @@ func New(host p2p.Host, ShardID uint32, leader p2p.Peer, blsPriKey *bls.SecretKey)
func accumulateRewards(
	bc consensus_engine.ChainReader, state *state.DB, header *types.Header,
) error {
-	logger := header.Logger(utils.GetLogInstance())
-	getLogger := func() log.Logger { return utils.WithCallerSkip(logger, 1) }
	blockNum := header.Number.Uint64()
	if blockNum == 0 {
		// Epoch block has no parent to reward.
@@ -356,10 +353,11 @@ func accumulateRewards(
		totalAmount = new(big.Int).Add(totalAmount, diff)
		last = cur
	}
-	getLogger().Debug("[Block Reward] Successfully paid out block reward",
-		"NumAccounts", numAccounts,
-		"TotalAmount", totalAmount,
-		"Signers", signers)
+	header.Logger(utils.Logger()).Debug().
+		Str("NumAccounts", numAccounts.String()).
+		Str("TotalAmount", totalAmount.String()).
+		Strs("Signers", signers).
+		Msg("[Block Reward] Successfully paid out block reward")
	return nil
}
@@ -379,9 +377,7 @@ func (f *GenesisStakeInfoFinder) FindStakeInfoByNodeKey(
) []*structs.StakeInfo {
	var pk types.BlsPublicKey
	if err := pk.FromLibBLSPublicKey(key); err != nil {
-		ctxerror.Log15(utils.GetLogInstance().Warn, ctxerror.New(
-			"cannot convert BLS public key",
-		).WithCause(err))
+		utils.Logger().Warn().Err(err).Msg("cannot convert BLS public key")
		return nil
	}
	l, _ := f.byNodeKey[pk]
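The Quorum/PreviousQuorum pair added above is the usual BFT 2f+1 threshold: with n committee keys and fewer than n/3 faulty members, n*2/3 + 1 signatures cannot be produced by the faulty minority alone. A self-contained sketch of the arithmetic (the helper names here are ours, not the repo's):

package main

import "fmt"

// quorum mirrors Consensus.Quorum/PreviousQuorum: more than
// two thirds of the n committee keys must sign.
func quorum(n int) int { return n*2/3 + 1 }

func main() {
	for _, n := range []int{4, 10, 100} {
		f := (n - 1) / 3 // maximum tolerated faulty members
		fmt.Printf("n=%3d quorum=%3d tolerates f=%2d\n", n, quorum(n), f)
	}
}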

@@ -25,7 +25,7 @@ func (consensus *Consensus) constructAnnounceMessage() []byte {
	marshaledMessage, err := consensus.signAndMarshalConsensusMessage(message)
	if err != nil {
-		utils.GetLogInstance().Error("Failed to sign and marshal the Announce message", "error", err)
+		utils.Logger().Error().Err(err).Msg("Failed to sign and marshal the Announce message")
	}
	return proto.ConstructConsensusMessage(marshaledMessage)
}
@@ -60,7 +60,7 @@ func (consensus *Consensus) constructPreparedMessage() ([]byte, *bls.Sign) {
	marshaledMessage, err := consensus.signAndMarshalConsensusMessage(message)
	if err != nil {
-		utils.GetLogInstance().Error("Failed to sign and marshal the Prepared message", "error", err)
+		utils.Logger().Error().Err(err).Msg("Failed to sign and marshal the Prepared message")
	}
	return proto.ConstructConsensusMessage(marshaledMessage), aggSig
}
@@ -93,7 +93,7 @@ func (consensus *Consensus) constructCommittedMessage() ([]byte, *bls.Sign) {
	marshaledMessage, err := consensus.signAndMarshalConsensusMessage(message)
	if err != nil {
-		utils.GetLogInstance().Error("Failed to sign and marshal the Committed message", "error", err)
+		utils.Logger().Error().Err(err).Msg("Failed to sign and marshal the Committed message")
	}
	return proto.ConstructConsensusMessage(marshaledMessage), aggSig
}

@@ -10,11 +10,11 @@ import (
	"github.com/harmony-one/harmony/crypto/hash"

	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	protobuf "github.com/golang/protobuf/proto"
	"github.com/harmony-one/bls/ffi/go/bls"
	libp2p_peer "github.com/libp2p/go-libp2p-peer"
+	"github.com/rs/zerolog"
	"golang.org/x/crypto/sha3"

	msg_pb "github.com/harmony-one/harmony/api/proto/message"
@@ -22,7 +20,6 @@ import (
	"github.com/harmony-one/harmony/core/state"
	"github.com/harmony-one/harmony/core/types"
	bls_cosi "github.com/harmony-one/harmony/crypto/bls"
-	nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
	"github.com/harmony-one/harmony/internal/ctxerror"
	"github.com/harmony-one/harmony/internal/profiler"
	"github.com/harmony-one/harmony/internal/utils"
@@ -73,7 +72,7 @@ func (consensus *Consensus) SealHash(header *types.Header) (hash common.Hash) {
		header.Time,
		header.Extra,
	}); err != nil {
-		ctxerror.Warn(utils.GetLogger(), err, "rlp.Encode failed")
+		utils.Logger().Warn().Err(err).Msg("rlp.Encode failed")
	}
	hasher.Sum(hash[:0])
	return hash
@@ -109,7 +108,9 @@ func (consensus *Consensus) populateMessageFields(request *msg_pb.ConsensusRequest)
	// sender address
	request.SenderPubkey = consensus.PubKey.Serialize()
-	consensus.getLogger().Debug("[populateMessageFields]", "SenderKey", consensus.PubKey.SerializeToHexStr())
+	consensus.getLogger().Debug().
+		Str("senderKey", consensus.PubKey.SerializeToHexStr()).
+		Msg("[populateMessageFields]")
}

// Signs the consensus message and returns the marshaled message.
@@ -158,12 +159,11 @@ func (consensus *Consensus) GetViewID() uint64 {
// DebugPrintPublicKeys print all the PublicKeys in string format in Consensus
func (consensus *Consensus) DebugPrintPublicKeys() {
+	var keys []string
	for _, k := range consensus.PublicKeys {
-		str := fmt.Sprintf("%s", hex.EncodeToString(k.Serialize()))
-		utils.GetLogInstance().Debug("pk:", "string", str)
+		keys = append(keys, hex.EncodeToString(k.Serialize()))
	}
-
-	utils.GetLogInstance().Debug("PublicKeys:", "#", len(consensus.PublicKeys))
+	utils.Logger().Debug().Strs("PublicKeys", keys).Int("count", len(keys)).Msgf("Debug Public Keys")
}

// UpdatePublicKeys updates the PublicKeys variable, protected by a mutex
@@ -171,16 +171,16 @@ func (consensus *Consensus) UpdatePublicKeys(pubKeys []*bls.PublicKey) int {
	consensus.pubKeyLock.Lock()
	consensus.PublicKeys = append(pubKeys[:0:0], pubKeys...)
	consensus.CommitteePublicKeys = map[string]bool{}
-	utils.GetLogInstance().Info("My Committee")
-	for _, pubKey := range consensus.PublicKeys {
-		utils.GetLogInstance().Info("Member", "BlsPubKey", pubKey.SerializeToHexStr())
+	utils.Logger().Info().Msg("My Committee updated")
+	for i, pubKey := range consensus.PublicKeys {
+		utils.Logger().Info().Int("index", i).Str("BlsPubKey", pubKey.SerializeToHexStr()).Msg("Member")
		consensus.CommitteePublicKeys[pubKey.SerializeToHexStr()] = true
	}
	// TODO: use pubkey to identify leader rather than p2p.Peer.
	consensus.leader = p2p.Peer{ConsensusPubKey: pubKeys[0]}
	consensus.LeaderPubKey = pubKeys[0]
-	utils.GetLogInstance().Info("My Leader", "info", consensus.LeaderPubKey.SerializeToHexStr())
+	utils.Logger().Info().Str("info", consensus.LeaderPubKey.SerializeToHexStr()).Msg("My Leader")
	consensus.pubKeyLock.Unlock()
	// reset states after update public keys
	consensus.ResetState()
@@ -267,8 +267,7 @@ func (consensus *Consensus) VerifySeal(chain consensus_engine.ChainReader, header *types.Header) error {
	if err != nil {
		return ctxerror.New("[VerifySeal] Unable to deserialize the LastCommitSignature and LastCommitBitmap in Block Header").WithCause(err)
	}
-	// TODO: use the quorum of last block instead
-	if count := utils.CountOneBits(mask.Bitmap); count < consensus.Quorum() {
+	if count := utils.CountOneBits(mask.Bitmap); count < consensus.PreviousQuorum() {
		return ctxerror.New("[VerifySeal] Not enough signature in LastCommitSignature from Block Header", "need", consensus.Quorum(), "got", count)
	}
@@ -377,7 +376,9 @@ func (consensus *Consensus) GetViewIDSigsArray() []*bls.Sign {
// ResetState resets the state of the consensus
func (consensus *Consensus) ResetState() {
-	consensus.getLogger().Debug("[ResetState] Resetting consensus state", "Phase", consensus.phase)
+	consensus.getLogger().Debug().
+		Str("Phase", consensus.phase.String()).
+		Msg("[ResetState] Resetting consensus state")
	consensus.switchPhase(Announce, true)
	consensus.blockHash = [32]byte{}
	consensus.blockHeader = []byte{}
@@ -396,7 +397,7 @@ func (consensus *Consensus) ResetState() {
// Returns a string representation of this consensus
func (consensus *Consensus) String() string {
	var duty string
-	if nodeconfig.GetDefaultConfig().IsLeader() {
+	if consensus.IsLeader() {
		duty = "LDR" // leader
	} else {
		duty = "VLD" // validator
@@ -472,6 +473,16 @@ func (consensus *Consensus) SetViewID(height uint64) {
	consensus.viewID = height
}

+// SetMode sets the mode of consensus
+func (consensus *Consensus) SetMode(mode Mode) {
+	consensus.mode.SetMode(mode)
+}
+
+// Mode returns the mode of consensus
+func (consensus *Consensus) Mode() Mode {
+	return consensus.mode.Mode()
+}
+
// RegisterPRndChannel registers the channel for receiving randomness preimage from DRG protocol
func (consensus *Consensus) RegisterPRndChannel(pRndChannel chan []byte) {
	consensus.PRndChannel = pRndChannel
@@ -494,8 +505,14 @@ func (consensus *Consensus) checkViewID(msg *PbftMessage) error {
		consensus.LeaderPubKey = msg.SenderPubkey
		consensus.ignoreViewIDCheck = false
		consensus.consensusTimeout[timeoutConsensus].Start()
-		utils.GetLogger().Debug("viewID and leaderKey override", "viewID", consensus.viewID, "leaderKey", consensus.LeaderPubKey.SerializeToHexStr()[:20])
-		utils.GetLogger().Debug("Start consensus timer", "viewID", consensus.viewID, "block", consensus.blockNum)
+		utils.Logger().Debug().
+			Uint64("viewID", consensus.viewID).
+			Str("leaderKey", consensus.LeaderPubKey.SerializeToHexStr()[:20]).
+			Msg("viewID and leaderKey override")
+		utils.Logger().Debug().
+			Uint64("viewID", consensus.viewID).
+			Uint64("block", consensus.blockNum).
+			Msg("Start consensus timer")
		return nil
	} else if msg.ViewID > consensus.viewID {
		return consensus_engine.ErrViewIDNotMatch
@@ -543,11 +560,11 @@ func readSignatureBitmapByPublicKeys(recvPayload []byte, publicKeys []*bls.PublicKey)
	}
	mask, err := bls_cosi.NewMask(publicKeys, nil)
	if err != nil {
-		utils.GetLogInstance().Warn("onNewView unable to setup mask for prepared message", "err", err)
+		utils.Logger().Warn().Err(err).Msg("onNewView unable to setup mask for prepared message")
		return nil, nil, errors.New("unable to setup mask from payload")
	}
	if err := mask.SetMask(bitmap); err != nil {
-		ctxerror.Warn(utils.GetLogger(), err, "mask.SetMask failed")
+		utils.Logger().Warn().Err(err).Msg("mask.SetMask failed")
	}
	return &aggSig, mask, nil
}
@@ -557,13 +574,14 @@ func (consensus *Consensus) reportMetrics(block types.Block) {
	timeElapsed := endTime.Sub(startTime)
	numOfTxs := len(block.Transactions())
	tps := float64(numOfTxs) / timeElapsed.Seconds()
-	utils.GetLogInstance().Info("TPS Report",
-		"numOfTXs", numOfTxs,
-		"startTime", startTime,
-		"endTime", endTime,
-		"timeElapsed", timeElapsed,
-		"TPS", tps,
-		"consensus", consensus)
+	utils.Logger().Info().
+		Int("numOfTXs", numOfTxs).
+		Time("startTime", startTime).
+		Time("endTime", endTime).
+		Dur("timeElapsed", endTime.Sub(startTime)).
+		Float64("TPS", tps).
+		Interface("consensus", consensus).
+		Msg("TPS Report")

	// Post metrics
	profiler := profiler.GetProfiler()
@@ -588,20 +606,15 @@ func (consensus *Consensus) reportMetrics(block types.Block) {
	profiler.LogMetrics(metrics)
}

-// logger returns a sub-logger with consensus contexts added.
-func (consensus *Consensus) logger(logger log.Logger) log.Logger {
-	return logger.New(
-		"myBlock", consensus.blockNum,
-		"myViewID", consensus.viewID,
-		"phase", consensus.phase,
-		"mode", consensus.mode.Mode(),
-	)
-}
-
// getLogger returns logger for consensus contexts added
-func (consensus *Consensus) getLogger() log.Logger {
-	logger := consensus.logger(utils.GetLogInstance())
-	return logger
+func (consensus *Consensus) getLogger() *zerolog.Logger {
+	logger := utils.Logger().With().
+		Uint64("myBlock", consensus.blockNum).
+		Uint64("myViewID", consensus.viewID).
+		Interface("phase", consensus.phase).
+		Str("mode", consensus.mode.Mode().String()).
+		Logger()
+	return &logger
}

// retrieve corresponding blsPublicKey from Coinbase Address
@@ -644,10 +657,12 @@ func (consensus *Consensus) updateConsensusInformation() {
	consensus.SetViewID(header.ViewID.Uint64() + 1)
	leaderPubKey, err := consensus.getLeaderPubKeyFromCoinbase(header)
	if err != nil || leaderPubKey == nil {
-		consensus.getLogger().Debug("[SYNC] Unable to get leaderPubKey from coinbase", "error", err)
+		consensus.getLogger().Debug().Err(err).Msg("[SYNC] Unable to get leaderPubKey from coinbase")
		consensus.ignoreViewIDCheck = true
	} else {
-		consensus.getLogger().Debug("[SYNC] Most Recent LeaderPubKey Updated Based on BlockChain", "leaderPubKey", leaderPubKey.SerializeToHexStr())
+		consensus.getLogger().Debug().
+			Str("leaderPubKey", leaderPubKey.SerializeToHexStr()).
+			Msg("[SYNC] Most Recent LeaderPubKey Updated Based on BlockChain")
		consensus.LeaderPubKey = leaderPubKey
	}
}
@@ -681,3 +696,12 @@ func (consensus *Consensus) RecoveryBlockNumber(shardID uint32) uint64 {
	}
	return 0
}
+
+// IsLeader check if the node is a leader or not by comparing the public key of
+// the node with the leader public key
+func (consensus *Consensus) IsLeader() bool {
+	if consensus.PubKey != nil && consensus.LeaderPubKey != nil {
+		return consensus.PubKey.IsEqual(consensus.LeaderPubKey)
+	}
+	return false
+}
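Several checks in this file compare the number of set bits in a signer bitmap against the quorum (utils.CountOneBits over mask.Bitmap). A generic, runnable sketch of that check, assuming the bitmap is a []byte with one bit per committee key; countOneBits is our stand-in for the repo helper:

package main

import (
	"fmt"
	"math/bits"
)

// countOneBits tallies set bits across the bitmap bytes,
// i.e. the number of validators who signed.
func countOneBits(bitmap []byte) int {
	count := 0
	for _, b := range bitmap {
		count += bits.OnesCount8(b)
	}
	return count
}

func main() {
	n := 10                                  // committee size
	quorum := n*2/3 + 1                      // 7
	bitmap := []byte{0b10110110, 0b00000011} // 7 of 10 validators signed
	if got := countOneBits(bitmap); got >= quorum {
		fmt.Printf("quorum reached: %d/%d signers (need %d)\n", got, n, quorum)
	}
}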

@@ -4,7 +4,6 @@ import (
	"testing"

	"github.com/harmony-one/harmony/crypto/bls"
-	nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
	"github.com/harmony-one/harmony/internal/utils"
	"github.com/harmony-one/harmony/p2p"
	"github.com/harmony-one/harmony/p2p/p2pimpl"
@@ -25,10 +24,6 @@ func TestNew(test *testing.T) {
		test.Errorf("Consensus Id is initialized to the wrong value: %d", consensus.viewID)
	}

-	if !nodeconfig.GetDefaultConfig().IsLeader() {
-		test.Error("Consensus should belong to a leader")
-	}
-
	if consensus.ReadySignal == nil {
		test.Error("Consensus ReadySignal should be initialized")
	}

File diff suppressed because it is too large.

@@ -27,7 +27,7 @@ func (consensus *Consensus) constructPrepareMessage() []byte {
	marshaledMessage, err := consensus.signAndMarshalConsensusMessage(message)
	if err != nil {
-		utils.GetLogInstance().Error("Failed to sign and marshal the Prepare message", "error", err)
+		utils.Logger().Error().Err(err).Msg("Failed to sign and marshal the Prepare message")
	}
	return proto.ConstructConsensusMessage(marshaledMessage)
}
@@ -53,7 +53,7 @@ func (consensus *Consensus) constructCommitMessage(commitPayload []byte) []byte {
	marshaledMessage, err := consensus.signAndMarshalConsensusMessage(message)
	if err != nil {
-		utils.GetLogInstance().Error("Failed to sign and marshal the Commit message", "error", err)
+		utils.Logger().Error().Err(err).Msg("Failed to sign and marshal the Commit message")
	}
	return proto.ConstructConsensusMessage(marshaledMessage)
}

@@ -42,13 +42,16 @@ func (consensus *Consensus) constructViewChangeMessage() []byte {
		vcMsg.Payload = append(msgToSign[:0:0], msgToSign...)
	}
-	consensus.getLogger().Debug("[constructViewChangeMessage]", "m1Payload", vcMsg.Payload, "pubKey", consensus.PubKey.SerializeToHexStr())
+	consensus.getLogger().Debug().
+		Bytes("m1Payload", vcMsg.Payload).
+		Str("pubKey", consensus.PubKey.SerializeToHexStr()).
+		Msg("[constructViewChangeMessage]")

	sign := consensus.priKey.SignHash(msgToSign)
	if sign != nil {
		vcMsg.ViewchangeSig = sign.Serialize()
	} else {
-		utils.GetLogger().Error("unable to serialize m1/m2 view change message signature")
+		utils.Logger().Error().Msg("unable to serialize m1/m2 view change message signature")
	}

	viewIDBytes := make([]byte, 8)
@@ -57,12 +60,12 @@ func (consensus *Consensus) constructViewChangeMessage() []byte {
	if sign1 != nil {
		vcMsg.ViewidSig = sign1.Serialize()
	} else {
-		utils.GetLogger().Error("unable to serialize viewID signature")
+		utils.Logger().Error().Msg("unable to serialize viewID signature")
	}

	marshaledMessage, err := consensus.signAndMarshalConsensusMessage(message)
	if err != nil {
-		utils.GetLogInstance().Error("[constructViewChangeMessage] failed to sign and marshal the viewchange message", "error", err)
+		utils.Logger().Error().Err(err).Msg("[constructViewChangeMessage] failed to sign and marshal the viewchange message")
	}
	return proto.ConstructConsensusMessage(marshaledMessage)
}
@@ -86,7 +89,7 @@ func (consensus *Consensus) constructNewViewMessage() []byte {
	vcMsg.Payload = consensus.m1Payload

	sig2arr := consensus.GetNilSigsArray()
-	consensus.getLogger().Debug("[constructNewViewMessage] M2 (NIL) type signatures", "len", len(sig2arr))
+	consensus.getLogger().Debug().Int("len", len(sig2arr)).Msg("[constructNewViewMessage] M2 (NIL) type signatures")
	if len(sig2arr) > 0 {
		m2Sig := bls_cosi.AggregateSig(sig2arr)
		vcMsg.M2Aggsigs = m2Sig.Serialize()
@@ -94,7 +97,7 @@ func (consensus *Consensus) constructNewViewMessage() []byte {
	}

	sig3arr := consensus.GetViewIDSigsArray()
-	consensus.getLogger().Debug("[constructNewViewMessage] M3 (ViewID) type signatures", "len", len(sig3arr))
+	consensus.getLogger().Debug().Int("len", len(sig3arr)).Msg("[constructNewViewMessage] M3 (ViewID) type signatures")
	// even though we check here for safety, m3 type signatures must be >= 2f+1
	if len(sig3arr) > 0 {
		m3Sig := bls_cosi.AggregateSig(sig3arr)
@@ -104,7 +107,7 @@ func (consensus *Consensus) constructNewViewMessage() []byte {
	marshaledMessage, err := consensus.signAndMarshalConsensusMessage(message)
	if err != nil {
-		utils.GetLogInstance().Error("[constructNewViewMessage] failed to sign and marshal the new view message", "error", err)
+		utils.Logger().Error().Err(err).Msg("[constructNewViewMessage] failed to sign and marshal the new view message")
	}
	return proto.ConstructConsensusMessage(marshaledMessage)
}

@@ -257,26 +257,26 @@ func ParseViewChangeMessage(msg *msg_pb.Message) (*PbftMessage, error) {
	pubKey, err := bls_cosi.BytesToBlsPublicKey(vcMsg.SenderPubkey)
	if err != nil {
-		utils.GetLogInstance().Warn("ParseViewChangeMessage failed to parse senderpubkey", "error", err)
+		utils.Logger().Warn().Err(err).Msg("ParseViewChangeMessage failed to parse senderpubkey")
		return nil, err
	}
	leaderKey, err := bls_cosi.BytesToBlsPublicKey(vcMsg.LeaderPubkey)
	if err != nil {
-		utils.GetLogInstance().Warn("ParseViewChangeMessage failed to parse leaderpubkey", "error", err)
+		utils.Logger().Warn().Err(err).Msg("ParseViewChangeMessage failed to parse leaderpubkey")
		return nil, err
	}
	vcSig := bls.Sign{}
	err = vcSig.Deserialize(vcMsg.ViewchangeSig)
	if err != nil {
-		utils.GetLogInstance().Warn("ParseViewChangeMessage failed to deserialize the viewchange signature", "error", err)
+		utils.Logger().Warn().Err(err).Msg("ParseViewChangeMessage failed to deserialize the viewchange signature")
		return nil, err
	}
	vcSig1 := bls.Sign{}
	err = vcSig1.Deserialize(vcMsg.ViewidSig)
	if err != nil {
-		utils.GetLogInstance().Warn("ParseViewChangeMessage failed to deserialize the viewid signature", "error", err)
+		utils.Logger().Warn().Err(err).Msg("ParseViewChangeMessage failed to deserialize the viewid signature")
		return nil, err
	}
	pbftMsg.SenderPubkey = pubKey
@@ -303,7 +303,7 @@ func (consensus *Consensus) ParseNewViewMessage(msg *msg_pb.Message) (*PbftMessage, error) {
	pubKey, err := bls_cosi.BytesToBlsPublicKey(vcMsg.SenderPubkey)
	if err != nil {
-		utils.GetLogInstance().Warn("ParseViewChangeMessage failed to parse senderpubkey", "error", err)
+		utils.Logger().Warn().Err(err).Msg("ParseViewChangeMessage failed to parse senderpubkey")
		return nil, err
	}
	pbftMsg.SenderPubkey = pubKey
@@ -312,12 +312,12 @@ func (consensus *Consensus) ParseNewViewMessage(msg *msg_pb.Message) (*PbftMessage, error) {
	m3Sig := bls.Sign{}
	err = m3Sig.Deserialize(vcMsg.M3Aggsigs)
	if err != nil {
-		utils.GetLogInstance().Warn("ParseViewChangeMessage failed to deserialize the multi signature for M3 viewID signature", "error", err)
+		utils.Logger().Warn().Err(err).Msg("ParseViewChangeMessage failed to deserialize the multi signature for M3 viewID signature")
		return nil, err
	}
	m3mask, err := bls_cosi.NewMask(consensus.PublicKeys, nil)
	if err != nil {
-		utils.GetLogInstance().Warn("ParseViewChangeMessage failed to create mask for multi signature", "error", err)
+		utils.Logger().Warn().Err(err).Msg("ParseViewChangeMessage failed to create mask for multi signature")
		return nil, err
	}
	m3mask.SetMask(vcMsg.M3Bitmap)
@@ -329,12 +329,12 @@ func (consensus *Consensus) ParseNewViewMessage(msg *msg_pb.Message) (*PbftMessage, error) {
	m2Sig := bls.Sign{}
	err = m2Sig.Deserialize(vcMsg.M2Aggsigs)
	if err != nil {
-		utils.GetLogInstance().Warn("ParseViewChangeMessage failed to deserialize the multi signature for M2 aggregated signature", "error", err)
+		utils.Logger().Warn().Err(err).Msg("ParseViewChangeMessage failed to deserialize the multi signature for M2 aggregated signature")
		return nil, err
	}
	m2mask, err := bls_cosi.NewMask(consensus.PublicKeys, nil)
	if err != nil {
-		utils.GetLogInstance().Warn("ParseViewChangeMessage failed to create mask for multi signature", "error", err)
+		utils.Logger().Warn().Err(err).Msg("ParseViewChangeMessage failed to create mask for multi signature")
		return nil, err
	}
	m2mask.SetMask(vcMsg.M2Bitmap)
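Every parse path above follows the same deserialization recipe: decode the aggregate BLS signature, build a mask over the committee keys, then apply the received bitmap. A condensed sketch of that flow, using only calls that appear in this diff (bls.Sign.Deserialize, bls_cosi.NewMask, Mask.SetMask); the function name and error comments are ours:

// Assumes the imports used throughout this file:
//   "github.com/harmony-one/bls/ffi/go/bls"
//   bls_cosi "github.com/harmony-one/harmony/crypto/bls"
func readAggSig(sigBytes, bitmap []byte, publicKeys []*bls.PublicKey) (*bls.Sign, *bls_cosi.Mask, error) {
	var aggSig bls.Sign
	if err := aggSig.Deserialize(sigBytes); err != nil {
		return nil, nil, err // malformed aggregate signature
	}
	mask, err := bls_cosi.NewMask(publicKeys, nil)
	if err != nil {
		return nil, nil, err // cannot build a mask over the committee keys
	}
	if err := mask.SetMask(bitmap); err != nil {
		return nil, nil, err // bitmap does not match the committee size
	}
	return &aggSig, mask, nil
}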

@@ -33,6 +33,7 @@ const (
	Normal Mode = iota
	ViewChanging
	Syncing
+	Listening
)

// PbftMode contains mode and viewID of viewchanging
@@ -55,6 +56,8 @@ func (mode Mode) String() string {
		return "ViewChanging"
	} else if mode == Syncing {
		return "Syncing"
+	} else if mode == Listening {
+		return "Listening"
	}
	return "Unknown"
}
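The new Listening mode slots into the existing iota enum and its String() method. Isolated as a runnable sketch (the underlying type of Mode is assumed; the hunk does not show it):

package main

import "fmt"

type Mode byte // underlying type assumed for this sketch

const (
	Normal Mode = iota
	ViewChanging
	Syncing
	Listening
)

func (mode Mode) String() string {
	if mode == Normal {
		return "Normal"
	} else if mode == ViewChanging {
		return "ViewChanging"
	} else if mode == Syncing {
		return "Syncing"
	} else if mode == Listening {
		return "Listening"
	}
	return "Unknown"
}

func main() {
	fmt.Println(Listening) // prints "Listening"
}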
@@ -120,7 +123,9 @@ func (consensus *Consensus) switchPhase(desirePhase PbftPhase, override bool) {
func (consensus *Consensus) GetNextLeaderKey() *bls.PublicKey {
	idx := consensus.getIndexOfPubKey(consensus.LeaderPubKey)
	if idx == -1 {
-		consensus.getLogger().Warn("GetNextLeaderKey: currentLeaderKey not found", "key", consensus.LeaderPubKey.SerializeToHexStr())
+		consensus.getLogger().Warn().
+			Str("key", consensus.LeaderPubKey.SerializeToHexStr()).
+			Msg("GetNextLeaderKey: currentLeaderKey not found")
	}
	idx = (idx + 1) % len(consensus.PublicKeys)
	return consensus.PublicKeys[idx]
@@ -137,7 +142,9 @@ func (consensus *Consensus) getIndexOfPubKey(pubKey *bls.PublicKey) int {
// ResetViewChangeState reset the state for viewchange
func (consensus *Consensus) ResetViewChangeState() {
-	consensus.getLogger().Debug("[ResetViewChangeState] Resetting view change state", "Phase", consensus.phase)
+	consensus.getLogger().Debug().
+		Str("Phase", consensus.phase.String()).
+		Msg("[ResetViewChangeState] Resetting view change state")
	consensus.mode.SetMode(Normal)
	bhpBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, nil)
	nilBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, nil)
@@ -173,20 +180,26 @@ func (consensus *Consensus) startViewChange(viewID uint64) {
	diff := viewID - consensus.viewID
	duration := time.Duration(int64(diff) * int64(viewChangeDuration))
-	consensus.getLogger().Info("[startViewChange]", "ViewChangingID", viewID, "timeoutDuration", duration, "NextLeader", consensus.LeaderPubKey.SerializeToHexStr())
+	consensus.getLogger().Info().
+		Uint64("ViewChangingID", viewID).
+		Dur("timeoutDuration", duration).
+		Str("NextLeader", consensus.LeaderPubKey.SerializeToHexStr()).
+		Msg("[startViewChange]")

	msgToSend := consensus.constructViewChangeMessage()
	consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend))

	consensus.consensusTimeout[timeoutViewChange].SetDuration(duration)
	consensus.consensusTimeout[timeoutViewChange].Start()
-	consensus.getLogger().Debug("[startViewChange] start view change timer", "ViewChangingID", consensus.mode.ViewID())
+	consensus.getLogger().Debug().
+		Uint64("ViewChangingID", consensus.mode.ViewID()).
+		Msg("[startViewChange] start view change timer")
}
func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
	recvMsg, err := ParseViewChangeMessage(msg)
	if err != nil {
-		consensus.getLogger().Warn("[onViewChange] Unable To Parse Viewchange Message")
+		consensus.getLogger().Warn().Msg("[onViewChange] Unable To Parse Viewchange Message")
		return
	}
	newLeaderKey := recvMsg.LeaderPubkey
@@ -195,33 +208,44 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
	}

	if len(consensus.viewIDSigs) >= consensus.Quorum() {
-		consensus.getLogger().Debug("[onViewChange] Received Enough View Change Messages", "have", len(consensus.viewIDSigs), "need", consensus.Quorum(), "validatorPubKey", recvMsg.SenderPubkey.SerializeToHexStr())
+		consensus.getLogger().Debug().
+			Int("have", len(consensus.viewIDSigs)).
+			Int("need", consensus.Quorum()).
+			Str("validatorPubKey", recvMsg.SenderPubkey.SerializeToHexStr()).
+			Msg("[onViewChange] Received Enough View Change Messages")
		return
	}
	senderKey, err := consensus.verifyViewChangeSenderKey(msg)
	if err != nil {
-		consensus.getLogger().Debug("[onViewChange] VerifySenderKey Failed", "error", err)
+		consensus.getLogger().Debug().Err(err).Msg("[onViewChange] VerifySenderKey Failed")
		return
	}
	// TODO: if difference is only one, new leader can still propose the same committed block to avoid another view change
	if consensus.blockNum > recvMsg.BlockNum {
-		consensus.getLogger().Debug("[onViewChange] Message BlockNum Is Low", "MsgBlockNum", recvMsg.BlockNum)
+		consensus.getLogger().Debug().
+			Uint64("MsgBlockNum", recvMsg.BlockNum).
+			Msg("[onViewChange] Message BlockNum Is Low")
		return
	}
	if consensus.blockNum < recvMsg.BlockNum {
-		consensus.getLogger().Warn("[onViewChange] New Leader Has Lower Blocknum", "MsgBlockNum", recvMsg.BlockNum)
+		consensus.getLogger().Warn().
+			Uint64("MsgBlockNum", recvMsg.BlockNum).
+			Msg("[onViewChange] New Leader Has Lower Blocknum")
		return
	}
	if consensus.mode.Mode() == ViewChanging && consensus.mode.ViewID() > recvMsg.ViewID {
-		consensus.getLogger().Warn("[onViewChange] ViewChanging ID Is Low", "MyViewChangingID", consensus.mode.ViewID(), "MsgViewChangingID", recvMsg.ViewID)
+		consensus.getLogger().Warn().
+			Uint64("MyViewChangingID", consensus.mode.ViewID()).
+			Uint64("MsgViewChangingID", recvMsg.ViewID).
+			Msg("[onViewChange] ViewChanging ID Is Low")
		return
	}
	if err = verifyMessageSig(senderKey, msg); err != nil {
-		consensus.getLogger().Debug("[onViewChange] Failed To Verify Sender's Signature", "error", err)
+		consensus.getLogger().Debug().Err(err).Msg("[onViewChange] Failed To Verify Sender's Signature")
		return
	}
@@ -236,11 +260,11 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
	preparedMsgs := consensus.PbftLog.GetMessagesByTypeSeq(msg_pb.MessageType_PREPARED, recvMsg.BlockNum)
	preparedMsg := consensus.PbftLog.FindMessageByMaxViewID(preparedMsgs)
	if preparedMsg == nil {
-		consensus.getLogger().Debug("[onViewChange] add my M2(NIL) type message")
+		consensus.getLogger().Debug().Msg("[onViewChange] add my M2(NIL) type message")
		consensus.nilSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(NIL)
		consensus.nilBitmap.SetKey(consensus.PubKey, true)
	} else {
-		consensus.getLogger().Debug("[onViewChange] add my M1 type message")
+		consensus.getLogger().Debug().Msg("[onViewChange] add my M1 type message")
		msgToSign := append(preparedMsg.BlockHash[:], preparedMsg.Payload...)
		consensus.bhpSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(msgToSign)
		consensus.bhpBitmap.SetKey(consensus.PubKey, true)
@@ -259,50 +283,63 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
	if len(recvMsg.Payload) == 0 {
		_, ok := consensus.nilSigs[senderKey.SerializeToHexStr()]
		if ok {
-			consensus.getLogger().Debug("[onViewChange] Already Received M2 message from validator", "validatorPubKey", senderKey.SerializeToHexStr())
+			consensus.getLogger().Debug().
+				Str("validatorPubKey", senderKey.SerializeToHexStr()).
+				Msg("[onViewChange] Already Received M2 message from validator")
			return
		}

		if !recvMsg.ViewchangeSig.VerifyHash(senderKey, NIL) {
-			consensus.getLogger().Warn("[onViewChange] Failed To Verify Signature For M2 Type Viewchange Message")
+			consensus.getLogger().Warn().Msg("[onViewChange] Failed To Verify Signature For M2 Type Viewchange Message")
			return
		}

-		consensus.getLogger().Debug("[onViewChange] Add M2 (NIL) type message", "validatorPubKey", senderKey.SerializeToHexStr())
+		consensus.getLogger().Debug().
+			Str("validatorPubKey", senderKey.SerializeToHexStr()).
+			Msg("[onViewChange] Add M2 (NIL) type message")
		consensus.nilSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewchangeSig
		consensus.nilBitmap.SetKey(recvMsg.SenderPubkey, true) // Set the bitmap indicating that this validator signed.
	} else { // m1 type message
		_, ok := consensus.bhpSigs[senderKey.SerializeToHexStr()]
		if ok {
-			consensus.getLogger().Debug("[onViewChange] Already Received M1 Message From the Validator", "validatorPubKey", senderKey.SerializeToHexStr())
+			consensus.getLogger().Debug().
+				Str("validatorPubKey", senderKey.SerializeToHexStr()).
+				Msg("[onViewChange] Already Received M1 Message From the Validator")
			return
		}
		if !recvMsg.ViewchangeSig.VerifyHash(recvMsg.SenderPubkey, recvMsg.Payload) {
-			consensus.getLogger().Warn("[onViewChange] Failed to Verify Signature for M1 Type Viewchange Message")
+			consensus.getLogger().Warn().Msg("[onViewChange] Failed to Verify Signature for M1 Type Viewchange Message")
			return
		}

		// first time receive m1 type message, need verify validity of prepared message
		if len(consensus.m1Payload) == 0 || !bytes.Equal(consensus.m1Payload, recvMsg.Payload) {
			if len(recvMsg.Payload) <= 32 {
-				consensus.getLogger().Debug("[onViewChange] M1 RecvMsg Payload Not Enough Length", "len", len(recvMsg.Payload))
+				consensus.getLogger().Debug().
+					Int("len", len(recvMsg.Payload)).
+					Msg("[onViewChange] M1 RecvMsg Payload Not Enough Length")
				return
			}
			blockHash := recvMsg.Payload[:32]
			aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32)
			if err != nil {
-				consensus.getLogger().Error("[onViewChange] M1 RecvMsg Payload Read Error", "error", err)
+				consensus.getLogger().Error().Err(err).Msg("[onViewChange] M1 RecvMsg Payload Read Error")
				return
			}
			// check has 2f+1 signature in m1 type message
			if count := utils.CountOneBits(mask.Bitmap); count < consensus.Quorum() {
-				consensus.getLogger().Debug("[onViewChange] M1 Payload Not Have Enough Signature", "need", consensus.Quorum(), "have", count)
+				consensus.getLogger().Debug().
+					Int("need", consensus.Quorum()).
+					Int("have", count).
+					Msg("[onViewChange] M1 Payload Not Have Enough Signature")
				return
			}

			// Verify the multi-sig for prepare phase
			if !aggSig.VerifyHash(mask.AggregatePublic, blockHash[:]) {
-				consensus.getLogger().Warn("[onViewChange] failed to verify multi signature for m1 prepared payload", "blockHash", blockHash)
+				consensus.getLogger().Warn().
+					Bytes("blockHash", blockHash).
+					Msg("[onViewChange] failed to verify multi signature for m1 prepared payload")
				return
			}
@@ -316,11 +353,13 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
				preparedMsg.Payload = make([]byte, len(recvMsg.Payload)-32)
				copy(preparedMsg.Payload[:], recvMsg.Payload[32:])
				preparedMsg.SenderPubkey = consensus.PubKey
-				consensus.getLogger().Info("[onViewChange] New Leader Prepared Message Added")
+				consensus.getLogger().Info().Msg("[onViewChange] New Leader Prepared Message Added")
				consensus.PbftLog.AddMessage(&preparedMsg)
			}
		}
-		consensus.getLogger().Debug("[onViewChange] Add M1 (prepared) type message", "validatorPubKey", senderKey.SerializeToHexStr())
+		consensus.getLogger().Debug().
+			Str("validatorPubKey", senderKey.SerializeToHexStr()).
+			Msg("[onViewChange] Add M1 (prepared) type message")
		consensus.bhpSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewchangeSig
		consensus.bhpBitmap.SetKey(recvMsg.SenderPubkey, true) // Set the bitmap indicating that this validator signed.
	}
@@ -328,19 +367,28 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
	// check and add viewID (m3 type) message signature
	_, ok := consensus.viewIDSigs[senderKey.SerializeToHexStr()]
	if ok {
-		consensus.getLogger().Debug("[onViewChange] Already Received M3(ViewID) message from the validator", "senderKey.SerializeToHexStr()", senderKey.SerializeToHexStr())
+		consensus.getLogger().Debug().
+			Str("validatorPubKey", senderKey.SerializeToHexStr()).
+			Msg("[onViewChange] Already Received M3(ViewID) message from the validator")
		return
	}
	viewIDHash := make([]byte, 8)
	binary.LittleEndian.PutUint64(viewIDHash, recvMsg.ViewID)
	if !recvMsg.ViewidSig.VerifyHash(recvMsg.SenderPubkey, viewIDHash) {
-		consensus.getLogger().Warn("[onViewChange] Failed to Verify M3 Message Signature", "MsgViewID", recvMsg.ViewID)
+		consensus.getLogger().Warn().
+			Uint64("MsgViewID", recvMsg.ViewID).
+			Msg("[onViewChange] Failed to Verify M3 Message Signature")
		return
	}
-	consensus.getLogger().Debug("[onViewChange] Add M3 (ViewID) type message", "validatorPubKey", senderKey.SerializeToHexStr())
+	consensus.getLogger().Debug().
+		Str("validatorPubKey", senderKey.SerializeToHexStr()).
+		Msg("[onViewChange] Add M3 (ViewID) type message")
	consensus.viewIDSigs[senderKey.SerializeToHexStr()] = recvMsg.ViewidSig
	consensus.viewIDBitmap.SetKey(recvMsg.SenderPubkey, true) // Set the bitmap indicating that this validator signed.
-	consensus.getLogger().Debug("[onViewChange]", "numSigs", len(consensus.viewIDSigs), "needed", consensus.Quorum())
+	consensus.getLogger().Debug().
+		Int("numSigs", len(consensus.viewIDSigs)).
+		Int("needed", consensus.Quorum()).
+		Msg("[onViewChange]")

	// received enough view change messages, change state to normal consensus
	if len(consensus.viewIDSigs) >= consensus.Quorum() {
@@ -352,12 +400,15 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
				consensus.ReadySignal <- struct{}{}
			}()
		} else {
-			consensus.getLogger().Debug("[OnViewChange] Switching phase", "From", consensus.phase, "To", Commit)
+			consensus.getLogger().Debug().
+				Str("From", consensus.phase.String()).
+				Str("To", Commit.String()).
+				Msg("[OnViewChange] Switching phase")
			consensus.switchPhase(Commit, true)
			copy(consensus.blockHash[:], consensus.m1Payload[:32])
			aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32)
			if err != nil {
-				consensus.getLogger().Error("[onViewChange] ReadSignatureBitmapPayload Fail", "error", err)
+				consensus.getLogger().Error().Err(err).Msg("[onViewChange] ReadSignatureBitmapPayload Fail")
				return
			}
			consensus.aggregatedPrepareSig = aggSig
@@ -369,48 +420,57 @@ func (consensus *Consensus) onViewChange(msg *msg_pb.Message) {
			commitPayload := append(blockNumBytes, consensus.blockHash[:]...)
			consensus.commitSigs[consensus.PubKey.SerializeToHexStr()] = consensus.priKey.SignHash(commitPayload)
			if err = consensus.commitBitmap.SetKey(consensus.PubKey, true); err != nil {
-				consensus.getLogger().Debug("[OnViewChange] New Leader commit bitmap set failed")
+				consensus.getLogger().Debug().Msg("[OnViewChange] New Leader commit bitmap set failed")
				return
			}
		}

		consensus.mode.SetViewID(recvMsg.ViewID)
		msgToSend := consensus.constructNewViewMessage()
-		consensus.getLogger().Warn("[onViewChange] Sent NewView Message", "len(M1Payload)", len(consensus.m1Payload), "M1Payload", consensus.m1Payload)
+		consensus.getLogger().Warn().
+			Int("payloadSize", len(consensus.m1Payload)).
+			Bytes("M1Payload", consensus.m1Payload).
+			Msg("[onViewChange] Sent NewView Message")
		consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend))

		consensus.viewID = recvMsg.ViewID
		consensus.ResetViewChangeState()
		consensus.consensusTimeout[timeoutViewChange].Stop()
		consensus.consensusTimeout[timeoutConsensus].Start()
-		consensus.getLogger().Debug("[onViewChange] New Leader Start Consensus Timer and Stop View Change Timer", "viewChangingID", consensus.mode.ViewID())
-		consensus.getLogger().Debug("[onViewChange] I am the New Leader", "myKey", consensus.PubKey.SerializeToHexStr(), "viewID", consensus.viewID, "block", consensus.blockNum)
+		consensus.getLogger().Debug().
+			Uint64("viewChangingID", consensus.mode.ViewID()).
+			Msg("[onViewChange] New Leader Start Consensus Timer and Stop View Change Timer")
+		consensus.getLogger().Debug().
+			Str("myKey", consensus.PubKey.SerializeToHexStr()).
+			Uint64("viewID", consensus.viewID).
+			Uint64("block", consensus.blockNum).
+			Msg("[onViewChange] I am the New Leader")
	}
}
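Both onViewChange and onNewView sign and verify the view ID as its 8-byte little-endian encoding. Just that encoding step, as a runnable sketch:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	viewID := uint64(42)
	viewIDBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(viewIDBytes, viewID)
	// This byte slice is what ViewidSig signs and VerifyHash checks.
	fmt.Printf("viewID %d encodes as %x\n", viewID, viewIDBytes) // 2a00000000000000
}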
// TODO: move to consensus_leader.go later
func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
-	consensus.getLogger().Debug("[onNewView] Received NewView Message")
+	consensus.getLogger().Debug().Msg("[onNewView] Received NewView Message")
	senderKey, err := consensus.verifyViewChangeSenderKey(msg)
	if err != nil {
-		consensus.getLogger().Warn("[onNewView] VerifySenderKey Failed", "error", err)
+		consensus.getLogger().Warn().Err(err).Msg("[onNewView] VerifySenderKey Failed")
		return
	}
	recvMsg, err := consensus.ParseNewViewMessage(msg)
	if err != nil {
-		consensus.getLogger().Warn("[onNewView] Unable to Parse NewView Message", "error", err)
+		consensus.getLogger().Warn().Err(err).Msg("[onNewView] Unable to Parse NewView Message")
		return
	}

	if err = verifyMessageSig(senderKey, msg); err != nil {
-		consensus.getLogger().Error("[onNewView] Failed to Verify New Leader's Signature", "error", err)
+		consensus.getLogger().Error().Err(err).Msg("[onNewView] Failed to Verify New Leader's Signature")
		return
	}
	consensus.vcLock.Lock()
	defer consensus.vcLock.Unlock()

	if recvMsg.M3AggSig == nil || recvMsg.M3Bitmap == nil {
-		consensus.getLogger().Error("[onNewView] M3AggSig or M3Bitmap is nil")
+		consensus.getLogger().Error().Msg("[onNewView] M3AggSig or M3Bitmap is nil")
		return
	}
	m3Sig := recvMsg.M3AggSig
@@ -420,21 +480,28 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
	binary.LittleEndian.PutUint64(viewIDBytes, recvMsg.ViewID)
	// check total number of sigs >= 2f+1
	if count := utils.CountOneBits(m3Mask.Bitmap); count < consensus.Quorum() {
-		consensus.getLogger().Debug("[onNewView] Not Have Enough M3 (ViewID) Signature", "need", consensus.Quorum(), "have", count)
+		consensus.getLogger().Debug().
+			Int("need", consensus.Quorum()).
+			Int("have", count).
+			Msg("[onNewView] Not Have Enough M3 (ViewID) Signature")
		return
	}

	if !m3Sig.VerifyHash(m3Mask.AggregatePublic, viewIDBytes) {
-		consensus.getLogger().Warn("[onNewView] Unable to Verify Aggregated Signature of M3 (ViewID) payload", "m3Sig", m3Sig.SerializeToHexStr(), "m3Mask", m3Mask.Bitmap, "MsgViewID", recvMsg.ViewID)
+		consensus.getLogger().Warn().
+			Str("m3Sig", m3Sig.SerializeToHexStr()).
+			Bytes("m3Mask", m3Mask.Bitmap).
+			Uint64("MsgViewID", recvMsg.ViewID).
+			Msg("[onNewView] Unable to Verify Aggregated Signature of M3 (ViewID) payload")
		return
	}

	m2Mask := recvMsg.M2Bitmap
	if recvMsg.M2AggSig != nil {
-		consensus.getLogger().Debug("[onNewView] M2AggSig (NIL) is Not Empty")
+		consensus.getLogger().Debug().Msg("[onNewView] M2AggSig (NIL) is Not Empty")
		m2Sig := recvMsg.M2AggSig
		if !m2Sig.VerifyHash(m2Mask.AggregatePublic, NIL) {
-			consensus.getLogger().Warn("[onNewView] Unable to Verify Aggregated Signature of M2 (NIL) payload")
+			consensus.getLogger().Warn().Msg("[onNewView] Unable to Verify Aggregated Signature of M2 (NIL) payload")
			return
		}
	}
@@ -442,18 +509,18 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
	// check when M3 sigs > M2 sigs, then M1 (recvMsg.Payload) should not be empty
	if m2Mask == nil || m2Mask.Bitmap == nil || (m2Mask != nil && m2Mask.Bitmap != nil && utils.CountOneBits(m3Mask.Bitmap) > utils.CountOneBits(m2Mask.Bitmap)) {
		if len(recvMsg.Payload) <= 32 {
-			consensus.getLogger().Debug("[onNewView] M1 (prepared) Type Payload Not Have Enough Length")
+			consensus.getLogger().Debug().Msg("[onNewView] M1 (prepared) Type Payload Not Have Enough Length")
			return
		}
		// m1 is not empty, check it's valid
		blockHash := recvMsg.Payload[:32]
		aggSig, mask, err := consensus.ReadSignatureBitmapPayload(recvMsg.Payload, 32)
		if err != nil {
-			consensus.getLogger().Error("[onNewView] ReadSignatureBitmapPayload Failed", "error", err)
+			consensus.getLogger().Error().Err(err).Msg("[onNewView] ReadSignatureBitmapPayload Failed")
			return
		}
		if !aggSig.VerifyHash(mask.AggregatePublic, blockHash) {
-			consensus.getLogger().Warn("[onNewView] Failed to Verify Signature for M1 (prepare) message")
+			consensus.getLogger().Warn().Msg("[onNewView] Failed to Verify Signature for M1 (prepare) message")
			return
		}
		copy(consensus.blockHash[:], blockHash)
@@ -478,7 +545,10 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {

	// change view and leaderKey to keep in sync with network
	if consensus.blockNum != recvMsg.BlockNum {
-		consensus.getLogger().Debug("[onNewView] New Leader Changed", "newLeaderKey", consensus.LeaderPubKey.SerializeToHexStr(), "MsgBlockNum", recvMsg.BlockNum)
+		consensus.getLogger().Debug().
+			Str("newLeaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
+			Uint64("MsgBlockNum", recvMsg.BlockNum).
+			Msg("[onNewView] New Leader Changed")
		return
	}
@ -490,16 +560,21 @@ func (consensus *Consensus) onNewView(msg *msg_pb.Message) {
commitPayload := append(blockNumHash, consensus.blockHash[:]...) commitPayload := append(blockNumHash, consensus.blockHash[:]...)
msgToSend := consensus.constructCommitMessage(commitPayload) msgToSend := consensus.constructCommitMessage(commitPayload)
consensus.getLogger().Info("onNewView === commit") consensus.getLogger().Info().Msg("onNewView === commit")
consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend)) consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(p2p.ShardID(consensus.ShardID))}, host.ConstructP2pMessage(byte(17), msgToSend))
consensus.getLogger().Debug("[OnViewChange] Switching phase", "From", consensus.phase, "To", Commit) consensus.getLogger().Debug().
Str("From", consensus.phase.String()).
Str("To", Commit.String()).
Msg("[OnViewChange] Switching phase")
consensus.switchPhase(Commit, true) consensus.switchPhase(Commit, true)
} else { } else {
consensus.ResetState() consensus.ResetState()
consensus.getLogger().Info("onNewView === announce") consensus.getLogger().Info().Msg("onNewView === announce")
} }
consensus.getLogger().Debug("new leader changed", "newLeaderKey", consensus.LeaderPubKey.SerializeToHexStr()) consensus.getLogger().Debug().
consensus.getLogger().Debug("validator start consensus timer and stop view change timer") Str("newLeaderKey", consensus.LeaderPubKey.SerializeToHexStr()).
Msg("new leader changed")
consensus.getLogger().Debug().Msg("validator start consensus timer and stop view change timer")
consensus.consensusTimeout[timeoutConsensus].Start() consensus.consensusTimeout[timeoutConsensus].Start()
consensus.consensusTimeout[timeoutViewChange].Stop() consensus.consensusTimeout[timeoutViewChange].Stop()
} }
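The recurring change in these hunks is a migration from log15-style variadic key-value logging to zerolog's typed, chained API. A minimal stand-alone sketch of the call-site translation used throughout this diff (the wrapper names and field values here are illustrative; the real code reaches the logger via utils.Logger() and consensus.getLogger()):

package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Build a logger; the Harmony code obtains an equivalent one from its utils package.
	logger := zerolog.New(os.Stderr).With().Timestamp().Logger()

	// Old (log15 style): message first, then alternating keys and values:
	//   logger.Warn("Head block missing, resetting chain", "hash", head)

	// New (zerolog style): typed field setters, message last.
	err := errors.New("example")
	logger.Warn().
		Str("hash", "0xabc123").
		Err(err).
		Msg("Head block missing, resetting chain")
}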

@@ -68,14 +68,6 @@ const (
 	commitsCacheLimit = 10
 	epochCacheLimit   = 10
-	// BlocksPerEpoch is the number of blocks in one epoch
-	// currently set to small number for testing
-	// in future, this need to be adjusted dynamically instead of constant
-	// TODO ek – inflate to disable resharding until we can 1) fix shard
-	// state mutation bug and 2) implement key passphrase recycle across
-	// process restart (exec) for shard migration
-	BlocksPerEpoch = 1000000000000
 	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
 	BlockChainVersion = 3
 )
@@ -236,13 +228,14 @@ func (bc *BlockChain) ValidateNewBlock(block *types.Block) error {
 }
 // IsEpochBlock returns whether this block is the first block of an epoch.
+// TODO: lc this is not used
 func IsEpochBlock(block *types.Block) bool {
-	return block.NumberU64()%BlocksPerEpoch == 0
+	return block.NumberU64()%ShardingSchedule.BlocksPerEpoch() == 0
 }
 // IsEpochLastBlock returns whether this block is the last block of an epoch.
 func IsEpochLastBlock(block *types.Block) bool {
-	return block.NumberU64()%BlocksPerEpoch == BlocksPerEpoch-1
+	return ShardingSchedule.IsLastBlock(block.NumberU64())
 }
 func (bc *BlockChain) getProcInterrupt() bool {
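The two hunks above replace the hard-coded BlocksPerEpoch constant with lookups on a package-level ShardingSchedule, whose definition is not shown in this diff. A minimal sketch of an interface that would satisfy the two call sites, plus a fixed-length implementation reproducing the old constant behavior (these names are illustrative, not Harmony's actual API):

package sharding

// Schedule is a hypothetical stand-in for the sharding schedule the diff calls into.
type Schedule interface {
	// BlocksPerEpoch returns the epoch length, now tunable rather than constant.
	BlocksPerEpoch() uint64
	// IsLastBlock reports whether blockNum is the final block of its epoch.
	IsLastBlock(blockNum uint64) bool
}

// fixedSchedule reproduces the behavior of the removed constant.
type fixedSchedule struct{ blocksPerEpoch uint64 }

func (s fixedSchedule) BlocksPerEpoch() uint64 { return s.blocksPerEpoch }

func (s fixedSchedule) IsLastBlock(n uint64) bool {
	return n%s.blocksPerEpoch == s.blocksPerEpoch-1
}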
@@ -256,20 +249,23 @@ func (bc *BlockChain) loadLastState() error {
 	head := rawdb.ReadHeadBlockHash(bc.db)
 	if head == (common.Hash{}) {
 		// Corrupt or empty database, init from scratch
-		utils.GetLogger().Warn("Empty database, resetting chain")
+		utils.Logger().Warn().Msg("Empty database, resetting chain")
 		return bc.Reset()
 	}
 	// Make sure the entire head block is available
 	currentBlock := bc.GetBlockByHash(head)
 	if currentBlock == nil {
 		// Corrupt or empty database, init from scratch
-		utils.GetLogger().Warn("Head block missing, resetting chain", "hash", head)
+		utils.Logger().Warn().Bytes("hash", head.Bytes()).Msg("Head block missing, resetting chain")
 		return bc.Reset()
 	}
 	// Make sure the state associated with the block is available
 	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
 		// Dangling block without a state associated, init from scratch
-		utils.GetLogger().Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
+		utils.Logger().Warn().
+			Str("number", currentBlock.Number().String()).
+			Str("hash", currentBlock.Hash().Hex()).
+			Msg("Head state missing, repairing chain")
 		if err := bc.repair(&currentBlock); err != nil {
 			return err
 		}
@@ -301,9 +297,24 @@ func (bc *BlockChain) loadLastState() error {
 	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
 	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
-	utils.GetLogger().Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)))
-	utils.GetLogger().Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)))
-	utils.GetLogger().Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)))
+	utils.Logger().Info().
+		Str("number", currentHeader.Number.String()).
+		Str("hash", currentHeader.Hash().Hex()).
+		Str("td", headerTd.String()).
+		Str("age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)).String()).
+		Msg("Loaded most recent local header")
+	utils.Logger().Info().
+		Str("number", currentBlock.Number().String()).
+		Str("hash", currentBlock.Hash().Hex()).
+		Str("td", blockTd.String()).
+		Str("age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)).String()).
+		Msg("Loaded most recent local full block")
+	utils.Logger().Info().
+		Str("number", currentFastBlock.Number().String()).
+		Str("hash", currentFastBlock.Hash().Hex()).
+		Str("td", fastTd.String()).
+		Str("age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)).String()).
+		Msg("Loaded most recent local fast block")
 	return nil
 }
@@ -313,7 +324,7 @@ func (bc *BlockChain) loadLastState() error {
 // though, the head may be further rewound if block bodies are missing (non-archive
 // nodes after a fast sync).
 func (bc *BlockChain) SetHead(head uint64) error {
-	utils.GetLogger().Warn("Rewinding blockchain", "target", head)
+	utils.Logger().Warn().Uint64("target", head).Msg("Rewinding blockchain")
 	bc.mu.Lock()
 	defer bc.mu.Unlock()
@@ -379,7 +390,10 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
 	bc.currentBlock.Store(block)
 	bc.mu.Unlock()
-	utils.GetLogger().Info("Committed new head block", "number", block.Number(), "hash", hash)
+	utils.Logger().Info().
+		Str("number", block.Number().String()).
+		Str("hash", hash.Hex()).
+		Msg("Committed new head block")
 	return nil
 }
@@ -481,7 +495,10 @@ func (bc *BlockChain) repair(head **types.Block) error {
 	for {
 		// Abort if we've rewound to a head block that does have associated state
 		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
-			utils.GetLogger().Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
+			utils.Logger().Info().
+				Str("number", (*head).Number().String()).
+				Str("hash", (*head).Hash().Hex()).
+				Msg("Rewound blockchain to past state")
 			return nil
 		}
 		// Otherwise rewind one block and recheck state availability there
@@ -502,7 +519,7 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
 	if first > last {
 		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
 	}
-	utils.GetLogger().Info("Exporting batch of blocks", "count", last-first+1)
+	utils.Logger().Info().Uint64("count", last-first+1).Msg("Exporting batch of blocks")
 	start, reported := time.Now(), time.Now()
 	for nr := first; nr <= last; nr++ {
@@ -514,7 +531,10 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
 			return err
 		}
 		if time.Since(reported) >= statsReportLimit {
-			utils.GetLogger().Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
+			utils.Logger().Info().
+				Uint64("exported", block.NumberU64()-first).
+				Str("elapsed", common.PrettyDuration(time.Since(start)).String()).
+				Msg("Exporting blocks")
 			reported = time.Now()
 		}
 	}
@@ -730,9 +750,13 @@ func (bc *BlockChain) Stop() {
 			if number := bc.CurrentBlock().NumberU64(); number > offset {
 				recent := bc.GetBlockByNumber(number - offset)
-				utils.GetLogger().Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
+				utils.Logger().Info().
+					Str("block", recent.Number().String()).
+					Str("hash", recent.Hash().Hex()).
+					Str("root", recent.Root().Hex()).
+					Msg("Writing cached state to disk")
 				if err := triedb.Commit(recent.Root(), true); err != nil {
-					utils.GetLogger().Error("Failed to commit recent state trie", "err", err)
+					utils.Logger().Error().Err(err).Msg("Failed to commit recent state trie")
 				}
 			}
 		}
@@ -740,10 +764,10 @@ func (bc *BlockChain) Stop() {
 			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
 		}
 		if size, _ := triedb.Size(); size != 0 {
-			utils.GetLogger().Error("Dangling trie nodes after full cleanup")
+			utils.Logger().Error().Msg("Dangling trie nodes after full cleanup")
 		}
 	}
-	utils.GetLogger().Info("Blockchain manager stopped")
+	utils.Logger().Info().Msg("Blockchain manager stopped")
 }
 func (bc *BlockChain) procFutureBlocks() {
@@ -846,8 +870,13 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 	// Do a sanity check that the provided chain is actually ordered and linked
 	for i := 1; i < len(blockChain); i++ {
 		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
-			utils.GetLogger().Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
-				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
+			utils.Logger().Error().
+				Str("number", blockChain[i].Number().String()).
+				Str("hash", blockChain[i].Hash().Hex()).
+				Str("parent", blockChain[i].ParentHash().Hex()).
+				Str("prevnumber", blockChain[i-1].Number().String()).
+				Str("prevhash", blockChain[i-1].Hash().Hex()).
+				Msg("Non contiguous receipt insert")
 			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
 				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
 		}
@@ -912,15 +941,15 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 	}
 	bc.mu.Unlock()
-	context := []interface{}{
-		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
-		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
-		"size", common.StorageSize(bytes),
-	}
-	if stats.ignored > 0 {
-		context = append(context, []interface{}{"ignored", stats.ignored}...)
-	}
-	utils.GetLogger().Info("Imported new block receipts", context...)
+	utils.Logger().Info().
+		Int32("count", stats.processed).
+		Str("elapsed", common.PrettyDuration(time.Since(start)).String()).
+		Str("age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)).String()).
+		Str("head", head.Number().String()).
+		Str("hash", head.Hash().Hex()).
+		Str("size", common.StorageSize(bytes).String()).
+		Int32("ignored", stats.ignored).
+		Msg("Imported new block receipts")
 	return 0, nil
 }
@@ -989,7 +1018,11 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 			// If we're exceeding limits but haven't reached a large enough memory gap,
 			// warn the user that the system is becoming unstable.
 			if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
-				utils.GetLogger().Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
+				utils.Logger().Info().
+					Dur("time", bc.gcproc).
+					Dur("allowance", bc.cacheConfig.TrieTimeLimit).
+					Float64("optimum", float64(chosen-lastWrite)/triesInMemory).
+					Msg("State in memory for too long, committing")
 			}
 			// Flush an entire trie and restart the counters
 			triedb.Commit(header.Root, true)
@@ -1057,15 +1090,16 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 	if err == nil {
 		for idx, block := range chain {
 			header := block.Header()
-			header.Logger(utils.GetLogger()).Info("added block to chain",
-				"segmentIndex", idx,
-				"parentHash", header.ParentHash)
+			header.Logger(utils.Logger()).Info().
+				Int("segmentIndex", idx).
+				Str("parentHash", header.ParentHash.Hex()).
+				Msg("added block to chain")
 			if header.ShardStateHash != (common.Hash{}) {
 				epoch := new(big.Int).Add(header.Epoch, common.Big1)
 				err = bc.WriteShardStateBytes(epoch, header.ShardState)
 				if err != nil {
-					ctxerror.Log15(header.Logger(utils.GetLogger()).Warn,
-						ctxerror.New("cannot store shard state").WithCause(err))
+					header.Logger(utils.Logger()).Warn().Err(err).Msg("cannot store shard state")
 				}
 			}
 		}
@@ -1085,8 +1119,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 	for i := 1; i < len(chain); i++ {
 		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
 			// Chain broke ancestry, log a message (programming error) and skip insertion
-			utils.GetLogger().Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
-				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
+			utils.Logger().Error().
+				Str("number", chain[i].Number().String()).
+				Str("hash", chain[i].Hash().Hex()).
+				Str("parent", chain[i].ParentHash().Hex()).
+				Str("prevnumber", chain[i-1].Number().String()).
+				Str("prevhash", chain[i-1].Hash().Hex()).
+				Msg("Non contiguous block insert")
 			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
 				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
@@ -1126,7 +1165,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 	for i, block := range chain {
 		// If the chain is terminating, stop processing blocks
 		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
-			utils.GetLogger().Debug("Premature abort during blocks processing")
+			utils.Logger().Debug().Msg("Premature abort during blocks processing")
 			break
 		}
 		// Wait for the block's verification to complete
@@ -1230,11 +1269,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 		if err != nil {
 			return i, events, coalescedLogs, err
 		}
+		logger := utils.Logger().With().
+			Str("number", block.Number().String()).
+			Str("hash", block.Hash().Hex()).
+			Int("uncles", len(block.Uncles())).
+			Int("txs", len(block.Transactions())).
+			Uint64("gas", block.GasUsed()).
+			Str("elapsed", common.PrettyDuration(time.Since(bstart)).String()).
+			Logger()
 		switch status {
 		case CanonStatTy:
-			utils.GetLogger().Info("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
-				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
+			logger.Info().Msg("Inserted new block")
 			coalescedLogs = append(coalescedLogs, logs...)
 			blockInsertTimer.UpdateSince(bstart)
 			events = append(events, ChainEvent{block, block.Hash(), logs})
@@ -1244,9 +1289,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 			bc.gcproc += proctime
 		case SideStatTy:
-			utils.GetLogger().Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "elapsed",
-				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
+			logger.Debug().Msg("Inserted forked block")
 			blockInsertTimer.UpdateSince(bstart)
 			events = append(events, ChainSideEvent{block})
 		}
@@ -1290,23 +1333,30 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor
 		end = chain[index]
 		txs = countTransactions(chain[st.lastIndex : index+1])
 	)
-	context := []interface{}{
-		"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
-		"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
-		"number", end.Number(), "hash", end.Hash(),
-	}
+	context := utils.Logger().With().
+		Int("blocks", st.processed).
+		Int("txs", txs).
+		Float64("mgas", float64(st.usedGas)/1000000).
+		Str("elapsed", common.PrettyDuration(elapsed).String()).
+		Float64("mgasps", float64(st.usedGas)*1000/float64(elapsed)).
+		Str("number", end.Number().String()).
+		Str("hash", end.Hash().Hex()).
+		Str("cache", cache.String())
 	if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
-		context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
+		context = context.Str("age", common.PrettyAge(timestamp).String())
 	}
-	context = append(context, []interface{}{"cache", cache}...)
 	if st.queued > 0 {
-		context = append(context, []interface{}{"queued", st.queued}...)
+		context = context.Int("queued", st.queued)
 	}
 	if st.ignored > 0 {
-		context = append(context, []interface{}{"ignored", st.ignored}...)
+		context = context.Int("ignored", st.ignored)
 	}
-	utils.GetLogger().Info("Imported new chain segment", context...)
+	logger := context.Logger()
+	logger.Info().Msg("Imported new chain segment")
 	*st = insertStats{startTime: now, lastIndex: index + 1}
 }
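The insertStats hunk above replaces the old pattern of appending optional key-value pairs to a []interface{} slice with zerolog's Context builder. Context is a value type, so each setter returns a new Context and optional fields are added by reassignment. A minimal sketch of that pattern in isolation (field names and values here are illustrative):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	base := zerolog.New(os.Stderr)
	queued, ignored := 3, 0

	// Mandatory fields go on the initial Context chain.
	ctx := base.With().
		Int("blocks", 128).
		Str("elapsed", "1.2s")

	// Optional fields are attached conditionally by reassigning the Context.
	if queued > 0 {
		ctx = ctx.Int("queued", queued)
	}
	if ignored > 0 {
		ctx = ctx.Int("ignored", ignored)
	}

	// Materialize a logger carrying all accumulated fields, then emit once.
	logger := ctx.Logger()
	logger.Info().Msg("Imported new chain segment")
}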
@@ -1392,14 +1442,25 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 	}
 	// Ensure the user sees large reorgs
 	if len(oldChain) > 0 && len(newChain) > 0 {
-		logFn := utils.GetLogger().Debug
+		logEvent := utils.Logger().Debug()
 		if len(oldChain) > 63 {
-			logFn = utils.GetLogger().Warn
+			logEvent = utils.Logger().Warn()
 		}
-		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
-			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
+		logEvent.
+			Str("number", commonBlock.Number().String()).
+			Str("hash", commonBlock.Hash().Hex()).
+			Int("drop", len(oldChain)).
+			Str("dropfrom", oldChain[0].Hash().Hex()).
+			Int("add", len(newChain)).
+			Str("addfrom", newChain[0].Hash().Hex()).
+			Msg("Chain split detected")
 	} else {
-		utils.GetLogger().Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
+		utils.Logger().Error().
+			Str("oldnum", oldBlock.Number().String()).
+			Str("oldhash", oldBlock.Hash().Hex()).
+			Str("newnum", newBlock.Number().String()).
+			Str("newhash", newBlock.Hash().Hex()).
+			Msg("Impossible reorg, please file an issue")
 	}
 	// Insert the new chain, taking care of the proper incremental order
 	var addedTxs types.Transactions
@@ -1494,7 +1555,7 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e
 	for _, receipt := range receipts {
 		receiptString += fmt.Sprintf("\t%v\n", receipt)
 	}
-	utils.GetLogger().Error(fmt.Sprintf(`
+	utils.Logger().Error().Msgf(`
 ########## BAD BLOCK #########
 Chain config: %v
@@ -1504,7 +1565,7 @@ Hash: 0x%x
 Error: %v
 ##############################
-`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
+`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)
 }
 // InsertHeaderChain attempts to insert the given header chain in to the local
@@ -1683,12 +1744,16 @@ func (bc *BlockChain) WriteShardState(
 func (bc *BlockChain) WriteShardStateBytes(
 	epoch *big.Int, shardState []byte,
 ) error {
+	decodeShardState := types.ShardState{}
+	if err := rlp.DecodeBytes(shardState, &decodeShardState); err != nil {
+		return err
+	}
 	err := rawdb.WriteShardStateBytes(bc.db, epoch, shardState)
 	if err != nil {
 		return err
 	}
 	cacheKey := string(epoch.Bytes())
-	bc.shardStateCache.Add(cacheKey, shardState)
+	bc.shardStateCache.Add(cacheKey, decodeShardState)
 	return nil
 }
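The WriteShardStateBytes change above decodes the RLP payload before persisting, so corrupt bytes are rejected up front and the cache ends up holding the decoded types.ShardState rather than raw bytes. A minimal stand-alone sketch of that validate-then-cache shape (the shardState type, cache map, and function name below are illustrative stand-ins, not Harmony's types):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// shardState is a hypothetical stand-in for types.ShardState.
type shardState []struct {
	ShardID uint32
}

var cache = map[string]shardState{}

// writeShardStateBytes validates the encoding before touching storage or cache.
func writeShardStateBytes(epochKey string, raw []byte) error {
	decoded := shardState{}
	if err := rlp.DecodeBytes(raw, &decoded); err != nil {
		return err // reject corrupt bytes before they can poison the cache
	}
	// ... persist raw to the database here ...
	cache[epochKey] = decoded // cache the decoded form, not the raw bytes
	return nil
}

func main() {
	raw, _ := rlp.EncodeToBytes(shardState{{ShardID: 0}, {ShardID: 1}})
	if err := writeShardStateBytes("epoch-1", raw); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Println("cached shards:", len(cache["epoch-1"]))
}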
@@ -1741,7 +1806,8 @@ func (bc *BlockChain) GetVrfByNumber(number uint64) [32]byte {
 // GetShardState returns the shard state for the given epoch,
 // creating one if needed.
 func (bc *BlockChain) GetShardState(
-	epoch *big.Int, stakeInfo *map[common.Address]*structs.StakeInfo,
+	epoch *big.Int,
+	stakeInfo *map[common.Address]*structs.StakeInfo,
 ) (types.ShardState, error) {
 	shardState, err := bc.ReadShardState(epoch)
 	if err == nil { // TODO ek – distinguish ErrNotFound
@@ -1755,7 +1821,7 @@ func (bc *BlockChain) GetShardState(
 	if err != nil {
 		return nil, err
 	}
-	utils.GetLogger().Debug("saved new shard state", "epoch", epoch)
+	utils.Logger().Debug().Str("epoch", epoch.String()).Msg("saved new shard state")
 	return shardState, nil
 }
@@ -1802,3 +1868,8 @@ func (bc *BlockChain) StoreEpochBlockNumber(
 func (bc *BlockChain) ChainDB() ethdb.Database {
 	return bc.db
 }
+
+// GetVMConfig returns the block chain VM config.
+func (bc *BlockChain) GetVMConfig() *vm.Config {
+	return &bc.vmConfig
+}

@@ -27,7 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/log"
+	"github.com/rs/zerolog"
 	"github.com/harmony-one/harmony/core/rawdb"
 	"github.com/harmony-one/harmony/core/types"
@@ -92,7 +92,7 @@ type ChainIndexer struct {
 	throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources
-	log  log.Logger
+	log  *zerolog.Logger
 	lock sync.RWMutex
 }
@@ -100,6 +100,7 @@ type ChainIndexer struct {
 // chain segments of a given size after certain number of confirmations passed.
 // The throttling parameter might be used to prevent database thrashing.
 func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
+	logger := utils.Logger().With().Str("type", kind).Logger()
 	c := &ChainIndexer{
 		chainDb: chainDb,
 		indexDb: indexDb,
@@ -109,7 +110,7 @@ func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend Cha
 		sectionSize: section,
 		confirmsReq: confirm,
 		throttling:  throttling,
-		log:         utils.GetLogInstance().New("type", kind),
+		log:         &logger,
 	}
 	// Initialize database dependent fields and start the updater
 	c.loadValidSections()
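NewChainIndexer now derives a component-scoped sub-logger once, tags it with the indexer kind, and stores a pointer to it on the struct, replacing the old log15 child logger. A minimal sketch of the same pattern outside the Harmony codebase (type and field names here are illustrative):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

type indexer struct {
	log *zerolog.Logger // every method logs with the "type" field already attached
}

func newIndexer(base zerolog.Logger, kind string) *indexer {
	// Derive the sub-logger once; zerolog.Logger is a value, so take the
	// address only after With()...Logger() has produced the tagged copy.
	logger := base.With().Str("type", kind).Logger()
	return &indexer{log: &logger}
}

func main() {
	base := zerolog.New(os.Stderr)
	idx := newIndexer(base, "bloombits")
	idx.log.Info().Uint64("section", 7).Msg("Processing new chain section")
}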
@@ -283,7 +284,11 @@ func (c *ChainIndexer) newHead(head uint64, reorg bool) {
 		// syncing reached the checkpoint, verify section head
 		syncedHead := rawdb.ReadCanonicalHash(c.chainDb, c.checkpointSections*c.sectionSize-1)
 		if syncedHead != c.checkpointHead {
-			c.log.Error("Synced chain does not match checkpoint", "number", c.checkpointSections*c.sectionSize-1, "expected", c.checkpointHead, "synced", syncedHead)
+			c.log.Error().
+				Uint64("number", c.checkpointSections*c.sectionSize-1).
+				Str("expected", c.checkpointHead.Hex()).
+				Str("synced", syncedHead.Hex()).
+				Msg("Synced chain does not match checkpoint")
 			return
 		}
 	}
@@ -320,7 +325,7 @@ func (c *ChainIndexer) updateLoop() {
 			if time.Since(updated) > 8*time.Second {
 				if c.knownSections > c.storedSections+1 {
 					updating = true
-					c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
+					c.log.Info().Uint64("percentage", c.storedSections*100/c.knownSections).Msg("Upgrading chain index")
 				}
 				updated = time.Now()
 			}
@@ -340,7 +345,7 @@ func (c *ChainIndexer) updateLoop() {
 					return
 				default:
 				}
-				c.log.Error("Section processing failed", "error", err)
+				c.log.Error().Err(err).Msg("Section processing failed")
 			}
 			c.lock.Lock()
@@ -350,16 +355,16 @@ func (c *ChainIndexer) updateLoop() {
 				c.setValidSections(section + 1)
 				if c.storedSections == c.knownSections && updating {
 					updating = false
-					c.log.Info("Finished upgrading chain index")
+					c.log.Info().Msg("Finished upgrading chain index")
 				}
 				c.cascadedHead = c.storedSections*c.sectionSize - 1
 				for _, child := range c.children {
-					c.log.Trace("Cascading chain index update", "head", c.cascadedHead)
+					c.log.Warn().Uint64("head", c.cascadedHead).Msg("Cascading chain index update")
 					child.newHead(c.cascadedHead, false)
 				}
 			} else {
 				// If processing failed, don't retry until further notification
-				c.log.Debug("Chain index processing failed", "section", section, "err", err)
+				c.log.Debug().Err(err).Uint64("section", section).Msg("Chain index processing failed")
 				c.knownSections = c.storedSections
 			}
 		}
@@ -382,7 +387,7 @@ func (c *ChainIndexer) updateLoop() {
 // held while processing, the continuity can be broken by a long reorg, in which
 // case the function returns with an error.
 func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (common.Hash, error) {
-	c.log.Trace("Processing new chain section", "section", section)
+	c.log.Warn().Uint64("section", section).Msg("Processing new chain section")
 	// Reset and partial processing

@@ -162,10 +162,10 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
 	stored := rawdb.ReadCanonicalHash(db, 0)
 	if (stored == common.Hash{}) {
 		if genesis == nil {
-			utils.GetLogger().Info("Writing default main-net genesis block")
+			utils.Logger().Info().Msg("Writing default main-net genesis block")
 			genesis = DefaultGenesisBlock()
 		} else {
-			utils.GetLogger().Info("Writing custom genesis block")
+			utils.Logger().Info().Msg("Writing custom genesis block")
 		}
 		block, err := genesis.Commit(db)
 		return genesis.Config, block.Hash(), err
@@ -183,7 +183,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
 	newcfg := genesis.configOrDefault(stored)
 	storedcfg := rawdb.ReadChainConfig(db, stored)
 	if storedcfg == nil {
-		utils.GetLogger().Warn("Found genesis block without chain config")
+		utils.Logger().Warn().Msg("Found genesis block without chain config")
 		rawdb.WriteChainConfig(db, stored, newcfg)
 		return newcfg, stored, nil
 	}
@@ -225,7 +225,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
 // to the given database (or discards it if nil).
 func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
 	if db == nil {
-		utils.GetLogInstance().Error("db should be initialized")
+		utils.Logger().Error().Msg("db should be initialized")
 		os.Exit(1)
 	}
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
@@ -240,7 +240,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
 	root := statedb.IntermediateRoot(false)
 	shardStateBytes, err := rlp.EncodeToBytes(g.ShardState)
 	if err != nil {
-		utils.GetLogInstance().Error("failed to rlp-serialize genesis shard state")
+		utils.Logger().Error().Msg("failed to rlp-serialize genesis shard state")
 		os.Exit(1)
 	}
 	head := &types.Header{
@@ -281,7 +281,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
 	err := rawdb.WriteShardStateBytes(db, block.Header().Epoch, block.Header().ShardState)
 	if err != nil {
-		utils.GetLogger().Crit("Failed to store genesis shard state", "err", err)
+		utils.Logger().Error().Err(err).Msg("Failed to store genesis shard state")
 	}
 	config := g.Config

@@ -144,7 +144,7 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 	// Irrelevant of the canonical status, write the td and header to the database
 	//if err := hc.WriteTd(hash, number, externTd); err != nil {
-	//	utils.GetLogger().Crit("Failed to write header total difficulty", "err", err)
+	//	utils.Logger().Error().Err(err).Msg("Failed to write header total difficulty")
 	//}
 	//rawdb.WriteHeader(hc.chainDb, header)
@@ -207,8 +207,13 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
 	for i := 1; i < len(chain); i++ {
 		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != chain[i-1].Hash() {
 			// Chain broke ancestry, log a message (programming error) and skip insertion
-			utils.GetLogger().Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(),
-				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", chain[i-1].Hash())
+			utils.Logger().Error().
+				Str("number", chain[i].Number.String()).
+				Str("hash", chain[i].Hash().Hex()).
+				Str("parent", chain[i].ParentHash.Hex()).
+				Str("prevnumber", chain[i-1].Number.String()).
+				Str("prevhash", chain[i-1].Hash().Hex()).
+				Msg("Non contiguous header insert")
 			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number,
 				chain[i-1].Hash().Bytes()[:4], i, chain[i].Number, chain[i].Hash().Bytes()[:4], chain[i].ParentHash[:4])
@@ -233,7 +238,7 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
 	//for i, _ := range chain {
 	//	// If the chain is terminating, stop processing blocks
 	//	if hc.procInterrupt() {
-	//		utils.GetLogger().Debug("Premature abort during headers verification")
+	//		utils.Logger().Debug().Msg("Premature abort during headers verification")
 	//		return 0, errors.New("aborted")
 	//	}
 	//
@@ -261,7 +266,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
 	for i, header := range chain {
 		// Short circuit insertion if shutting down
 		if hc.procInterrupt() {
-			utils.GetLogger().Debug("Premature abort during headers import")
+			utils.Logger().Debug().Msg("Premature abort during headers import")
 			return i, errors.New("aborted")
 		}
 		// If the header's already known, skip it, otherwise store
@@ -277,17 +282,20 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
 	// Report some public statistics so the user has a clue what's going on
 	last := chain[len(chain)-1]
-	context := []interface{}{
-		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
-		"number", last.Number, "hash", last.Hash(),
-	}
+	context := utils.Logger().With().
+		Int("count", stats.processed).
+		Str("elapsed", common.PrettyDuration(time.Since(start)).String()).
+		Str("number", last.Number.String()).
+		Str("hash", last.Hash().Hex())
 	if timestamp := time.Unix(last.Time.Int64(), 0); time.Since(timestamp) > time.Minute {
-		context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
+		context = context.Str("age", common.PrettyAge(timestamp).String())
 	}
 	if stats.ignored > 0 {
-		context = append(context, []interface{}{"ignored", stats.ignored}...)
+		context = context.Int("ignored", stats.ignored)
 	}
-	utils.GetLogger().Info("Imported new block headers", context...)
+	logger := context.Logger()
+	logger.Info().Msg("Imported new block headers")
 	return 0, nil
 }

@@ -41,14 +41,14 @@ func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash {
 // WriteCanonicalHash stores the hash assigned to a canonical block number.
 func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) {
 	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
-		utils.GetLogger().Crit("Failed to store number to hash mapping", "err", err)
+		utils.Logger().Error().Msg("Failed to store number to hash mapping")
 	}
 }
 // DeleteCanonicalHash removes the number to hash canonical mapping.
 func DeleteCanonicalHash(db DatabaseDeleter, number uint64) {
 	if err := db.Delete(headerHashKey(number)); err != nil {
-		utils.GetLogger().Crit("Failed to delete number to hash mapping", "err", err)
+		utils.Logger().Error().Msg("Failed to delete number to hash mapping")
 	}
 }
@@ -74,7 +74,7 @@ func ReadHeadHeaderHash(db DatabaseReader) common.Hash {
 // WriteHeadHeaderHash stores the hash of the current canonical head header.
 func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) {
 	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
-		utils.GetLogger().Crit("Failed to store last header's hash", "err", err)
+		utils.Logger().Error().Msg("Failed to store last header's hash")
 	}
 }
@@ -90,7 +90,7 @@ func ReadHeadBlockHash(db DatabaseReader) common.Hash {
 // WriteHeadBlockHash stores the head block's hash.
 func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) {
 	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
-		utils.GetLogger().Crit("Failed to store last block's hash", "err", err)
+		utils.Logger().Error().Msg("Failed to store last block's hash")
 	}
 }
@@ -106,7 +106,7 @@ func ReadHeadFastBlockHash(db DatabaseReader) common.Hash {
 // WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
 func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) {
 	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
-		utils.GetLogger().Crit("Failed to store last fast block's hash", "err", err)
+		utils.Logger().Error().Msg("Failed to store last fast block's hash")
 	}
 }
@@ -124,7 +124,7 @@ func ReadFastTrieProgress(db DatabaseReader) uint64 {
 // retrieving it across restarts.
 func WriteFastTrieProgress(db DatabaseWriter, count uint64) {
 	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
-		utils.GetLogger().Crit("Failed to store fast sync trie progress", "err", err)
+		utils.Logger().Error().Msg("Failed to store fast sync trie progress")
 	}
 }
@@ -150,7 +150,7 @@ func ReadHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Heade
 	}
 	header := new(types.Header)
 	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
-		utils.GetLogger().Error("Invalid block header RLP", "hash", hash, "err", err)
+		utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid block header RLP")
 		return nil
 	}
 	return header
@@ -167,26 +167,26 @@ func WriteHeader(db DatabaseWriter, header *types.Header) {
 	)
 	key := headerNumberKey(hash)
 	if err := db.Put(key, encoded); err != nil {
-		utils.GetLogger().Crit("Failed to store hash to number mapping", "err", err)
+		utils.Logger().Error().Msg("Failed to store hash to number mapping")
 	}
 	// Write the encoded header
 	data, err := rlp.EncodeToBytes(header)
 	if err != nil {
-		utils.GetLogger().Crit("Failed to RLP encode header", "err", err)
+		utils.Logger().Error().Msg("Failed to RLP encode header")
 	}
 	key = headerKey(number, hash)
 	if err := db.Put(key, data); err != nil {
-		utils.GetLogger().Crit("Failed to store header", "err", err)
+		utils.Logger().Error().Msg("Failed to store header")
 	}
 }
 // DeleteHeader removes all block header data associated with a hash.
 func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) {
 	if err := db.Delete(headerKey(number, hash)); err != nil {
-		utils.GetLogger().Crit("Failed to delete header", "err", err)
+		utils.Logger().Error().Msg("Failed to delete header")
 	}
 	if err := db.Delete(headerNumberKey(hash)); err != nil {
-		utils.GetLogger().Crit("Failed to delete hash to number mapping", "err", err)
+		utils.Logger().Error().Msg("Failed to delete hash to number mapping")
 	}
 }
@@ -199,7 +199,7 @@ func ReadBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValu
 // WriteBodyRLP stores an RLP encoded block body into the database.
 func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
 	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
-		utils.GetLogger().Crit("Failed to store block body", "err", err)
+		utils.Logger().Error().Msg("Failed to store block body")
 	}
 }
@@ -219,7 +219,7 @@ func ReadBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
 	}
 	body := new(types.Body)
 	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
-		utils.GetLogger().Error("Invalid block body RLP", "hash", hash, "err", err)
+		utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid block body RLP")
 		return nil
 	}
 	return body
@@ -229,7 +229,7 @@ func ReadBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
 func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.Body) {
 	data, err := rlp.EncodeToBytes(body)
 	if err != nil {
-		utils.GetLogger().Crit("Failed to RLP encode body", "err", err)
+		utils.Logger().Error().Msg("Failed to RLP encode body")
 	}
 	WriteBodyRLP(db, hash, number, data)
 }
@@ -237,7 +237,7 @@ func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.B
 // DeleteBody removes all block body data associated with a hash.
 func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) {
 	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
-		utils.GetLogger().Crit("Failed to delete block body", "err", err)
+		utils.Logger().Error().Msg("Failed to delete block body")
 	}
 }
@@ -249,7 +249,7 @@ func ReadTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
 	}
 	td := new(big.Int)
 	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
-		utils.GetLogger().Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
+		utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid block total difficulty RLP")
 		return nil
 	}
 	return td
@@ -259,17 +259,17 @@ func ReadTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
 func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) {
 	data, err := rlp.EncodeToBytes(td)
 	if err != nil {
-		utils.GetLogger().Crit("Failed to RLP encode block total difficulty", "err", err)
+		utils.Logger().Error().Msg("Failed to RLP encode block total difficulty")
 	}
 	if err := db.Put(headerTDKey(number, hash), data); err != nil {
-		utils.GetLogger().Crit("Failed to store block total difficulty", "err", err)
+		utils.Logger().Error().Msg("Failed to store block total difficulty")
 	}
 }
 // DeleteTd removes all block total difficulty data associated with a hash.
 func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
 	if err := db.Delete(headerTDKey(number, hash)); err != nil {
-		utils.GetLogger().Crit("Failed to delete block total difficulty", "err", err)
+		utils.Logger().Error().Msg("Failed to delete block total difficulty")
 	}
 }
@@ -283,7 +283,7 @@ func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Rece
 	// Convert the receipts from their storage form to their internal representation
 	storageReceipts := []*types.ReceiptForStorage{}
 	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
-		utils.GetLogger().Error("Invalid receipt array RLP", "hash", hash, "err", err)
+		utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid receipt array RLP")
 		return nil
 	}
 	receipts := make(types.Receipts, len(storageReceipts))
@@ -302,18 +302,18 @@ func WriteReceipts(db DatabaseWriter, hash common.Hash, number uint64, receipts
 	}
 	bytes, err := rlp.EncodeToBytes(storageReceipts)
 	if err != nil {
-		utils.GetLogger().Crit("Failed to encode block receipts", "err", err)
+		utils.Logger().Error().Msg("Failed to encode block receipts")
 	}
 	// Store the flattened receipt slice
 	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
-		utils.GetLogger().Crit("Failed to store block receipts", "err", err)
+		utils.Logger().Error().Msg("Failed to store block receipts")
 	}
 }
 // DeleteReceipts removes all receipt data associated with a block hash.
 func DeleteReceipts(db DatabaseDeleter, hash common.Hash, number uint64) {
 	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
-		utils.GetLogger().Crit("Failed to delete block receipts", "err", err)
+		utils.Logger().Error().Msg("Failed to delete block receipts")
 	}
 }
@@ -349,7 +349,7 @@ func WriteBlock(db DatabaseWriter, block *types.Block) {
 	epochBlockNum := block.Number()
 	writeOne := func() {
 		if err := WriteEpochBlockNumber(db, epoch, epochBlockNum); err != nil {
-			ctxerror.Log15(utils.GetLogInstance().Error, err)
+			utils.Logger().Error().Err(err).Msg("Failed to write epoch block number")
 		}
 	}
 	// A block may be a genesis block AND end-of-epoch block at the same time.
@@ -443,8 +443,7 @@ func WriteShardStateBytes(
 			"epoch", epoch,
 		).WithCause(err)
 	}
-	utils.GetLogger().Info("wrote sharding state",
-		"epoch", epoch, "numShards", len(data))
+	utils.Logger().Info().Str("epoch", epoch.String()).Int("numShards", len(data)).Msg("wrote sharding state")
 	return nil
 }
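One behavioral note on the hunks above: in go-ethereum's log package, Crit logs the message and then exits the process, while a zerolog Error() event only logs, so the converted call sites now continue past a failed database write. zerolog's closest analogue to the old log-and-exit behavior is Fatal(). A minimal sketch of the distinction (illustrative only, not code from this diff):

package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stderr)
	err := errors.New("disk full")

	// Logs at error level and continues; this is what the diff uses
	// where the old code called Crit.
	logger.Error().Err(err).Msg("Failed to store block body")

	// Logs at fatal level, then Msg calls os.Exit(1); the closer
	// analogue to log.Crit's log-and-exit behavior.
	logger.Fatal().Err(err).Msg("Failed to store block body")
}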

@@ -33,7 +33,7 @@ func ReadTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64
 	}
 	var entry TxLookupEntry
 	if err := rlp.DecodeBytes(data, &entry); err != nil {
-		utils.GetLogger().Error("Invalid transaction lookup entry RLP", "hash", hash, "err", err)
+		utils.Logger().Error().Err(err).Bytes("hash", hash.Bytes()).Msg("Invalid transaction lookup entry RLP")
 		return common.Hash{}, 0, 0
 	}
 	return entry.BlockHash, entry.BlockIndex, entry.Index
@@ -50,10 +50,10 @@ func WriteTxLookupEntries(db DatabaseWriter, block *types.Block) {
 		}
 		data, err := rlp.EncodeToBytes(entry)
 		if err != nil {
-			utils.GetLogger().Crit("Failed to encode transaction lookup entry", "err", err)
+			utils.Logger().Error().Err(err).Msg("Failed to encode transaction lookup entry")
 		}
 		if err := db.Put(txLookupKey(tx.Hash()), data); err != nil {
-			utils.GetLogger().Crit("Failed to store transaction lookup entry", "err", err)
+			utils.Logger().Error().Err(err).Msg("Failed to store transaction lookup entry")
 		}
 	}
 }
@@ -72,7 +72,11 @@ func ReadTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, c
 	}
 	body := ReadBody(db, blockHash, blockNumber)
 	if body == nil || len(body.Transactions) <= int(txIndex) {
-		utils.GetLogger().Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex)
+		utils.Logger().Error().
+			Uint64("number", blockNumber).
+			Str("hash", blockHash.Hex()).
+			Uint64("index", txIndex).
+			Msg("Transaction referenced missing")
 		return nil, common.Hash{}, 0, 0
 	}
 	return body.Transactions[txIndex], blockHash, blockNumber, txIndex
@@ -87,7 +91,11 @@ func ReadReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Ha
 	}
 	receipts := ReadReceipts(db, blockHash, blockNumber)
 	if len(receipts) <= int(receiptIndex) {
-		utils.GetLogger().Error("Receipt refereced missing", "number", blockNumber, "hash", blockHash, "index", receiptIndex)
+		utils.Logger().Error().
+			Uint64("number", blockNumber).
+			Str("hash", blockHash.Hex()).
+			Uint64("index", receiptIndex).
+			Msg("Receipt refereced missing")
 		return nil, common.Hash{}, 0, 0
 	}
 	return receipts[receiptIndex], blockHash, blockNumber, receiptIndex
@@ -103,6 +111,6 @@ func ReadBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash
 // section and bit index.
 func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Hash, bits []byte) {
 	if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
-		utils.GetLogger().Crit("Failed to store bloom bits", "err", err)
+		utils.Logger().Error().Err(err).Msg("Failed to store bloom bits")
 	}
 }

@@ -40,7 +40,7 @@ func ReadDatabaseVersion(db DatabaseReader) int {
 func WriteDatabaseVersion(db DatabaseWriter, version int) {
 	enc, _ := rlp.EncodeToBytes(version)
 	if err := db.Put(databaseVerisionKey, enc); err != nil {
-		utils.GetLogger().Crit("Failed to store the database version", "err", err)
+		utils.Logger().Error().Err(err).Msg("Failed to store the database version")
 	}
 }
@@ -52,7 +52,7 @@ func ReadChainConfig(db DatabaseReader, hash common.Hash) *params.ChainConfig {
 	}
 	var config params.ChainConfig
 	if err := json.Unmarshal(data, &config); err != nil {
-		utils.GetLogger().Error("Invalid chain config JSON", "hash", hash, "err", err)
+		utils.Logger().Error().Err(err).Bytes("hash", hash.Bytes()).Msg("Invalid chain config JSON")
 		return nil
 	}
 	return &config
@@ -65,10 +65,10 @@ func WriteChainConfig(db DatabaseWriter, hash common.Hash, cfg *params.ChainConf
 	}
 	data, err := json.Marshal(cfg)
 	if err != nil {
-		utils.GetLogger().Crit("Failed to JSON encode chain config", "err", err)
+		utils.Logger().Error().Err(err).Msg("Failed to JSON encode chain config")
 	}
 	if err := db.Put(configKey(hash), data); err != nil {
-		utils.GetLogger().Crit("Failed to store chain config", "err", err)
+		utils.Logger().Error().Err(err).Msg("Failed to store chain config")
 	}
 }
@@ -83,7 +83,7 @@ func ReadPreimage(db DatabaseReader, hash common.Hash) []byte {
 func WritePreimages(db DatabaseWriter, number uint64, preimages map[common.Hash][]byte) {
 	for hash, preimage := range preimages {
 		if err := db.Put(preimageKey(hash), preimage); err != nil {
-			utils.GetLogger().Crit("Failed to store trie preimage", "err", err)
+			utils.Logger().Error().Err(err).Msg("Failed to store trie preimage")
 		}
 	}
 	preimageCounter.Inc(int64(len(preimages)))

@@ -2,6 +2,7 @@ package core
 import (
 	"encoding/binary"
+	"encoding/hex"
 	"errors"
 	"math/big"
 	"math/rand"
@@ -13,22 +14,14 @@ import (
 	"github.com/harmony-one/harmony/contracts/structs"
 	"github.com/harmony-one/harmony/core/types"
 	common2 "github.com/harmony-one/harmony/internal/common"
+	shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
 	"github.com/harmony-one/harmony/internal/ctxerror"
-	"github.com/harmony-one/harmony/internal/genesis"
 	"github.com/harmony-one/harmony/internal/utils"
 )

 const (
 	// GenesisEpoch is the number of the genesis epoch.
 	GenesisEpoch = 0
-	// FirstEpoch is the number of the first epoch.
-	FirstEpoch = 1
-	// GenesisShardNum is the number of shard at genesis
-	GenesisShardNum = 4
-	// GenesisShardSize is the size of each shard at genesis
-	GenesisShardSize = 150
-	// GenesisShardHarmonyNodes is the number of harmony node at each shard
-	GenesisShardHarmonyNodes = 112
 	// CuckooRate is the percentage of nodes getting reshuffled in the second step of cuckoo resharding.
 	CuckooRate = 0.1
 )
@@ -65,7 +58,7 @@ func (ss *ShardingState) assignNewNodes(newNodeList []types.NodeID) {
 		if id < len(ss.shardState) {
 			ss.shardState[id].NodeList = append(ss.shardState[id].NodeList, nid)
 		} else {
-			utils.GetLogInstance().Error("assignNewNodes", "index out of range", len(ss.shardState), "id", id)
+			utils.Logger().Error().Int("id", id).Int("shardState Count", len(ss.shardState)).Msg("assignNewNodes index out of range")
 		}
 	}
 }
@@ -123,7 +116,7 @@ func (ss *ShardingState) Reshard(newNodeList []types.NodeID, percent float64) {
 	// Put leader back
 	if len(leaders) < ss.numShards {
-		utils.GetLogInstance().Error("Not enough leaders to assign to shards")
+		utils.Logger().Error().Msg("Not enough leaders to assign to shards")
 	}
 	for i := 0; i < ss.numShards; i++ {
 		ss.shardState[i].NodeList = append([]types.NodeID{leaders[i]}, ss.shardState[i].NodeList...)
@@ -142,20 +135,22 @@ func Shuffle(list []types.NodeID) {
 }

 // GetBlockNumberFromEpoch calculates the block number where epoch sharding information is stored
+// TODO lc - use ShardingSchedule function
 func GetBlockNumberFromEpoch(epoch uint64) uint64 {
-	number := epoch * uint64(BlocksPerEpoch) // currently we use the first block in each epoch
+	number := epoch * ShardingSchedule.BlocksPerEpoch() // currently we use the first block in each epoch
 	return number
 }

 // GetLastBlockNumberFromEpoch calculates the last block number for the given
 // epoch. TODO ek – this is a temp hack.
+// TODO lc - use ShardingSchedule function
 func GetLastBlockNumberFromEpoch(epoch uint64) uint64 {
-	return (epoch+1)*BlocksPerEpoch - 1
+	return (epoch+1)*ShardingSchedule.BlocksPerEpoch() - 1
 }

 // GetEpochFromBlockNumber calculates the epoch number the block belongs to
 func GetEpochFromBlockNumber(blockNumber uint64) uint64 {
-	return blockNumber / uint64(BlocksPerEpoch)
+	return ShardingSchedule.CalcEpochNumber(blockNumber).Uint64()
 }

 // GetShardingStateFromBlockChain will retrieve random seed and shard map from beacon chain for given a epoch
@@ -191,7 +186,7 @@ func CalculateNewShardState(
 			WithCause(err)
 	}
 	newNodeList := ss.UpdateShardingState(stakeInfo)
-	utils.GetLogInstance().Info("Cuckoo Rate", "percentage", CuckooRate)
+	utils.Logger().Info().Float64("percentage", CuckooRate).Msg("Cuckoo Rate")
 	ss.Reshard(newNodeList, CuckooRate)
 	return ss.shardState, nil
 }
@@ -223,38 +218,82 @@ func (ss *ShardingState) UpdateShardingState(stakeInfo *map[common.Address]*stru
 	return newAddresses
 }

+// TODO ek – shardingSchedule should really be part of a general-purpose network
+// configuration. We are OK for the time being,
+// until the day we should let one node process join multiple networks.
+
+// ShardingSchedule is the sharding configuration schedule.
+// Depends on the type of the network. Defaults to the mainnet schedule.
+var ShardingSchedule shardingconfig.Schedule = shardingconfig.MainnetSchedule
+
 // GetInitShardState returns the initial shard state at genesis.
 func GetInitShardState() types.ShardState {
-	utils.GetLogInstance().Info("Generating Genesis Shard State.")
+	return GetShardState(big.NewInt(GenesisEpoch))
+}
+
+// GetShardState returns the shard state based on epoch number
+func GetShardState(epoch *big.Int) types.ShardState {
+	utils.Logger().Info().Int64("epoch", epoch.Int64()).Msg("Get Shard State of Epoch.")
+	shardingConfig := ShardingSchedule.InstanceForEpoch(epoch)
+	shardNum := int(shardingConfig.NumShards())
+	shardHarmonyNodes := shardingConfig.NumHarmonyOperatedNodesPerShard()
+	shardSize := shardingConfig.NumNodesPerShard()
+	hmyAccounts := shardingConfig.HmyAccounts()
+	fnAccounts := shardingConfig.FnAccounts()
 	shardState := types.ShardState{}
-	for i := 0; i < GenesisShardNum; i++ {
+	for i := 0; i < shardNum; i++ {
 		com := types.Committee{ShardID: uint32(i)}
-		for j := 0; j < GenesisShardHarmonyNodes; j++ {
-			index := i + j*GenesisShardNum // The initial account to use for genesis nodes
+		for j := 0; j < shardHarmonyNodes; j++ {
+			index := i + j*shardNum // The initial account to use for genesis nodes
 			pub := &bls.PublicKey{}
-			pub.DeserializeHexStr(genesis.HarmonyAccounts[index].BlsPublicKey)
+			pub.DeserializeHexStr(hmyAccounts[index].BlsPublicKey)
 			pubKey := types.BlsPublicKey{}
 			pubKey.FromLibBLSPublicKey(pub)
 			// TODO: directly read address for bls too
-			curNodeID := types.NodeID{common2.ParseAddr(genesis.HarmonyAccounts[index].Address), pubKey}
+			curNodeID := types.NodeID{common2.ParseAddr(hmyAccounts[index].Address), pubKey}
 			com.NodeList = append(com.NodeList, curNodeID)
 		}
 		// add FN runner's key
-		for j := GenesisShardHarmonyNodes; j < GenesisShardSize; j++ {
-			index := i + (j-GenesisShardHarmonyNodes)*GenesisShardNum
+		for j := shardHarmonyNodes; j < shardSize; j++ {
+			index := i + (j-shardHarmonyNodes)*shardNum
 			pub := &bls.PublicKey{}
-			pub.DeserializeHexStr(genesis.FoundationalNodeAccounts[index].BlsPublicKey)
+			pub.DeserializeHexStr(fnAccounts[index].BlsPublicKey)
 			pubKey := types.BlsPublicKey{}
 			pubKey.FromLibBLSPublicKey(pub)
 			// TODO: directly read address for bls too
-			curNodeID := types.NodeID{common2.ParseAddr(genesis.FoundationalNodeAccounts[index].Address), pubKey}
+			curNodeID := types.NodeID{common2.ParseAddr(fnAccounts[index].Address), pubKey}
 			com.NodeList = append(com.NodeList, curNodeID)
 		}
 		shardState = append(shardState, com)
 	}
 	return shardState
 }
+
+// GetPublicKeys returns the publickeys given epoch and shardID
+func GetPublicKeys(epoch *big.Int, shardID uint32) []*bls.PublicKey {
+	shardState := GetShardState(epoch)
+	// Update validator public keys
+	committee := shardState.FindCommitteeByID(shardID)
+	if committee == nil {
+		utils.Logger().Warn().Uint32("shardID", shardID).Uint64("epoch", epoch.Uint64()).Msg("Cannot find committee")
+		return nil
+	}
+	pubKeys := []*bls.PublicKey{}
+	for _, node := range committee.NodeList {
+		pubKey := &bls.PublicKey{}
+		pubKeyBytes := node.BlsPublicKey[:]
+		err := pubKey.Deserialize(pubKeyBytes)
+		if err != nil {
+			utils.Logger().Warn().Str("pubKeyBytes", hex.EncodeToString(pubKeyBytes)).Msg("Cannot Deserialize pubKey")
+			return nil
+		}
+		pubKeys = append(pubKeys, pubKey)
+	}
+	return pubKeys
+}
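The hunks above replace the hard-coded genesis constants (GenesisShardNum, GenesisShardSize, GenesisShardHarmonyNodes) with an epoch-indexed ShardingSchedule, so shard count and committee sizes can differ per epoch and per network. A self-contained sketch of the epoch arithmetic these helpers now delegate to; the schedule interface and fixedSchedule type here are illustrative stand-ins, not the real shardingconfig.Schedule, and blocksPerEpoch = 16384 is an arbitrary example value:

	package main

	import (
		"fmt"
		"math/big"
	)

	// schedule mirrors just the two methods the diff calls.
	type schedule interface {
		BlocksPerEpoch() uint64
		CalcEpochNumber(blockNum uint64) *big.Int
	}

	// fixedSchedule assumes a constant epoch length.
	type fixedSchedule struct{ blocksPerEpoch uint64 }

	func (s fixedSchedule) BlocksPerEpoch() uint64 { return s.blocksPerEpoch }

	func (s fixedSchedule) CalcEpochNumber(blockNum uint64) *big.Int {
		return new(big.Int).SetUint64(blockNum / s.blocksPerEpoch)
	}

	func main() {
		var sched schedule = fixedSchedule{blocksPerEpoch: 16384}
		epoch := uint64(3)
		first := epoch * sched.BlocksPerEpoch()      // cf. GetBlockNumberFromEpoch
		last := (epoch+1)*sched.BlocksPerEpoch() - 1 // cf. GetLastBlockNumberFromEpoch
		fmt.Println(first, last, sched.CalcEpochNumber(first)) // 49152 65535 3
	}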

@@ -214,7 +214,7 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
 		ret, st.gas, vmerr = evm.Call(sender, st.to(), st.data, st.gas, st.value)
 	}
 	if vmerr != nil {
-		utils.GetLogger().Debug("VM returned with error", "err", vmerr)
+		utils.Logger().Debug().Err(vmerr).Msg("VM returned with error")
 		// The only possible consensus-error would be if there wasn't
 		// sufficient balance to make the transfer happen. The first
 		// balance transfer may never fail.

@@ -83,7 +83,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
 	loadBatch := func(txs types.Transactions) {
 		for _, err := range add(txs) {
 			if err != nil {
-				utils.GetLogger().Debug("Failed to add journaled transaction", "err", err)
+				utils.Logger().Error().Err(err).Msg("Failed to add journaled transaction")
 				dropped++
 			}
 		}
@@ -112,7 +112,10 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
 			batch = batch[:0]
 		}
 	}
-	utils.GetLogger().Info("Loaded local transaction journal", "transactions", total, "dropped", dropped)
+	utils.Logger().Info().
+		Int("transactions", total).
+		Int("dropped", dropped).
+		Msg("Loaded local transaction journal")

 	return failure
 }
@@ -161,7 +164,10 @@ func (journal *txJournal) rotate(all map[common.Address]types.Transactions) erro
 		return err
 	}
 	journal.writer = sink
-	utils.GetLogger().Info("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all))
+	utils.Logger().Info().
+		Int("transactions", journaled).
+		Int("accounts", len(all)).
+		Msg("Regenerated local transaction journal")

 	return nil
 }

@@ -486,7 +486,7 @@ func (l *txPricedList) Underpriced(tx *types.Transaction, local *accountSet) boo
 	}
 	// Check if the transaction is underpriced or not
 	if len(*l.items) == 0 {
-		utils.GetLogger().Error("Pricing query for empty pool") // This cannot happen, print to catch programming errors
+		utils.Logger().Error().Msg("Pricing query for empty pool") // This cannot happen, print to catch programming errors
 		return false
 	}
 	cheapest := []*types.Transaction(*l.items)[0]

@@ -164,15 +164,24 @@ var DefaultTxPoolConfig = TxPoolConfig{
 func (config *TxPoolConfig) sanitize() TxPoolConfig {
 	conf := *config
 	if conf.Rejournal < time.Second {
-		utils.GetLogger().Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
+		utils.Logger().Warn().
+			Dur("provided", conf.Rejournal).
+			Dur("updated", time.Second).
+			Msg("Sanitizing invalid txpool journal time")
 		conf.Rejournal = time.Second
 	}
 	if conf.PriceLimit < 1 {
-		utils.GetLogger().Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
+		utils.Logger().Warn().
+			Uint64("provided", conf.PriceLimit).
+			Uint64("updated", DefaultTxPoolConfig.PriceLimit).
+			Msg("Sanitizing invalid txpool price limit")
 		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
 	}
 	if conf.PriceBump < 1 {
-		utils.GetLogger().Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
+		utils.Logger().Warn().
+			Uint64("provided", conf.PriceBump).
+			Uint64("updated", DefaultTxPoolConfig.PriceBump).
+			Msg("Sanitizing invalid txpool price bump")
 		conf.PriceBump = DefaultTxPoolConfig.PriceBump
 	}
 	return conf
@@ -236,7 +245,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
 	}
 	pool.locals = newAccountSet(pool.signer)
 	for _, addr := range config.Locals {
-		utils.GetLogger().Info("Setting new local account", "address", addr)
+		utils.Logger().Info().Interface("address", addr).Msg("Setting new local account")
 		pool.locals.add(addr)
 	}
 	pool.priced = newTxPricedList(pool.all)
@@ -247,10 +256,10 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
 		pool.journal = newTxJournal(config.Journal)

 		if err := pool.journal.load(pool.AddLocals); err != nil {
-			utils.GetLogger().Warn("Failed to load transaction journal", "err", err)
+			utils.Logger().Warn().Err(err).Msg("Failed to load transaction journal")
 		}
 		if err := pool.journal.rotate(pool.local()); err != nil {
-			utils.GetLogger().Warn("Failed to rotate transaction journal", "err", err)
+			utils.Logger().Warn().Err(err).Msg("Failed to rotate transaction journal")
 		}
 	}
 	// Subscribe events from blockchain
@@ -311,7 +320,11 @@ func (pool *TxPool) loop() {
 			pool.mu.RUnlock()

 			if pending != prevPending || queued != prevQueued || stales != prevStales {
-				utils.GetLogger().Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
+				utils.Logger().Debug().
+					Int("executable", pending).
+					Int("queued", queued).
+					Int("stales", stales).
+					Msg("Transaction pool status report")
 				prevPending, prevQueued, prevStales = pending, queued, stales
 			}
@@ -337,7 +350,7 @@ func (pool *TxPool) loop() {
 			if pool.journal != nil {
 				pool.mu.Lock()
 				if err := pool.journal.rotate(pool.local()); err != nil {
-					utils.GetLogger().Warn("Failed to rotate local tx journal", "err", err)
+					utils.Logger().Warn().Err(err).Msg("Failed to rotate local tx journal")
 				}
 				pool.mu.Unlock()
 			}
@@ -366,7 +379,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
 		newNum := newHead.Number.Uint64()

 		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
-			utils.GetLogger().Debug("Skipping deep transaction reorg", "depth", depth)
+			utils.Logger().Debug().Uint64("depth", depth).Msg("Skipping deep transaction reorg")
 		} else {
 			// Reorg seems shallow enough to pull in all transactions into memory
 			var discarded, included types.Transactions
@@ -378,26 +391,38 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
 			for rem.NumberU64() > add.NumberU64() {
 				discarded = append(discarded, rem.Transactions()...)
 				if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
-					utils.GetLogger().Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
+					utils.Logger().Error().
+						Str("block", oldHead.Number.String()).
+						Str("hash", oldHead.Hash().Hex()).
+						Msg("Unrooted old chain seen by tx pool")
 					return
 				}
 			}
 			for add.NumberU64() > rem.NumberU64() {
 				included = append(included, add.Transactions()...)
 				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
-					utils.GetLogger().Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
+					utils.Logger().Error().
+						Str("block", newHead.Number.String()).
+						Str("hash", newHead.Hash().Hex()).
+						Msg("Unrooted new chain seen by tx pool")
 					return
 				}
 			}
 			for rem.Hash() != add.Hash() {
 				discarded = append(discarded, rem.Transactions()...)
 				if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
-					utils.GetLogger().Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
+					utils.Logger().Error().
+						Str("block", oldHead.Number.String()).
+						Str("hash", oldHead.Hash().Hex()).
+						Msg("Unrooted old chain seen by tx pool")
 					return
 				}
 				included = append(included, add.Transactions()...)
 				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
-					utils.GetLogger().Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
+					utils.Logger().Error().
+						Str("block", newHead.Number.String()).
+						Str("hash", newHead.Hash().Hex()).
+						Msg("Unrooted new chain seen by tx pool")
 					return
 				}
 			}
@@ -410,7 +435,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
 	}
 	statedb, err := pool.chain.StateAt(newHead.Root)
 	if err != nil {
-		utils.GetLogger().Error("Failed to reset txpool state", "err", err)
+		utils.Logger().Error().Err(err).Msg("Failed to reset txpool state")
 		return
 	}
 	pool.currentState = statedb
@@ -418,7 +443,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
 	pool.currentMaxGas = newHead.GasLimit

 	// Inject any transactions discarded due to reorgs
-	utils.GetLogger().Debug("Reinjecting stale transactions", "count", len(reinject))
+	utils.Logger().Debug().Int("count", len(reinject)).Msg("Reinjecting stale transactions")
 	//senderCacher.recover(pool.signer, reinject)
 	pool.addTxsLocked(reinject, false)
@@ -450,7 +475,7 @@ func (pool *TxPool) Stop() {
 	if pool.journal != nil {
 		pool.journal.close()
 	}
-	utils.GetLogger().Info("Transaction pool stopped")
+	utils.Logger().Info().Msg("Transaction pool stopped")
 }

 // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
@@ -477,7 +502,7 @@ func (pool *TxPool) SetGasPrice(price *big.Int) {
 	for _, tx := range pool.priced.Cap(price, pool.locals) {
 		pool.removeTx(tx.Hash(), false)
 	}
-	utils.GetLogger().Info("Transaction pool price threshold updated", "price", price)
+	utils.Logger().Info().Str("price", price.String()).Msg("Transaction pool price threshold updated")
 }

 // State returns the virtual managed state of the transaction pool.
@@ -620,15 +645,16 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
 // whitelisted, preventing any associated transaction from being dropped out of
 // the pool due to pricing constraints.
 func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
+	logger := utils.Logger().With().Stack().Logger()
 	// If the transaction is already known, discard it
 	hash := tx.Hash()
 	if pool.all.Get(hash) != nil {
-		utils.GetLogger().Trace("Discarding already known transaction", "hash", hash)
+		logger.Warn().Str("hash", hash.Hex()).Msg("Discarding already known transaction")
 		return false, fmt.Errorf("known transaction: %x", hash)
 	}
 	// If the transaction fails basic validation, discard it
 	if err := pool.validateTx(tx, local); err != nil {
-		utils.GetLogger().Trace("Discarding invalid transaction", "hash", hash, "err", err)
+		logger.Warn().Err(err).Str("hash", hash.Hex()).Msg("Discarding invalid transaction")
 		invalidTxCounter.Inc(1)
 		return false, err
 	}
@@ -636,14 +662,20 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
 	if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
 		// If the new transaction is underpriced, don't accept it
 		if !local && pool.priced.Underpriced(tx, pool.locals) {
-			utils.GetLogger().Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
+			logger.Warn().
+				Str("hash", hash.Hex()).
+				Str("price", tx.GasPrice().String()).
+				Msg("Discarding underpriced transaction")
 			underpricedTxCounter.Inc(1)
 			return false, ErrUnderpriced
 		}
 		// New transaction is better than our worse ones, make room for it
 		drop := pool.priced.Discard(pool.all.Count()-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
 		for _, tx := range drop {
-			utils.GetLogger().Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
+			logger.Warn().
+				Str("hash", tx.Hash().Hex()).
+				Str("price", tx.GasPrice().String()).
+				Msg("Discarding freshly underpriced transaction")
 			underpricedTxCounter.Inc(1)
 			pool.removeTx(tx.Hash(), false)
 		}
@@ -667,7 +699,11 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
 		pool.priced.Put(tx)
 		pool.journalTx(from, tx)

-		utils.GetLogger().Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
+		logger.Warn().
+			Str("hash", tx.Hash().Hex()).
+			Interface("from", from).
+			Interface("to", tx.To()).
+			Msg("Pooled new executable transaction")

 		// We've directly injected a replacement transaction, notify subsystems
 		// go pool.txFeed.Send(NewTxsEvent{types.Transactions{tx}})
@@ -682,13 +718,17 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
 	// Mark local addresses and journal local transactions
 	if local {
 		if !pool.locals.contains(from) {
-			utils.GetLogger().Info("Setting new local account", "address", from)
+			utils.Logger().Info().Interface("address", from).Msg("Setting new local account")
 			pool.locals.add(from)
 		}
 	}
 	pool.journalTx(from, tx)

-	utils.GetLogger().Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
+	logger.Warn().
+		Str("hash", hash.Hex()).
+		Interface("from", from).
+		Interface("to", tx.To()).
+		Msg("Pooled new future transaction")
 	return replace, nil
 }
@@ -736,7 +776,7 @@ func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
 		return
 	}
 	if err := pool.journal.insert(tx); err != nil {
-		utils.GetLogger().Warn("Failed to journal local transaction", "err", err)
+		utils.Logger().Warn().Err(err).Msg("Failed to journal local transaction")
 	}
 }
@@ -933,6 +973,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
 func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 	// Track the promoted transactions to broadcast them at once
 	var promoted []*types.Transaction
+	logger := utils.Logger().With().Stack().Logger()

 	// Gather all the accounts potentially needing updates
 	if accounts == nil {
@@ -950,7 +991,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 		// Drop all transactions that are deemed too old (low nonce)
 		for _, tx := range list.Forward(pool.currentState.GetNonce(addr)) {
 			hash := tx.Hash()
-			utils.GetLogger().Trace("Removed old queued transaction", "hash", hash)
+			logger.Warn().Str("hash", hash.Hex()).Msg("Removed old queued transaction")
 			pool.all.Remove(hash)
 			pool.priced.Removed()
 		}
@@ -958,7 +999,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
 		for _, tx := range drops {
 			hash := tx.Hash()
-			utils.GetLogger().Trace("Removed unpayable queued transaction", "hash", hash)
+			logger.Warn().Str("hash", hash.Hex()).Msg("Removed unpayable queued transaction")
 			pool.all.Remove(hash)
 			pool.priced.Removed()
 			queuedNofundsCounter.Inc(1)
@@ -967,7 +1008,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 		for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
 			hash := tx.Hash()
 			if pool.promoteTx(addr, hash, tx) {
-				utils.GetLogger().Trace("Promoting queued transaction", "hash", hash)
+				logger.Warn().Str("hash", hash.Hex()).Msg("Promoting queued transaction")
 				promoted = append(promoted, tx)
 			}
 		}
@@ -978,7 +1019,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 				pool.all.Remove(hash)
 				pool.priced.Removed()
 				queuedRateLimitCounter.Inc(1)
-				utils.GetLogger().Trace("Removed cap-exceeding queued transaction", "hash", hash)
+				logger.Warn().Str("hash", hash.Hex()).Msg("Removed cap-exceeding queued transaction")
 			}
 		}
 		// Delete the entire queue entry if it became empty.
@@ -1031,7 +1072,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 					if nonce := tx.Nonce(); pool.pendingState.GetNonce(offenders[i]) > nonce {
 						pool.pendingState.SetNonce(offenders[i], nonce)
 					}
-					utils.GetLogger().Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+					logger.Warn().Str("hash", hash.Hex()).Msg("Removed fairness-exceeding pending transaction")
 				}
 				pending--
 			}
@@ -1053,7 +1094,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 					if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce {
 						pool.pendingState.SetNonce(addr, nonce)
 					}
-					utils.GetLogger().Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+					logger.Warn().Str("hash", hash.Hex()).Msg("Removed fairness-exceeding pending transaction")
 				}
 				pending--
 			}
@@ -1108,13 +1149,15 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 // are moved back into the future queue.
 func (pool *TxPool) demoteUnexecutables() {
 	// Iterate over all accounts and demote any non-executable transactions
+	logger := utils.Logger().With().Stack().Logger()
+
 	for addr, list := range pool.pending {
 		nonce := pool.currentState.GetNonce(addr)

 		// Drop all transactions that are deemed too old (low nonce)
 		for _, tx := range list.Forward(nonce) {
 			hash := tx.Hash()
-			utils.GetLogger().Trace("Removed old pending transaction", "hash", hash)
+			logger.Warn().Str("hash", hash.Hex()).Msg("Removed old pending transaction")
 			pool.all.Remove(hash)
 			pool.priced.Removed()
 		}
@@ -1122,21 +1165,21 @@ func (pool *TxPool) demoteUnexecutables() {
 		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
 		for _, tx := range drops {
 			hash := tx.Hash()
-			utils.GetLogger().Trace("Removed unpayable pending transaction", "hash", hash)
+			logger.Warn().Str("hash", hash.Hex()).Msg("Removed unpayable pending transaction")
 			pool.all.Remove(hash)
 			pool.priced.Removed()
 			pendingNofundsCounter.Inc(1)
 		}
 		for _, tx := range invalids {
 			hash := tx.Hash()
-			utils.GetLogger().Trace("Demoting pending transaction", "hash", hash)
+			logger.Warn().Str("hash", hash.Hex()).Msg("Demoting pending transaction")
 			pool.enqueueTx(hash, tx)
 		}
 		// If there's a gap in front, alert (should never happen) and postpone all transactions
 		if list.Len() > 0 && list.txs.Get(nonce) == nil {
 			for _, tx := range list.Cap(0) {
 				hash := tx.Hash()
-				utils.GetLogger().Error("Demoting invalidated transaction", "hash", hash)
+				logger.Error().Str("hash", hash.Hex()).Msg("Demoting invalidated transaction")
 				pool.enqueueTx(hash, tx)
 			}
 		}
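Several tx pool call sites above build a per-function sub-logger via utils.Logger().With().Stack().Logger() instead of calling the global logger directly. In zerolog, Stack() only takes effect when a zerolog.ErrorStackMarshaler is installed and the logged error actually carries a stack trace (github.com/pkg/errors attaches one; stdlib errors do not). A minimal sketch of that wiring, outside Harmony's utils wrapper, with illustrative error and hash values:

	package main

	import (
		"os"

		pkgerr "github.com/pkg/errors"
		"github.com/rs/zerolog"
		"github.com/rs/zerolog/pkgerrors"
	)

	func main() {
		// Without this marshaler, Stack() is a silent no-op.
		zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack

		base := zerolog.New(os.Stderr).With().Timestamp().Logger()

		// Mirrors the diff's logger := utils.Logger().With().Stack().Logger()
		logger := base.With().Stack().Logger()

		logger.Warn().
			Err(pkgerr.New("nonce too low")). // pkg/errors records a stack here
			Str("hash", "0xdeadbeef").        // illustrative hash
			Msg("Discarding invalid transaction")
	}

Note that many of these call sites also move from Trace to Warn severity, so the pool logs considerably more at default levels.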

@@ -29,8 +29,8 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	ethtypes "github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/rs/zerolog"
 	"golang.org/x/crypto/sha3"

 	"github.com/harmony-one/harmony/internal/utils"
@@ -69,7 +69,7 @@ func (n *BlockNonce) UnmarshalText(input []byte) error {
 	return hexutil.UnmarshalFixedText("BlockNonce", input, n[:])
 }

-// Header represents a block header in the Ethereum blockchain.
+// Header represents a block header in the Harmony blockchain.
 type Header struct {
 	ParentHash common.Hash    `json:"parentHash" gencodec:"required"`
 	Coinbase   common.Address `json:"miner"      gencodec:"required"`
@@ -118,13 +118,15 @@ func (h *Header) Size() common.StorageSize {
 }

 // Logger returns a sub-logger with block contexts added.
-func (h *Header) Logger(logger log.Logger) log.Logger {
-	return logger.New(
-		"blockHash", h.Hash(),
-		"blockShard", h.ShardID,
-		"blockEpoch", h.Epoch,
-		"blockNumber", h.Number,
-	)
+func (h *Header) Logger(logger *zerolog.Logger) *zerolog.Logger {
+	nlogger := logger.
+		With().
+		Str("blockHash", h.Hash().Hex()).
+		Uint32("blockShard", h.ShardID).
+		Str("blockEpoch", h.Epoch.String()).
+		Str("blockNumber", h.Number.String()).
+		Logger()
+	return &nlogger
 }

 // GetShardState returns the deserialized shard state object.
@@ -175,9 +177,10 @@ type Block struct {
 // SetLastCommitSig sets the last block's commit group signature.
 func (b *Block) SetLastCommitSig(sig []byte, signers []byte) {
 	if len(sig) != len(b.header.LastCommitSignature) {
-		utils.GetLogInstance().Warn("SetLastCommitSig: sig size mismatch",
-			"srcLen", len(sig),
-			"dstLen", len(b.header.LastCommitSignature))
+		utils.Logger().Warn().
+			Int("srcLen", len(sig)).
+			Int("dstLen", len(b.header.LastCommitSignature)).
+			Msg("SetLastCommitSig: sig size mismatch")
 	}
 	copy(b.header.LastCommitSignature[:], sig[:])
 	b.header.LastCommitBitmap = append(signers[:0:0], signers...)
@@ -436,7 +439,7 @@ func (b *Block) Hash() common.Hash {
 	//if hash := b.hash.Load(); hash != nil {
 	//	return hash.(common.Hash)
 	//}
-	//b.Logger(utils.GetLogger()).Debug("finalizing and caching block hash")
+	// b.Logger(utils.Logger()).Debug().Msg("finalizing and caching block hash")
 	v := b.header.Hash()
 	b.hash.Store(v)
 	return v
@@ -507,6 +510,6 @@ func (b *Block) AddShardState(shardState ShardState) error {
 }

 // Logger returns a sub-logger with block contexts added.
-func (b *Block) Logger(logger log.Logger) log.Logger {
+func (b *Block) Logger(logger *zerolog.Logger) *zerolog.Logger {
 	return b.header.Logger(logger)
 }
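The Logger methods above return a child logger with block fields pre-attached, so downstream code can log without re-passing block context. A toy mirror of the pattern; the header struct and field values here are illustrative, not Harmony's types.Header:

	package main

	import (
		"math/big"
		"os"

		"github.com/rs/zerolog"
	)

	type header struct {
		shardID uint32
		epoch   *big.Int
		number  *big.Int
	}

	// Same shape as the new Header.Logger: build the child logger once and
	// return a pointer so call sites can chain events off it.
	func (h *header) Logger(logger *zerolog.Logger) *zerolog.Logger {
		nlogger := logger.With().
			Uint32("blockShard", h.shardID).
			Str("blockEpoch", h.epoch.String()).
			Str("blockNumber", h.number.String()).
			Logger()
		return &nlogger
	}

	func main() {
		base := zerolog.New(os.Stderr)
		h := &header{shardID: 2, epoch: big.NewInt(7), number: big.NewInt(114688)}
		h.Logger(&base).Info().Msg("processed block") // block fields ride along
	}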

@@ -50,7 +50,7 @@ type txdata struct {
 	Price     *big.Int `json:"gasPrice"  gencodec:"required"`
 	GasLimit  uint64   `json:"gas"       gencodec:"required"`
 	ShardID   uint32   `json:"shardID"   gencodec:"required"`
-	ToShardID uint32   `json:"toShardID" rlp:"nil"` // for cross-shard tx's destination shard ID; nil means intra-shard tx
+	ToShardID uint32   `json:"toShardID" gencodec:"required"`
 	Recipient *common.Address `json:"to" rlp:"nil"` // nil means contract creation
 	Amount    *big.Int `json:"value" gencodec:"required"`
 	Payload   []byte   `json:"input" gencodec:"required"`

@@ -218,6 +218,7 @@ func (fs FrontierSigner) Hash(tx *Transaction) common.Hash {
 		tx.data.Price,
 		tx.data.GasLimit,
 		tx.data.ShardID,
+		tx.data.ToShardID,
 		tx.data.Recipient,
 		tx.data.Amount,
 		tx.data.Payload,
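Together with the txdata change above, this hunk folds ToShardID into the RLP list that the signer hashes, so the destination shard is covered by the transaction signature. A sketch of why adding a field changes the signing hash; rlpHash mirrors the usual keccak256-over-RLP helper, and the field values are placeholders:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/rlp"
		"golang.org/x/crypto/sha3"
	)

	// rlpHash: keccak256 over the RLP encoding of the listed fields.
	func rlpHash(x interface{}) (h [32]byte) {
		hw := sha3.NewLegacyKeccak256()
		rlp.Encode(hw, x)
		hw.Sum(h[:0])
		return h
	}

	func main() {
		// Same nonce and shardID, with and without a destination shard field:
		without := rlpHash([]interface{}{uint64(0), uint32(1)})
		with := rlpHash([]interface{}{uint64(0), uint32(1), uint32(3)})
		fmt.Println(without != with) // true: the signed payloads differ
	}

Because the destination shard is part of the signed payload, a signature produced for one destination shard cannot be replayed toward another.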

@@ -169,6 +169,11 @@ func (evm *EVM) Cancel() {
 	atomic.StoreInt32(&evm.abort, 1)
 }

+// Cancelled returns true if Cancel has been called
+func (evm *EVM) Cancelled() bool {
+	return atomic.LoadInt32(&evm.abort) == 1
+}
+
 // Interpreter returns the current interpreter
 func (evm *EVM) Interpreter() Interpreter {
 	return evm.interpreter
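Cancelled() is the read side of the abort flag that Cancel() sets; both go through sync/atomic so a watchdog goroutine (for instance, an RPC timeout) can stop a long-running interpreter loop safely. A self-contained sketch of the same flag pattern; the evm struct here is a stand-in, not the real vm.EVM:

	package main

	import (
		"fmt"
		"sync/atomic"
		"time"
	)

	type evm struct{ abort int32 }

	func (e *evm) Cancel()         { atomic.StoreInt32(&e.abort, 1) }
	func (e *evm) Cancelled() bool { return atomic.LoadInt32(&e.abort) == 1 }

	func main() {
		e := &evm{}
		go func() {
			time.Sleep(10 * time.Millisecond)
			e.Cancel() // e.g. an execution timeout firing
		}()
		for !e.Cancelled() {
			time.Sleep(time.Millisecond) // stand-in for executing opcodes
		}
		fmt.Println("execution aborted")
	}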

Some files were not shown because too many files have changed in this diff.
