Merge branch 'master' of github.com:harmony-one/harmony into deposit-contract

pull/367/head
ak 6 years ago
commit 71c9794f45
Changed files (number of changed lines in parentheses):
1. .coveralls.yml (2)
2. .github/ISSUE_TEMPLATE/bug_report.md (30)
3. .github/ISSUE_TEMPLATE/design_issue.md (31)
4. .gitignore (12)
5. .gitmodules (10)
6. .travis.gofmt.sh (14)
7. .travis.yml (33)
8. CONTRIBUTING.md (50)
9. FLA.md (226)
10. LICENSE (12)
11. PULL_REQUEST_TEMPLATE.md (19)
12. README.md (120)
13. api/beaconchain/beaconchain.pb.go (37)
14. api/beaconchain/beaconchain.proto (1)
15. api/client/client.go (7)
16. api/client/service/client.go (5)
17. api/client/service/server.go (9)
18. api/client/service/server_test.go (15)
19. api/consensus/consensus.pb.go (79)
20. api/consensus/consensus.proto (11)
21. api/proto/bcconn/bcconn.go (19)
22. api/proto/bcconn/bcconn_test.go (34)
23. api/proto/common.go (13)
24. api/proto/discovery/pingpong.go (77)
25. api/proto/discovery/pingpong_test.go (22)
26. api/proto/message/gen.sh (1)
27. api/proto/message/message.pb.go (241)
28. api/proto/message/message.proto (33)
29. api/proto/node/node.go (42)
30. api/proto/node/node_test.go (4)
31. api/services/explorer/service.go (42)
32. api/services/explorer/storage.go (10)
33. api/services/syncing/downloader/client.go (57)
34. api/services/syncing/downloader/proto/downloader.pb.go (128)
35. api/services/syncing/downloader/proto/downloader.proto (14)
36. api/services/syncing/downloader/server.go (3)
37. api/services/syncing/errors.go (5)
38. api/services/syncing/interface.go (2)
39. api/services/syncing/syncing.go (372)
40. api/services/syncing/syncing.md (15)
41. appspec.yml (4)
42. cmd/beaconchain/main.go (25)
43. cmd/bootnode/main.go (90)
44. cmd/client/txgen/main.go (65)
45. cmd/client/wallet/main.go (25)
46. cmd/client/wallet/wallet_test.go (11)
47. cmd/harmony.go (91)
48. consensus/consensus.go (280)
49. consensus/consensus_engine.go (2)
50. consensus/consensus_leader.go (443)
51. consensus/consensus_leader_msg.go (153)
52. consensus/consensus_leader_msg_test.go (56)
53. consensus/consensus_leader_test.go (196)
54. consensus/consensus_state.go (20)
55. consensus/consensus_test.go (110)
56. consensus/consensus_validator.go (328)
57. consensus/consensus_validator_msg.go (78)
58. consensus/consensus_validator_msg_test.go (39)
59. consensus/consensus_validator_test.go (146)
60. core/block_validator.go (2)
61. core/blockchain.go (24)
62. core/chain_makers.go (14)
63. core/genesis.go (17)
64. core/headerchain.go (8)
65. core/state/database.go (10)
66. core/state/dump.go (6)
67. core/state/journal.go (28)
68. core/state/managed_state.go (18)
69. core/state/managed_state_test.go (14)
70. core/state/state_object.go (85)
71. core/state/state_test.go (12)
72. core/state/statedb.go (116)
73. core/state/statedb_test.go (40)
74. core/state_processor.go (4)
75. core/tx_pool.go (4)
76. core/tx_pool_test.go (36)
77. core/types.go (4)
78. core/types/block.go (9)
79. core/vm/logger_test.go (2)
80. core/vm/runtime/runtime.go (10)
81. core/vm/runtime/runtime_test.go (6)
82. coverage.sh (3)
83. crypto/bls/bls.go (230)
84. crypto/bls/bls_test.go (38)
85. crypto/cosi.go (20)
86. crypto/pki/utils.go (37)
87. crypto/pki/utils_test.go (16)
88. go.mod (1)
89. internal/attack/attack.go (14)
90. internal/attack/attack_test.go (2)
91. internal/beaconchain/README.md (3)
92. internal/beaconchain/libs/beaconchain.go (148)
93. internal/beaconchain/libs/beaconchain_handler.go (21)
94. internal/beaconchain/libs/beaconchain_test.go (84)
95. internal/beaconchain/rpc/server.go (8)
96. internal/db/db.go (242)
97. internal/db/db_test.go (194)
98. internal/db/interface.go (36)
99. internal/db/memory_db.go (135)
100. internal/newnode/README.md (1)
Some files were not shown because too many files have changed in this diff.

@ -0,0 +1,2 @@
repo_token: cr4Aim5IFC8A7IvStlMHQbVMRvBhRq0YH

@ -0,0 +1,30 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Check out code with "sha1" commit
2. Build
3. run local test using "...." commands
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Environment (please complete the following information):**
- OS: [Linux, MacOS]
- Go environment [ ```go env``` ]
**Additional context**
Add any other context about the problem here.

@ -0,0 +1,31 @@
---
name: Design Issue
about: Code has an architecture or design issue
title: ''
labels: design
assignees: ''
---
## Summary
<!-- Describe the issue in a few sentences. -->
## Current Design
<!-- Describe how the current version of relevant code works.
Refer to specific files/lines or specific packages/methods where
applicable. -->
## Problems
<!-- Discuss in depth why the current design is problematic.
If the design fails to capture or embody certain concepts,
elaborate on them so that others can also see the need for it. -->
## Proposal
<!-- Propose how to evolve the design and code structure.
If the problem has been solved in other (maybe outside) projects,
or in different parts of the code, explain how they work.
If there are multiple ways,
briefly discuss pros and cons of each approach. -->

12
.gitignore vendored

@ -5,7 +5,8 @@
.vscode
# Executables
harmony-benchmark
harmony
main
*.pyc
*.exe
*.out
@ -41,3 +42,12 @@ tmp_log
# Others
*.png
# beacon chain data
bc_config.json
# leveldb local storage
db/
# bootnode keystore
.bnkey

10
.gitmodules vendored

@ -0,0 +1,10 @@
[submodule "vendor/github.com/ethereum/go-ethereum"]
path = vendor/github.com/ethereum/go-ethereum
url = https://github.com/harmony-one/go-ethereum
branch = master
[submodule "vendor/github.com/golang/protobuf"]
path = vendor/github.com/golang/protobuf
url = https://github.com/golang/protobuf
[submodule "vendor/github.com/dedis/kyber"]
path = vendor/github.com/dedis/kyber
url = https://github.com/dedis/kyber

@ -1,14 +0,0 @@
#!/bin/bash
if [ $(golint ./... | wc | awk '{print $1}') -gt 2 ]; then
echo "Go code is not formatted:"
gofmt -d .
exit 1
fi
if [ -n "$(gofmt -l .)" ]; then
echo "Go code is not formatted:"
gofmt -d .
exit 1
else
echo "Go code is well formatted ;)"
fi

@ -1,18 +1,37 @@
language: go
go:
- master
- stable
go_import_path: github.com/harmony-one/harmony
install:
- export GOPATH=$HOME/gopath
- export CGO_CPPFLAGS="-I$GOPATH/src/github.com/harmony-one/bls/include -I$GOPATH/src/github.com/harmony-one/mcl/include"
- export CGO_LDFLAGS="-L$GOPATH/src/github.com/harmony-one/bls/lib -L$GOPATH/src/github.com/harmony-one/mcl/lib"
- export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GOPATH/src/github.com/harmony-one/bls/lib:$GOPATH/src/github.com/harmony-one/mcl/lib
- cd $HOME/gopath/src
- cd github.com/harmony-one/harmony
- cd github.com/harmony-one
- git clone https://github.com/harmony-one/mcl.git
- cd mcl
- make
- cd ..
- git clone https://github.com/harmony-one/bls.git
- cd bls
- make
- cd ../harmony
- go get -t -v ./...
- go get -u golang.org/x/lint/golint
- go get -u golang.org/x/tools/cmd/goimports
- go get gopkg.in/check.v1
- ./.travis.gofmt.sh
- ./scripts/travis_checker.sh
- go build -v ./...
script:
- ./.travis.gofmt.sh
- ./scripts/travis_checker.sh
notifications:
slack:
harmonyone:gggCd1QQopsQAW8JYgBWiH7M
# secure: RPB3ThYIGuDUidvaWfOA7Hc9x1bDfd5+Y10r7xwY+NGCN3zW86s/GNLpLutI0MWTV9e2CJupHvz5clp8Ktle/tVjLhs6jHQnNV7U8PTWKkL5By6IFVAHN12unMQn/m0RPwqMfdubajXoV51XhbFA/iow/0fqwsd61VdPIuBrlQjy9z7kyVnRLNoGvYjDqKEkJfYVb3qFNFLzD0F7Y2AgxnezIRjsTLgHzR4owLJYqVMhvTYIV9/vSf1w4UUPzhHyZRESl6bri+a1+g7GxE32OtNwq68xxVeeJcrO/MbjAHHW9V6BW1MjJfYzD5T+7JHIfZOjV2WgzJ7uCkVYztfq+02yOCSWsLNxFVojIDhVFEhhJ6Vd2Zf1otolS7j0svK/qNmShID9q9NAasaI105GsQgtaSPAUGd88J/vyX2ndG1nDOvxmgOo10tZFOnPHW7JnWMybk3PLza8o1ujA7X3JFdvDA8BPP9h6MVP4N7doCQ/n4Crts53HvEWlvcv5sBNu61WYlSTBzf1qNwBKMyN2E0rNubsxKmW8B6jLdWYdlx57nyTRPraNKGE1fnUW5nWRZGax3F1tQRwEfpQMk22qgeUK0RYWsPgHFaPciKCA3dJX7t1k/ib9pyR4nc9SZnYw54KMhkAXPIVQ0iy0EpTAH1DNYV6v8zXCwjl+BdkhlY=
slack: harmonyone:gggCd1QQopsQAW8JYgBWiH7M
after_success:
- wget https://raw.githubusercontent.com/DiscordHooks/travis-ci-discord-webhook/master/send.sh
- chmod +x send.sh
- ./send.sh success $WEBHOOK_URL
after_failure:
- wget https://raw.githubusercontent.com/DiscordHooks/travis-ci-discord-webhook/master/send.sh
- chmod +x send.sh
- ./send.sh failure $WEBHOOK_URL

@ -0,0 +1,50 @@
# Contributing To Harmony
## Coding Guidelines
* In general, we follow [effective_go](https://golang.org/doc/effective_go.html)
* Code must adhere to the official [Go formatting guidelines](https://golang.org/doc/effective_go.html#formatting) (i.e. uses [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports)).
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
## Pull Request (PR)
This [GitHub document](https://help.github.com/articles/creating-a-pull-request/) provides some guidance on how to create a pull request on GitHub.
## PR requirement
To pursue engineering excellence, we insist on the highest standard for the quality of each PR.
* For each PR, please run [golint](https://github.com/golang/lint) and [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports) to fix the basic issues/warnings.
* Make sure you understand [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/).
* Add a [Test] section in every PR detailing your test process and results. If the test log is too long, please include a link to [gist](https://gist.github.com/) and add the link to the PR.
## Typical workflow example
The best practice is to reorder and squash your local commits before the PR submission to create an atomic and self-contained PR.
This [book chapter](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History) provides detailed explanation and guidance on how to rewrite the local git history.
For example, a typical workflow is like the following.
```bash
# assuming you are working on a fix of bug1, and use a local branch called "fixes_of_bug_1".
git clone https://github.com/harmony-one/harmony
cd harmony
# create a local branch to keep track of the origin/master
git branch fixes_of_bug_1 origin/master
git checkout fixes_of_bug_1
# make changes, build, test locally, commit changes locally
# don't forget to squash or rearrange your commits using "git rebase -i"
git rebase -i origin/master
# rebase your change on the top of the tree
git pull --rebase
# push your branch and create a PR
git push origin fixes_of_bug_1:pr_fixes_of_bug_1
```
## Licensing
Please see [our Fiduciary License Agreement](FLA.md). By your submission of
your contribution to us, you and we mutually agree to the terms and conditions
of the agreement.

226
FLA.md

@ -0,0 +1,226 @@
# Fiduciary License Agreement 2.0
Thank you for your interest in contributing to Simple Rules Company's Harmony
("We" or "Us").
The purpose of this contributor agreement ("Agreement") is to clarify and
document the rights granted by contributors to Us. By Your Submission of your
Contribution to Us, You and We mutually agree to the terms and conditions of
this Agreement.
## 0. Preamble
Software is deeply embedded in all aspects of our lives and it is important
that it empower, rather than restrict us. Free Software gives everybody the
rights to use, understand, adapt and share software. These rights help support
other fundamental freedoms like freedom of speech, press and privacy.
Development of Free Software can follow many patterns. In some cases whole
development is handled by a sole programmer or a small group of people. But
usually, the creation and maintenance of software is a complex process that
requires the contribution of many individuals. This also affects who owns the
rights to the software. In the latter case, rights in software are owned
jointly by a great number of individuals.
To tackle this issue some projects require a full copyright assignment to be
signed by all contributors. The problem with such assignments is that they
often lack checks and balances that would protect the contributors from
potential abuse of power from the new copyright holder.
FSFE’s Fiduciary License Agreement (FLA) was created by the Free Software
Foundation Europe e.V. with just that in mind – to concentrate all deciding
power within one entity and prevent fragmentation of rights on one hand, while
on the other preventing that single entity from abusing its power. The main
aim is to ensure that the software covered under the FLA will forever remain
Free Software.
This process only serves for the transfer of economic rights. So-called moral
rights (e.g. authors right to be identified as author) remain with the original
author(s) and are inalienable.
## How to use this FLA
If You are an employee and have created the Contribution as part of your
employment, You need to have Your employer approve this Agreement or sign the
Entity version of this document. If You do not own the Copyright in the entire
work of authorship, any other author of the Contribution should also sign this
– in any event, please contact Us at licensing@harmony.one
## 1. Definitions
**"You"** means the individual Copyright owner who Submits a Contribution to Us.
**"Contribution"** means any original work of authorship, including any
original modifications or additions to an existing work of authorship,
Submitted by You to Us, in which You own the Copyright.
**"Copyright"** means all rights protecting works of authorship, including
copyright, moral and neighboring rights, as appropriate, for the full term of
their existence.
**"Material"** means the software or documentation made available by Us to
third parties. When this Agreement covers more than one software project, the
Material means the software or documentation to which the Contribution was
Submitted. After You Submit the Contribution, it may be included in the
Material.
**"Submit"** means any act by which a Contribution is transferred to Us by You
by means of tangible or intangible media, including but not limited to
electronic mailing lists, source code control systems, and issue tracking
systems that are managed by, or on behalf of, Us, but excluding any transfer
that is conspicuously marked or otherwise designated in writing by You as "Not
a Contribution."
**"Documentation"** means any non-software portion of a Contribution.
## 2. License grant
### 2.1 Copyright license to Us
Subject to the terms and conditions of this Agreement, You hereby grant to Us a
worldwide, royalty-free, exclusive, perpetual and irrevocable (except as stated
in Section 8.2) license, with the right to transfer an unlimited number of
non-exclusive licenses or to grant sublicenses to third parties, under the
Copyright covering the Contribution to use the Contribution by all means,
including, but not limited to:
* publish the Contribution,
* modify the Contribution,
* prepare derivative works based upon or containing the Contribution and/or to
combine the Contribution with other Materials,
* reproduce the Contribution in original or modified form,
* distribute, to make the Contribution available to the public, display and
publicly perform the Contribution in original or modified form.
### 2.2 Moral rights
Moral Rights remain unaffected to the extent they are recognized and not
waivable by applicable law. Notwithstanding, You may add your name to the
attribution mechanism customary used in the Materials you Contribute to, such
as the header of the source code files of Your Contribution, and We will
respect this attribution when using Your Contribution.
### 2.3 Copyright license back to You
Upon such grant of rights to Us, We immediately grant to You a worldwide,
royalty-free, non-exclusive, perpetual and irrevocable license, with the right
to transfer an unlimited number of non-exclusive licenses or to grant
sublicenses to third parties, under the Copyright covering the Contribution to
use the Contribution by all means, including, but not limited to:
* publish the Contribution,
* modify the Contribution,
* prepare derivative works based upon or containing the Contribution and/or to
combine the Contribution with other Materials,
* reproduce the Contribution in original or modified form,
* distribute, to make the Contribution available to the public, display and
publicly perform the Contribution in original or modified form.
This license back is limited to the Contribution and does not provide any
rights to the Material.
## 3. Patents
### 3.1 Patent license
Subject to the terms and conditions of this Agreement You hereby grant to Us
and to recipients of Materials distributed by Us a worldwide, royalty-free,
non-exclusive, perpetual and irrevocable (except as stated in Section 3.2)
patent license, with the right to transfer an unlimited number of non-exclusive
licenses or to grant sublicenses to third parties, to make, have made, use,
sell, offer for sale, import and otherwise transfer the Contribution and the
Contribution in combination with any Material (and portions of such
combination). This license applies to all patents owned or controlled by You,
whether already acquired or hereafter acquired, that would be infringed by
making, having made, using, selling, offering for sale, importing or otherwise
transferring of Your Contribution(s) alone or by combination of Your
Contribution(s) with any Material.
### 3.2 Revocation of patent license
You reserve the right to revoke the patent license stated in section 3.1 if We
make any infringement claim that is targeted at your Contribution and not
asserted for a Defensive Purpose. An assertion of claims of the Patents shall
be considered for a "Defensive Purpose" if the claims are asserted against an
entity that has filed, maintained, threatened, or voluntarily participated in a
patent infringement lawsuit against Us or any of Our licensees.
## 4. License obligations by Us
We agree to (sub)license the Contribution or any Materials containing, based on
or derived from your Contribution under the terms of any licenses the Free
Software Foundation classifies as Free Software License and which are approved
by the Open Source Initiative as Open Source licenses.
We agree to license patents owned or controlled by You only to the extent
necessary to (sub)license Your Contribution(s) and the combination of Your
Contribution(s) with the Material under the terms of any licenses the Free
Software Foundation classifies as Free Software licenses and which are approved
by the Open Source Initiative as Open Source licenses.
## 5. Disclaimer
THE CONTRIBUTION IS PROVIDED "AS IS". MORE PARTICULARLY, ALL EXPRESS OR
IMPLIED WARRANTIES INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTY OF
SATISFACTORY QUALITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
EXPRESSLY DISCLAIMED BY YOU TO US AND BY US TO YOU. TO THE EXTENT THAT ANY
SUCH WARRANTIES CANNOT BE DISCLAIMED, SUCH WARRANTY IS LIMITED IN DURATION AND
EXTENT TO THE MINIMUM PERIOD AND EXTENT PERMITTED BY LAW.
## 6. Consequential damage waiver
TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU OR WE
BE LIABLE FOR ANY LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF DATA,
INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL AND EXEMPLARY DAMAGES ARISING OUT
OF THIS AGREEMENT REGARDLESS OF THE LEGAL OR EQUITABLE THEORY (CONTRACT, TORT
OR OTHERWISE) UPON WHICH THE CLAIM IS BASED.
## 7. Approximation of disclaimer and damage waiver
IF THE DISCLAIMER AND DAMAGE WAIVER MENTIONED IN SECTION 5. AND SECTION 6.
CANNOT BE GIVEN LEGAL EFFECT UNDER APPLICABLE LOCAL LAW, REVIEWING COURTS SHALL
APPLY LOCAL LAW THAT MOST CLOSELY APPROXIMATES AN ABSOLUTE WAIVER OF ALL CIVIL
OR CONTRACTUAL LIABILITY IN CONNECTION WITH THE CONTRIBUTION.
## 8. Term
8.1 This Agreement shall come into effect upon Your acceptance of the terms and
conditions.
8.2 This Agreement shall apply for the term of the copyright and patents
licensed here. However, You shall have the right to terminate the Agreement if
We do not fulfill the obligations as set forth in Section 4. Such termination
must be made in writing.
8.3 In the event of a termination of this Agreement Sections 5., 6., 7., 8.,
and 9. shall survive such termination and shall remain in full force
thereafter. For the avoidance of doubt, Free and Open Source Software
(sub)licenses that have already been granted for Contributions at the date of
the termination shall remain in full force after the termination of this
Agreement.
## 9. Miscellaneous
9.1 This Agreement and all disputes, claims, actions, suits or other
proceedings arising out of this agreement or relating in any way to it shall be
governed by the laws of the State of California, in the United States of
America excluding its private international law provisions.
9.2 This Agreement sets out the entire agreement between You and Us for Your
Contributions to Us and overrides all other agreements or understandings.
9.3 In case of Your death, this agreement shall continue with Your heirs. In
case of more than one heir, all heirs must exercise their rights through a
commonly authorized person.
9.4 If any provision of this Agreement is found void and unenforceable, such
provision will be replaced to the extent possible with a provision that comes
closest to the meaning of the original provision and that is enforceable. The
terms and conditions set forth in this Agreement shall apply notwithstanding
any failure of essential purpose of this Agreement or any limited remedy to the
maximum extent possible under law.
9.5 You agree to notify Us of any facts or circumstances of which you become
aware that would make this Agreement inaccurate in any respect.

@ -1,6 +1,4 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Copyright (c) 2018-2019 Simple Rules Company.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -9,13 +7,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -0,0 +1,19 @@
## Issue
<!-- link to the issue number or description of the issue -->
## Test
#### Test Coverage Data
<!-- run 'go test -cover' in the directory where you made changes -->
* Before
* After
#### Test/Run Logs
<!-- links to the test/run log, or copy&paste part of the log if it is too long -->
<!-- or you may just create a [gist](https://gist.github.com/) and link the gist here -->
## TODO

@ -1,51 +1,131 @@
# Harmony Benchmark
# Harmony
[![Build Status](https://travis-ci.com/harmony-one/harmony.svg?token=DnoYvYiTAk7pqTo9XsTi&branch=master)](https://travis-ci.com/harmony-one/harmony)
<a href='https://github.com/jpoles1/gopherbadger' target='_blank'>![gopherbadger-tag-do-not-edit](https://img.shields.io/badge/Go%20Coverage-39%25-brightgreen.svg?longCache=true&style=flat)</a>
## Coding Guidelines
* In general, we follow [effective_go](https://golang.org/doc/effective_go.html)
* Code must adhere to the official [Go formatting guidelines](https://golang.org/doc/effective_go.html#formatting) (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
<a href='https://github.com/jpoles1/gopherbadger' target='_blank'>![gopherbadger-tag-do-not-edit](https://img.shields.io/badge/Go%20Coverage-45%25-brightgreen.svg?longCache=true&style=flat)</a>
<a href="https://discord.gg/kdf8a6T">![Discord](https://img.shields.io/discord/532383335348043777.svg)</a>
[![Coverage Status](https://coveralls.io/repos/github/harmony-one/harmony/badge.svg?branch=master)](https://coveralls.io/github/harmony-one/harmony?branch=master)
## Installation Requirements
GMP and OpenSSL
```bash
brew install gmp
brew install openssl
```
## Dev Environment Setup
```
```bash
export GOPATH=$HOME/<path_of_your_choice>
export CGO_CFLAGS="-I$GOPATH/src/github.com/harmony-one/bls/include -I$GOPATH/src/github.com/harmony-one/mcl/include -I/usr/local/opt/openssl/include"
export CGO_LDFLAGS="-L$GOPATH/src/github.com/harmony-one/bls/lib -L/usr/local/opt/openssl/lib"
export LD_LIBRARY_PATH=$GOPATH/src/github.com/harmony-one/bls/lib:$GOPATH/src/github.com/harmony-one/mcl/lib:/usr/local/opt/openssl/lib
export LIBRARY_PATH=$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
mkdir -p $HOME/<path_of_your_choice>/src/github.com/harmony-one
cd $HOME/<path_of_your_choice>/src/github.com/harmony-one
git clone git@github.com:harmony-one/mcl.git
cd mcl && make -j4 && cd ..
git clone git@github.com:harmony-one/bls.git
cd bls && make -j4 && cd ..
git clone git@github.com:harmony-one/harmony.git
cd harmony
go get ./...
git submodule update --init --recursive
```
## Usage
## Build
### Running local test
Harmony server / main node:
```
./test/deploy.sh ./test/configs/local_config1.txt
go build -o bin/harmony cmd/harmony.go
```
## Testing
Beacon node:
```
go build -o bin/beacon cmd/beaconchain/main.go
```
Make sure you run the following command and make sure everything passes before submitting your code.
Wallet:
```
go build -o bin/wallet cmd/client/wallet/main.go
```
Tx Generator:
```
./test_before_submit.sh
go build -o bin/txgen cmd/client/txgen/main.go
```
## Linting
You can also run the script `./scripts/go_executable_build.sh` to build all the executables.
Make sure you run the following command and make sure everything passes golint.
Some of our scripts require bash 4.x support; please [install bash 4.x](http://tldrdevnotes.com/bash-upgrade-3-4-macos) on MacOS X.
## Usage
You may build src/harmony.go locally and run a local test.
### Running local test
The deploy.sh script creates a local environment of the Harmony blockchain devnet based on the configuration file.
The configuration file specifies the number of nodes and their IP/port.
The script starts one local beacon chain node, the blockchain nodes, and runs a transaction generator program that generates and sends simulated transactions to the local blockchain.
```bash
./test/deploy.sh ./test/configs/local_config1.txt
```
./lint_before_submit.sh
## Testing
Make sure you use the following command and make sure everything passes before submitting your code.
```bash
./test/test_before_submit.sh
```
## License
Harmony is licensed under the MIT License. See [`LICENSE`](LICENSE) file for
the terms and conditions.
Also please see [our Fiduciary License Agreement](FLA.md) if you are
contributing to the project. By your submission of your contribution to us, you
and we mutually agree to the terms and conditions of the agreement.
## Contributing To Harmony
See [`CONTRIBUTING`](CONTRIBUTING.md) for details.
## Development Status
### Features Done
* Basic consensus protocol with O(n) complexity
* Basic validator server
* P2P network connection and unicast
* Account model and support for Solidity
* Simple wallet program
* Mock beacon chain with static sharding
* Information dispersal algorithm using erasure encoding (to be integrated)
* Blockchain explorer with performance report and transaction lookup
* Transaction generator for load testing
### Features To Be Implemented
* Full beacon chain with multiple validators
* Resharding
* Staking on beacon chain
* Fast state synchronization
* Distributed randomness generation with VRF and VDF
* Kademlia routing
* P2P network and gossiping
* Full protocol of consensus with BLS multi-sig and view-change protocol
* Integration with WASM
* Cross-shard transaction

@ -98,6 +98,7 @@ type FetchLeadersResponse_Leader struct {
Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"`
Port string `protobuf:"bytes,2,opt,name=port,proto3" json:"port,omitempty"`
ShardId uint32 `protobuf:"varint,3,opt,name=shardId,proto3" json:"shardId,omitempty"`
PeerID string `protobuf:"bytes,4,opt,name=peerID,proto3" json:"peerID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -149,6 +150,13 @@ func (m *FetchLeadersResponse_Leader) GetShardId() uint32 {
return 0
}
func (m *FetchLeadersResponse_Leader) GetPeerID() string {
if m != nil {
return m.PeerID
}
return ""
}
func init() {
proto.RegisterType((*FetchLeadersRequest)(nil), "beaconchain.FetchLeadersRequest")
proto.RegisterType((*FetchLeadersResponse)(nil), "beaconchain.FetchLeadersResponse")
@ -158,20 +166,21 @@ func init() {
func init() { proto.RegisterFile("beaconchain.proto", fileDescriptor_474fd8061d1037cf) }
var fileDescriptor_474fd8061d1037cf = []byte{
// 207 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x4a, 0x4d, 0x4c,
0xce, 0xcf, 0x4b, 0xce, 0x48, 0xcc, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x46,
0x12, 0x52, 0x12, 0xe5, 0x12, 0x76, 0x4b, 0x2d, 0x49, 0xce, 0xf0, 0x49, 0x4d, 0x4c, 0x49, 0x2d,
0x2a, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x51, 0x5a, 0xc4, 0xc8, 0x25, 0x82, 0x2a, 0x5e,
0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0xe4, 0xc4, 0xc5, 0x9e, 0x03, 0x11, 0x92, 0x60, 0x54, 0x60,
0xd6, 0xe0, 0x36, 0xd2, 0xd0, 0x43, 0xb6, 0x01, 0x9b, 0x1e, 0x3d, 0x08, 0x3f, 0x08, 0xa6, 0x51,
0xca, 0x8d, 0x8b, 0x0d, 0x22, 0x24, 0xc4, 0xc7, 0xc5, 0x94, 0x59, 0x20, 0xc1, 0xa8, 0xc0, 0xa8,
0xc1, 0x19, 0xc4, 0x94, 0x59, 0x20, 0x24, 0xc4, 0xc5, 0x52, 0x90, 0x5f, 0x54, 0x22, 0xc1, 0x04,
0x16, 0x01, 0xb3, 0x85, 0x24, 0xb8, 0xd8, 0x8b, 0x33, 0x12, 0x8b, 0x52, 0x3c, 0x53, 0x24, 0x98,
0x15, 0x18, 0x35, 0x78, 0x83, 0x60, 0x5c, 0xa3, 0x6c, 0x2e, 0x21, 0x27, 0xb0, 0xdd, 0xce, 0x20,
0xbb, 0x83, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0x85, 0x42, 0xb9, 0x78, 0x90, 0x5d, 0x21, 0xa4,
0x80, 0xc7, 0x81, 0x60, 0xcf, 0x4a, 0x29, 0x12, 0xf4, 0x82, 0x12, 0x43, 0x12, 0x1b, 0x38, 0xf0,
0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x27, 0x0b, 0x9f, 0xda, 0x51, 0x01, 0x00, 0x00,
// 222 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0xcd, 0x4a, 0xc4, 0x30,
0x14, 0x85, 0x4d, 0x67, 0xe8, 0xe0, 0x1d, 0x15, 0xbc, 0xfe, 0x10, 0x66, 0x15, 0xbb, 0xca, 0xaa,
0x8b, 0xf1, 0x0d, 0xaa, 0x08, 0x05, 0x57, 0x11, 0xb7, 0x42, 0x9a, 0x5e, 0x68, 0x50, 0x9a, 0x98,
0x44, 0x1f, 0xce, 0xa7, 0x13, 0x53, 0x0b, 0x15, 0x44, 0x77, 0x39, 0x1f, 0x39, 0xe4, 0xcb, 0x81,
0xd3, 0x8e, 0xb4, 0x71, 0xa3, 0x19, 0xb4, 0x1d, 0x6b, 0x1f, 0x5c, 0x72, 0xb8, 0x5d, 0xa0, 0xea,
0x02, 0xce, 0xee, 0x28, 0x99, 0xe1, 0x9e, 0x74, 0x4f, 0x21, 0x2a, 0x7a, 0x7d, 0xa3, 0x98, 0xaa,
0x0f, 0x06, 0xe7, 0x3f, 0x79, 0xf4, 0x6e, 0x8c, 0x84, 0x0d, 0x6c, 0x5e, 0x26, 0xc4, 0x99, 0x58,
0xc9, 0xed, 0x5e, 0xd6, 0xcb, 0x17, 0x7e, 0xeb, 0xd4, 0x53, 0x56, 0x73, 0x71, 0xf7, 0x04, 0xe5,
0x84, 0xf0, 0x04, 0x0a, 0xeb, 0x39, 0x13, 0x4c, 0x1e, 0xaa, 0xc2, 0x7a, 0x44, 0x58, 0x7b, 0x17,
0x12, 0x2f, 0x32, 0xc9, 0x67, 0xe4, 0xb0, 0x89, 0x83, 0x0e, 0x7d, 0xdb, 0xf3, 0x95, 0x60, 0xf2,
0x58, 0xcd, 0x11, 0x2f, 0xa1, 0xf4, 0x44, 0xa1, 0xbd, 0xe5, 0xeb, 0x7c, 0xff, 0x3b, 0xed, 0x9f,
0x01, 0x9b, 0xec, 0x74, 0xf3, 0xe5, 0xf4, 0x40, 0xe1, 0xdd, 0x1a, 0xc2, 0x47, 0x38, 0x5a, 0xda,
0xa1, 0xf8, 0x43, 0x3c, 0x8f, 0xb0, 0xbb, 0xfa, 0xf7, 0x6b, 0xd5, 0x41, 0x57, 0xe6, 0x51, 0xaf,
0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x35, 0x50, 0x26, 0x86, 0x69, 0x01, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

@ -17,6 +17,7 @@ message FetchLeadersResponse {
string ip = 1;
string port = 2;
uint32 shardId = 3;
string peerID = 4;
}
repeated Leader leaders = 1;
}
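The new `peerID` field flows through to the generated `FetchLeadersResponse_Leader` struct shown above. Below is a minimal, hypothetical sketch of populating leader entries and reading them back; only the field and getter names (including the new `PeerID`/`GetPeerID`) come from this change, while the import path, the `GetLeaders`/`GetIp`/`GetPort` accessors (standard protoc-gen-go getters), and all literal values are assumptions for illustration.

```go
package main

import (
	"fmt"

	// Import path assumed from api/beaconchain/beaconchain.pb.go in this change.
	beaconchain "github.com/harmony-one/harmony/api/beaconchain"
)

func main() {
	// Hypothetical values; the struct and field names are from the generated code above.
	leader := &beaconchain.FetchLeadersResponse_Leader{
		Ip:      "127.0.0.1",
		Port:    "9000",
		ShardId: 0,
		PeerID:  "QmExamplePeerID", // new field added in this change
	}
	resp := &beaconchain.FetchLeadersResponse{
		Leaders: []*beaconchain.FetchLeadersResponse_Leader{leader},
	}
	for _, l := range resp.GetLeaders() {
		fmt.Println(l.GetIp(), l.GetPort(), l.GetShardId(), l.GetPeerID())
	}
}
```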

@ -1,10 +1,9 @@
package client
import (
"github.com/ethereum/go-ethereum/log"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/log"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/p2p/host"
)
// Client represents a node (e.g. a wallet) which sends transactions and receives responses from the harmony network
@ -15,11 +14,11 @@ type Client struct {
log log.Logger // Log utility
// The p2p host used to send/receive p2p messages
host host.Host
host p2p.Host
}
// NewClient creates a new Client
func NewClient(host host.Host, leaders *map[uint32]p2p.Peer) *Client {
func NewClient(host p2p.Host, leaders *map[uint32]p2p.Peer) *Client {
client := Client{}
client.Leaders = leaders
client.host = host

@ -3,11 +3,12 @@ package client
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
proto "github.com/harmony-one/harmony/api/client/service/proto"
"log"
"time"
"github.com/ethereum/go-ethereum/common"
proto "github.com/harmony-one/harmony/api/client/service/proto"
"google.golang.org/grpc"
)

@ -2,11 +2,12 @@ package client
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/harmony-one/harmony/core/state"
"log"
"net"
"github.com/ethereum/go-ethereum/common"
"github.com/harmony-one/harmony/core/state"
"google.golang.org/grpc"
proto "github.com/harmony-one/harmony/api/client/service/proto"
@ -14,7 +15,7 @@ import (
// Server is the Server struct for client service package.
type Server struct {
stateReader func() (*state.StateDB, error)
stateReader func() (*state.DB, error)
callFaucetContract func(common.Address) common.Hash
}
@ -54,7 +55,7 @@ func (s *Server) Start(ip, port string) (*grpc.Server, error) {
}
// NewServer creates new Server which implements ClientServiceServer interface.
func NewServer(stateReader func() (*state.StateDB, error), callFaucetContract func(common.Address) common.Hash) *Server {
func NewServer(stateReader func() (*state.DB, error), callFaucetContract func(common.Address) common.Hash) *Server {
s := &Server{stateReader: stateReader, callFaucetContract: callFaucetContract}
return s
}

@ -2,18 +2,19 @@ package client
import (
"bytes"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/harmony-one/harmony/api/client/service/proto"
client "github.com/harmony-one/harmony/api/client/service/proto"
"github.com/harmony-one/harmony/core/state"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/db"
)
var (
@ -28,7 +29,7 @@ var (
func TestGetFreeToken(test *testing.T) {
hash := common.Hash{}
hash.SetBytes([]byte("hello"))
server := NewServer(func() (*state.StateDB, error) {
server := NewServer(func() (*state.DB, error) {
return nil, nil
}, func(common.Address) common.Hash {
return hash
@ -48,7 +49,7 @@ func TestGetFreeToken(test *testing.T) {
func TestFetchAccountState(test *testing.T) {
var (
database = db.NewMemDatabase()
database = ethdb.NewMemDatabase()
gspec = core.Genesis{
Config: chainConfig,
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
@ -62,7 +63,7 @@ func TestFetchAccountState(test *testing.T) {
hash := common.Hash{}
hash.SetBytes([]byte("hello"))
server := NewServer(func() (*state.StateDB, error) {
server := NewServer(func() (*state.DB, error) {
return chain.State()
}, func(common.Address) common.Hash {
return hash

@ -23,39 +23,30 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type MessageType int32
const (
MessageType_UNKNOWN MessageType = 0
MessageType_ANNOUNCE MessageType = 1
MessageType_COMMIT MessageType = 2
MessageType_CHALLENGE MessageType = 3
MessageType_RESPONSE MessageType = 4
MessageType_COLLECTIVE_SIG MessageType = 5
MessageType_FINAL_COMMIT MessageType = 6
MessageType_FINAL_CHALLENGE MessageType = 7
MessageType_FINAL_RESPONSE MessageType = 8
MessageType_UNKNOWN MessageType = 0
MessageType_ANNOUNCE MessageType = 1
MessageType_PREPARE MessageType = 2
MessageType_PREPARED MessageType = 3
MessageType_COMMIT MessageType = 4
MessageType_COMMITTED MessageType = 5
)
var MessageType_name = map[int32]string{
0: "UNKNOWN",
1: "ANNOUNCE",
2: "COMMIT",
3: "CHALLENGE",
4: "RESPONSE",
5: "COLLECTIVE_SIG",
6: "FINAL_COMMIT",
7: "FINAL_CHALLENGE",
8: "FINAL_RESPONSE",
2: "PREPARE",
3: "PREPARED",
4: "COMMIT",
5: "COMMITTED",
}
var MessageType_value = map[string]int32{
"UNKNOWN": 0,
"ANNOUNCE": 1,
"COMMIT": 2,
"CHALLENGE": 3,
"RESPONSE": 4,
"COLLECTIVE_SIG": 5,
"FINAL_COMMIT": 6,
"FINAL_CHALLENGE": 7,
"FINAL_RESPONSE": 8,
"UNKNOWN": 0,
"ANNOUNCE": 1,
"PREPARE": 2,
"PREPARED": 3,
"COMMIT": 4,
"COMMITTED": 5,
}
func (x MessageType) String() string {
@ -153,24 +144,22 @@ func init() {
func init() { proto.RegisterFile("consensus.proto", fileDescriptor_56f0f2c53b3de771) }
var fileDescriptor_56f0f2c53b3de771 = []byte{
// 302 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xdf, 0x4e, 0xc2, 0x30,
0x14, 0xc6, 0x2d, 0x7f, 0x36, 0x76, 0x18, 0xd0, 0x1c, 0x13, 0xd3, 0x44, 0x4d, 0xd0, 0x2b, 0xc2,
0x05, 0x17, 0xfa, 0x04, 0x64, 0xa9, 0xd0, 0x38, 0x3a, 0x33, 0x40, 0x2f, 0x97, 0xc2, 0x1a, 0x20,
0x92, 0x6d, 0xa1, 0x70, 0xc1, 0xdb, 0xf8, 0x40, 0x3e, 0x94, 0x61, 0x60, 0xf5, 0xb2, 0xbf, 0xdf,
0xf7, 0x7d, 0x69, 0x72, 0xa0, 0xb3, 0xcc, 0x33, 0xa3, 0x33, 0x73, 0x30, 0x83, 0x62, 0x97, 0xef,
0x73, 0xf4, 0x2c, 0x78, 0xfc, 0x26, 0xe0, 0x4e, 0xb4, 0x31, 0x6a, 0xa5, 0xb1, 0x0f, 0xb5, 0xfd,
0xb1, 0xd0, 0x8c, 0x74, 0x49, 0xaf, 0xfd, 0x74, 0x33, 0xf8, 0xab, 0x5d, 0x12, 0xb3, 0x63, 0xa1,
0xe3, 0x32, 0x83, 0x0f, 0xe0, 0x5b, 0x9d, 0x6c, 0x52, 0x56, 0xe9, 0x92, 0x5e, 0x2b, 0x6e, 0x5a,
0x26, 0x52, 0xbc, 0x05, 0xcf, 0xe8, 0x2c, 0xd5, 0xbb, 0x93, 0xaf, 0x96, 0xbe, 0x71, 0x06, 0x22,
0xc5, 0x7b, 0x80, 0xc5, 0x36, 0x5f, 0x7e, 0x26, 0x6b, 0x65, 0xd6, 0xac, 0xd6, 0x25, 0x3d, 0x3f,
0xf6, 0x4a, 0x32, 0x56, 0x66, 0x8d, 0x0c, 0xdc, 0x42, 0x1d, 0xb7, 0xb9, 0x4a, 0x59, 0xbd, 0x74,
0xbf, 0x4f, 0xbc, 0x03, 0xcf, 0x6c, 0x56, 0x99, 0xda, 0x1f, 0x76, 0x9a, 0x39, 0xe7, 0x9e, 0x05,
0xfd, 0x2f, 0x02, 0xcd, 0x7f, 0x9f, 0xc5, 0x26, 0xb8, 0x73, 0xf9, 0x2a, 0xa3, 0x0f, 0x49, 0xaf,
0xd0, 0x87, 0xc6, 0x50, 0xca, 0x68, 0x2e, 0x03, 0x4e, 0x09, 0x02, 0x38, 0x41, 0x34, 0x99, 0x88,
0x19, 0xad, 0x60, 0x0b, 0xbc, 0x60, 0x3c, 0x0c, 0x43, 0x2e, 0x47, 0x9c, 0x56, 0x4f, 0xc1, 0x98,
0x4f, 0xdf, 0x22, 0x39, 0xe5, 0xb4, 0x86, 0x08, 0xed, 0x20, 0x0a, 0x43, 0x1e, 0xcc, 0xc4, 0x3b,
0x4f, 0xa6, 0x62, 0x44, 0xeb, 0x48, 0xc1, 0x7f, 0x11, 0x72, 0x18, 0x26, 0x97, 0x09, 0x07, 0xaf,
0xa1, 0x73, 0x21, 0x76, 0xc8, 0x3d, 0x55, 0xcf, 0xd0, 0xce, 0x35, 0x16, 0x4e, 0x79, 0x83, 0xe7,
0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x86, 0xa2, 0x8a, 0x4f, 0x96, 0x01, 0x00, 0x00,
// 263 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0xc1, 0x6a, 0xb3, 0x40,
0x14, 0x85, 0xff, 0x49, 0x8c, 0xc6, 0xab, 0xf9, 0x3b, 0xdc, 0x45, 0x19, 0x68, 0x0b, 0xb6, 0x2b,
0xc9, 0x22, 0x8b, 0xf6, 0x09, 0x42, 0x22, 0x54, 0x8a, 0x63, 0x10, 0x43, 0x77, 0x0d, 0x93, 0x38,
0xc4, 0xd0, 0xa0, 0xe2, 0x98, 0x85, 0xef, 0xd8, 0x87, 0x2a, 0x8e, 0xa9, 0xed, 0x6e, 0xce, 0xf7,
0x9d, 0x03, 0xc3, 0x85, 0x9b, 0x43, 0x59, 0x28, 0x59, 0xa8, 0x8b, 0x5a, 0x54, 0x75, 0xd9, 0x94,
0x68, 0x0f, 0xe0, 0xe9, 0x8b, 0x80, 0x15, 0x49, 0xa5, 0xc4, 0x51, 0xe2, 0x1c, 0x8c, 0xa6, 0xad,
0x24, 0x23, 0x1e, 0xf1, 0xff, 0x3f, 0xdf, 0x2e, 0x7e, 0x67, 0xd7, 0x46, 0xda, 0x56, 0x32, 0xd1,
0x1d, 0x7c, 0x04, 0x77, 0xd0, 0xbb, 0x53, 0xc6, 0x46, 0x1e, 0xf1, 0x67, 0x89, 0x33, 0xb0, 0x30,
0xc3, 0x3b, 0xb0, 0x95, 0x2c, 0x32, 0x59, 0x77, 0x7e, 0xac, 0xfd, 0xb4, 0x07, 0x61, 0x86, 0x0f,
0x00, 0xfb, 0x73, 0x79, 0xf8, 0xdc, 0xe5, 0x42, 0xe5, 0xcc, 0xf0, 0x88, 0xef, 0x26, 0xb6, 0x26,
0xaf, 0x42, 0xe5, 0xc8, 0xc0, 0xaa, 0x44, 0x7b, 0x2e, 0x45, 0xc6, 0x26, 0xda, 0xfd, 0x44, 0xbc,
0x07, 0x5b, 0x9d, 0x8e, 0x85, 0x68, 0x2e, 0xb5, 0x64, 0x66, 0xbf, 0x1b, 0xc0, 0xfc, 0x03, 0x9c,
0x3f, 0x7f, 0x45, 0x07, 0xac, 0x2d, 0x7f, 0xe3, 0xf1, 0x3b, 0xa7, 0xff, 0xd0, 0x85, 0xe9, 0x92,
0xf3, 0x78, 0xcb, 0x57, 0x01, 0x25, 0x9d, 0xda, 0x24, 0xc1, 0x66, 0x99, 0x04, 0x74, 0xd4, 0xa9,
0x6b, 0x58, 0xd3, 0x31, 0x02, 0x98, 0xab, 0x38, 0x8a, 0xc2, 0x94, 0x1a, 0x38, 0x03, 0xbb, 0x7f,
0xa7, 0xc1, 0x9a, 0x4e, 0xf6, 0xa6, 0x3e, 0xe0, 0xcb, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd5,
0x30, 0x8a, 0xaf, 0x53, 0x01, 0x00, 0x00,
}

@ -5,13 +5,10 @@ package consensus;
enum MessageType {
UNKNOWN = 0;
ANNOUNCE = 1;
COMMIT = 2;
CHALLENGE = 3;
RESPONSE = 4;
COLLECTIVE_SIG = 5;
FINAL_COMMIT = 6;
FINAL_CHALLENGE = 7;
FINAL_RESPONSE = 8;
PREPARE = 2;
PREPARED = 3;
COMMIT = 4;
COMMITTED = 5;
}
message Message {
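The old CoSi-style types (COMMIT, CHALLENGE, RESPONSE, COLLECTIVE_SIG, and the FINAL_ variants) are replaced here by PBFT-style phases. As a rough illustration of the ordering these values imply, here is a minimal sketch written as if it lived in the generated Go package for this proto; the package name is an assumption, and only the enum value names come from this change.

```go
package consensus

// nextExpected is an illustrative helper (not from the repository) that maps a
// message type to the one a node would expect next in the
// ANNOUNCE -> PREPARE -> PREPARED -> COMMIT -> COMMITTED flow.
func nextExpected(t MessageType) MessageType {
	switch t {
	case MessageType_ANNOUNCE:
		return MessageType_PREPARE
	case MessageType_PREPARE:
		return MessageType_PREPARED
	case MessageType_PREPARED:
		return MessageType_COMMIT
	case MessageType_COMMIT:
		return MessageType_COMMITTED
	default:
		return MessageType_UNKNOWN
	}
}
```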

@ -4,25 +4,20 @@ import (
"bytes"
"encoding/gob"
"github.com/harmony-one/harmony/log"
"github.com/harmony-one/harmony/p2p"
)
"github.com/ethereum/go-ethereum/log"
//NodeInfo struct exists to share information on the node
type NodeInfo struct { //TODO: to be merged with Leo's nodeinfo.
Self p2p.Peer
PubK []byte
}
"github.com/harmony-one/harmony/api/proto/node"
)
//ResponseRandomNumber struct for exchanging random information
type ResponseRandomNumber struct {
NumberOfShards int
NumberOfNodesAdded int
Leaders []*NodeInfo
Leaders []*node.Info
}
// SerializeNodeInfo is for serializing nodeinfo
func SerializeNodeInfo(nodeinfo *NodeInfo) []byte {
func SerializeNodeInfo(nodeinfo *node.Info) []byte {
var result bytes.Buffer
encoder := gob.NewEncoder(&result)
err := encoder.Encode(nodeinfo)
@ -33,8 +28,8 @@ func SerializeNodeInfo(nodeinfo *NodeInfo) []byte {
}
// DeserializeNodeInfo deserializes the nodeinfo
func DeserializeNodeInfo(d []byte) *NodeInfo {
var wn NodeInfo
func DeserializeNodeInfo(d []byte) *node.Info {
var wn node.Info
r := bytes.NewBuffer(d)
decoder := gob.NewDecoder(r)
err := decoder.Decode(&wn)

@ -5,21 +5,17 @@ import (
"reflect"
"testing"
"github.com/harmony-one/harmony/api/proto/node"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
)
func TestSerializeDeserializeNodeInfo(t *testing.T) {
var ip, port string
ip = "127.0.0.1"
port = "8080"
self := p2p.Peer{IP: ip, Port: port}
_, pk := utils.GenKey(ip, port)
pkb, err := pk.MarshalBinary()
if err != nil {
fmt.Println("problem marshalling binary from public key")
}
nodeInfo := &NodeInfo{Self: self, PubK: pkb}
_, pk := utils.GenKeyBLS(ip, port)
pkb := pk.Serialize()
nodeInfo := &node.Info{IP: ip, Port: port, PubKey: pkb}
serializedNI := SerializeNodeInfo(nodeInfo)
deserializedNI := DeserializeNodeInfo(serializedNI)
if !reflect.DeepEqual(nodeInfo, deserializedNI) {
@ -33,25 +29,17 @@ func TestSerializeDeserializeRandomInfo(t *testing.T) {
ip = "127.0.0.1"
port = "8080"
self := p2p.Peer{IP: ip, Port: port}
_, pk := utils.GenKey(ip, port)
pkb, err := pk.MarshalBinary()
if err != nil {
fmt.Println("problem marshalling binary from public key")
}
nodeInfo1 := &NodeInfo{Self: self, PubK: pkb}
_, pk := utils.GenKeyBLS(ip, port)
pkb := pk.Serialize()
nodeInfo1 := &node.Info{IP: ip, Port: port, PubKey: pkb}
ip = "127.0.0.1"
port = "9080"
self2 := p2p.Peer{IP: ip, Port: port}
_, pk2 := utils.GenKey(ip, port)
pkb2, err := pk2.MarshalBinary()
if err != nil {
fmt.Println("problem marshalling binary from public key")
}
nodeInfo2 := &NodeInfo{Self: self2, PubK: pkb2}
_, pk2 := utils.GenKeyBLS(ip, port)
pkb2 := pk2.Serialize()
nodeInfo2 := &node.Info{IP: ip, Port: port, PubKey: pkb2}
leaders := make([]*NodeInfo, 2)
leaders := make([]*node.Info, 2)
leaders[0] = nodeInfo1
leaders[1] = nodeInfo2

@ -34,11 +34,14 @@ const (
// TODO: add more types
)
// MessageCategoryBytes is the number of bytes message category takes
const MessageCategoryBytes = 1
// MessageTypeBytes is the number of bytes message type takes
const MessageTypeBytes = 1
const (
// ProtocolVersion is a constant defined as the version of the Harmony protocol
ProtocolVersion = 1
// MessageCategoryBytes is the number of bytes message category takes
MessageCategoryBytes = 1
// MessageTypeBytes is the number of bytes message type takes
MessageTypeBytes = 1
)
// GetMessageCategory gets the message category from the p2p message content
func GetMessageCategory(message []byte) (MessageCategory, error) {
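The consolidated const block above describes a two-byte header: one category byte followed by one type byte, with the payload after that. The sketch below only illustrates that layout; it is not the repository's GetMessageCategory implementation (which wraps the byte in a MessageCategory value), and the helper name is made up.

```go
package proto

import "errors"

// splitHeader is an illustrative sketch of peeling the one-byte category and
// one-byte type off a p2p message using the constants defined above.
func splitHeader(message []byte) (category byte, msgType byte, payload []byte, err error) {
	if len(message) < MessageCategoryBytes+MessageTypeBytes {
		return 0, 0, nil, errors.New("message too short to contain category and type bytes")
	}
	category = message[0]                                     // first MessageCategoryBytes byte(s)
	msgType = message[MessageCategoryBytes]                   // next MessageTypeBytes byte(s)
	payload = message[MessageCategoryBytes+MessageTypeBytes:] // remaining payload
	return category, msgType, payload, nil
}
```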

@ -1,5 +1,5 @@
/*
Package proto/node implements the communication protocol among nodes.
Package proto/discovery implements the discovery ping/pong protocol among nodes.
pingpong.go adds support for ping/pong messages.
@ -8,7 +8,7 @@ pong: peer responds to ping messages, sending all pubkeys known by peer
*/
package node
package discovery
import (
"bytes"
@ -16,51 +16,22 @@ import (
"fmt"
"log"
"github.com/dedis/kyber"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/api/proto"
"github.com/harmony-one/harmony/api/proto/node"
"github.com/harmony-one/harmony/p2p"
)
// RoleType defines the role of the node
type RoleType int
// Type of roles of a node
const (
ValidatorRole RoleType = iota
ClientRole
)
func (r RoleType) String() string {
switch r {
case ValidatorRole:
return "Validator"
case ClientRole:
return "Client"
}
return "Unknown"
}
// refer to Peer struct in p2p/peer.go
// this is basically a simplified version of Peer
// for network transportation
type nodeInfo struct {
IP string
Port string
PubKey []byte
ValidatorID int
Role RoleType
}
// PingMessageType defines the data structure of the Ping message
type PingMessageType struct {
Version uint16 // version of the protocol
Node nodeInfo
Node node.Info
}
// PongMessageType defines the data structure of the Pong message
type PongMessageType struct {
Version uint16 // version of the protocol
Peers []nodeInfo
Peers []node.Info
PubKeys [][]byte // list of publickKeys, has to be identical among all validators/leaders
}
@ -77,37 +48,33 @@ func (p PongMessageType) String() string {
func NewPingMessage(peer p2p.Peer) *PingMessageType {
ping := new(PingMessageType)
var err error
ping.Version = ProtocolVersion
ping.Version = proto.ProtocolVersion
ping.Node.IP = peer.IP
ping.Node.Port = peer.Port
ping.Node.PeerID = peer.PeerID
ping.Node.ValidatorID = peer.ValidatorID
ping.Node.PubKey, err = peer.PubKey.MarshalBinary()
ping.Node.Role = ValidatorRole
if err != nil {
fmt.Printf("Error Marshal PubKey: %v", err)
return nil
}
ping.Node.PubKey = peer.PubKey.Serialize()
ping.Node.Role = node.ValidatorRole
return ping
}
// NewPongMessage creates a new Pong message based on a list of p2p.Peer and a list of publicKeys
func NewPongMessage(peers []p2p.Peer, pubKeys []kyber.Point) *PongMessageType {
func NewPongMessage(peers []p2p.Peer, pubKeys []*bls.PublicKey) *PongMessageType {
pong := new(PongMessageType)
pong.PubKeys = make([][]byte, 0)
pong.Version = ProtocolVersion
pong.Peers = make([]nodeInfo, 0)
pong.Version = proto.ProtocolVersion
pong.Peers = make([]node.Info, 0)
var err error
for _, p := range peers {
n := nodeInfo{}
n := node.Info{}
n.IP = p.IP
n.Port = p.Port
n.ValidatorID = p.ValidatorID
n.PubKey, err = p.PubKey.MarshalBinary()
n.PeerID = p.PeerID
n.PubKey = p.PubKey.Serialize()
if err != nil {
fmt.Printf("Error Marshal PubKey: %v", err)
continue
@ -116,11 +83,7 @@ func NewPongMessage(peers []p2p.Peer, pubKeys []kyber.Point) *PongMessageType {
}
for _, p := range pubKeys {
key, err := p.MarshalBinary()
if err != nil {
fmt.Printf("Error Marshal PublicKeys: %v", err)
continue
}
key := p.Serialize()
pong.PubKeys = append(pong.PubKeys, key)
}
@ -146,7 +109,7 @@ func GetPingMessage(payload []byte) (*PingMessageType, error) {
// GetPongMessage deserializes the Pong Message from a list of byte
func GetPongMessage(payload []byte) (*PongMessageType, error) {
pong := new(PongMessageType)
pong.Peers = make([]nodeInfo, 0)
pong.Peers = make([]node.Info, 0)
pong.PubKeys = make([][]byte, 0)
r := bytes.NewBuffer(payload)
@ -163,7 +126,7 @@ func GetPongMessage(payload []byte) (*PongMessageType, error) {
// ConstructPingMessage constructs the ping message from node to leader
func (p PingMessageType) ConstructPingMessage() []byte {
byteBuffer := bytes.NewBuffer([]byte{byte(proto.Node)})
byteBuffer.WriteByte(byte(PING))
byteBuffer.WriteByte(byte(node.PING))
encoder := gob.NewEncoder(byteBuffer)
err := encoder.Encode(p)
@ -177,7 +140,7 @@ func (p PingMessageType) ConstructPingMessage() []byte {
// ConstructPongMessage constructs the pong message from leader to node
func (p PongMessageType) ConstructPongMessage() []byte {
byteBuffer := bytes.NewBuffer([]byte{byte(proto.Node)})
byteBuffer.WriteByte(byte(PONG))
byteBuffer.WriteByte(byte(node.PONG))
encoder := gob.NewEncoder(byteBuffer)
err := encoder.Encode(p)
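For context, here is a hedged round-trip sketch of the updated ping path using only functions visible in this change (NewPingMessage, ConstructPingMessage, GetPingMessage, and the BLS key helper from the updated tests). The import paths are inferred from the file locations, the integer key seed is arbitrary, and it assumes the receiver strips the two header bytes (proto.Node category, node.PING type) that ConstructPingMessage prepends before calling GetPingMessage; the actual handler code is not shown here.

```go
package main

import (
	"fmt"

	// Import paths assumed from the file locations in this change.
	"github.com/harmony-one/harmony/api/proto/discovery"
	"github.com/harmony-one/harmony/crypto/pki"
	"github.com/harmony-one/harmony/p2p"
)

func main() {
	// BLS key helper taken from the updated tests; assumes the pki package
	// handles any BLS library initialization.
	pubKey := pki.GetBLSPrivateKeyFromInt(333).GetPublicKey()
	peer := p2p.Peer{IP: "127.0.0.1", Port: "9000", ValidatorID: -1, PubKey: pubKey}

	ping := discovery.NewPingMessage(peer)
	msg := ping.ConstructPingMessage()

	// Strip the two header bytes (category, type) before decoding the gob payload.
	decoded, err := discovery.GetPingMessage(msg[2:])
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Node.IP, decoded.Node.Port)
}
```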

@ -1,4 +1,4 @@
package node
package discovery
import (
"fmt"
@ -6,47 +6,43 @@ import (
"strings"
"testing"
"github.com/dedis/kyber"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/api/proto"
"github.com/harmony-one/harmony/crypto"
"github.com/harmony-one/harmony/api/proto/node"
"github.com/harmony-one/harmony/crypto/pki"
"github.com/harmony-one/harmony/p2p"
)
var (
priKey1 = crypto.Ed25519Curve.Scalar().SetInt64(int64(333))
pubKey1 = pki.GetPublicKeyFromScalar(priKey1)
pubKey1 = pki.GetBLSPrivateKeyFromInt(333).GetPublicKey()
p1 = p2p.Peer{
IP: "127.0.0.1",
Port: "9999",
ValidatorID: -1,
PubKey: pubKey1,
}
e1 = "ping:Validator/1=>127.0.0.1:9999:-1/[90 217 28 68 64 211 160 232 61 244 159 244 160 36 61 161 237 242 236 45 147 118 237 88 234 122 198 188 157 116 90 228]"
e3 = "ping:Client/1=>127.0.0.1:9999:-1/[90 217 28 68 64 211 160 232 61 244 159 244 160 36 61 161 237 242 236 45 147 118 237 88 234 122 198 188 157 116 90 228]"
e1 = "ping:Validator/1=>127.0.0.1:9999:-1/[120 1 130 197 30 202 78 236 84 249 5 230 132 208 242 242 246 63 100 123 96 11 211 228 4 56 64 94 57 133 3 226 254 222 231 160 178 81 252 205 40 28 45 2 90 74 207 15 68 86 138 68 143 176 221 161 108 105 133 6 64 121 92 25 134 255 9 209 156 209 119 187 13 160 23 147 240 24 196 152 100 20 163 51 118 45 100 26 179 227 184 166 147 113 50 139]"
e3 = "ping:Client/1=>127.0.0.1:9999:-1/[120 1 130 197 30 202 78 236 84 249 5 230 132 208 242 242 246 63 100 123 96 11 211 228 4 56 64 94 57 133 3 226 254 222 231 160 178 81 252 205 40 28 45 2 90 74 207 15 68 86 138 68 143 176 221 161 108 105 133 6 64 121 92 25 134 255 9 209 156 209 119 187 13 160 23 147 240 24 196 152 100 20 163 51 118 45 100 26 179 227 184 166 147 113 50 139]"
priKey2 = crypto.Ed25519Curve.Scalar().SetInt64(int64(999))
pubKey2 = pki.GetPublicKeyFromScalar(priKey2)
pubKey2 = pki.GetBLSPrivateKeyFromInt(999).GetPublicKey()
p2 = []p2p.Peer{
{
IP: "127.0.0.1",
Port: "8888",
PubKey: pubKey1,
Ready: true,
ValidatorID: -1,
},
{
IP: "127.0.0.1",
Port: "9999",
PubKey: pubKey2,
Ready: false,
ValidatorID: -2,
},
}
e2 = "pong:1=>length:2"
pubKeys = []kyber.Point{pubKey1, pubKey2}
pubKeys = []*bls.PublicKey{pubKey1, pubKey2}
buf1 []byte
buf2 []byte
@ -60,7 +56,7 @@ func TestString(test *testing.T) {
test.Errorf("expect: %v, got: %v", e1, r1)
}
ping1.Node.Role = ClientRole
ping1.Node.Role = node.ClientRole
r3 := fmt.Sprintf("%v", *ping1)
if strings.Compare(r3, e3) != 0 {

@ -0,0 +1 @@
protoc -I ./ message.proto --go_out=./

@ -0,0 +1,241 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: message.proto
package message
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type MessageType int32
const (
MessageType_UNKNOWN MessageType = 0
MessageType_NEWNODE_BOOTNODE MessageType = 1
MessageType_BOOTNODE_NEWNODE MessageType = 2
MessageType_NEWNODE_BEACON MessageType = 3
MessageType_BEACON_NEWNODE MessageType = 4
)
var MessageType_name = map[int32]string{
0: "UNKNOWN",
1: "NEWNODE_BOOTNODE",
2: "BOOTNODE_NEWNODE",
3: "NEWNODE_BEACON",
4: "BEACON_NEWNODE",
}
var MessageType_value = map[string]int32{
"UNKNOWN": 0,
"NEWNODE_BOOTNODE": 1,
"BOOTNODE_NEWNODE": 2,
"NEWNODE_BEACON": 3,
"BEACON_NEWNODE": 4,
}
func (x MessageType) String() string {
return proto.EnumName(MessageType_name, int32(x))
}
func (MessageType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_33c57e4bae7b9afd, []int{0}
}
// This is the universal message for all communication protocols.
// There are different Requests for different message types.
// As we introduce a new type of message, just add a new MessageType and a new type of request in Message.
//
// The request field will be one of the structures corresponding to the MessageType.
type Message struct {
Type MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=message.MessageType" json:"type,omitempty"`
// Types that are valid to be assigned to Request:
// *Message_NewnodeBootnodeRequest
// *Message_BootnodeNewnodeRequest
Request isMessage_Request `protobuf_oneof:"request"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) {
return fileDescriptor_33c57e4bae7b9afd, []int{0}
}
func (m *Message) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Message.Unmarshal(m, b)
}
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
}
func (m *Message) XXX_Merge(src proto.Message) {
xxx_messageInfo_Message.Merge(m, src)
}
func (m *Message) XXX_Size() int {
return xxx_messageInfo_Message.Size(m)
}
func (m *Message) XXX_DiscardUnknown() {
xxx_messageInfo_Message.DiscardUnknown(m)
}
var xxx_messageInfo_Message proto.InternalMessageInfo
func (m *Message) GetType() MessageType {
if m != nil {
return m.Type
}
return MessageType_UNKNOWN
}
type isMessage_Request interface {
isMessage_Request()
}
type Message_NewnodeBootnodeRequest struct {
NewnodeBootnodeRequest *NewNodeBootNodeRequest `protobuf:"bytes,2,opt,name=newnode_bootnode_request,json=newnodeBootnodeRequest,proto3,oneof"`
}
type Message_BootnodeNewnodeRequest struct {
BootnodeNewnodeRequest *BootNodeNewNodeRequest `protobuf:"bytes,3,opt,name=bootnode_newnode_request,json=bootnodeNewnodeRequest,proto3,oneof"`
}
func (*Message_NewnodeBootnodeRequest) isMessage_Request() {}
func (*Message_BootnodeNewnodeRequest) isMessage_Request() {}
func (m *Message) GetRequest() isMessage_Request {
if m != nil {
return m.Request
}
return nil
}
func (m *Message) GetNewnodeBootnodeRequest() *NewNodeBootNodeRequest {
if x, ok := m.GetRequest().(*Message_NewnodeBootnodeRequest); ok {
return x.NewnodeBootnodeRequest
}
return nil
}
func (m *Message) GetBootnodeNewnodeRequest() *BootNodeNewNodeRequest {
if x, ok := m.GetRequest().(*Message_BootnodeNewnodeRequest); ok {
return x.BootnodeNewnodeRequest
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Message) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Message_NewnodeBootnodeRequest)(nil),
(*Message_BootnodeNewnodeRequest)(nil),
}
}
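A short sketch of building and serializing the new bootnode handshake message with the generated types above. The import path is inferred from the location api/proto/message/message.pb.go, and proto.Marshal is the standard golang/protobuf call already imported by the generated file; both request types in this change are currently empty messages, so only the type and the oneof wrapper matter here.

```go
package main

import (
	"fmt"
	"log"

	proto "github.com/golang/protobuf/proto"

	// Import path assumed from api/proto/message/message.pb.go in this change.
	message "github.com/harmony-one/harmony/api/proto/message"
)

func main() {
	// A new node announcing itself to the bootnode.
	msg := &message.Message{
		Type: message.MessageType_NEWNODE_BOOTNODE,
		Request: &message.Message_NewnodeBootnodeRequest{
			NewnodeBootnodeRequest: &message.NewNodeBootNodeRequest{},
		},
	}

	data, err := proto.Marshal(msg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("serialized %d bytes, type=%s\n", len(data), msg.GetType())
}
```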
// Message of NewNode talking to BootNode.
type NewNodeBootNodeRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NewNodeBootNodeRequest) Reset() { *m = NewNodeBootNodeRequest{} }
func (m *NewNodeBootNodeRequest) String() string { return proto.CompactTextString(m) }
func (*NewNodeBootNodeRequest) ProtoMessage() {}
func (*NewNodeBootNodeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_33c57e4bae7b9afd, []int{1}
}
func (m *NewNodeBootNodeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NewNodeBootNodeRequest.Unmarshal(m, b)
}
func (m *NewNodeBootNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NewNodeBootNodeRequest.Marshal(b, m, deterministic)
}
func (m *NewNodeBootNodeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_NewNodeBootNodeRequest.Merge(m, src)
}
func (m *NewNodeBootNodeRequest) XXX_Size() int {
return xxx_messageInfo_NewNodeBootNodeRequest.Size(m)
}
func (m *NewNodeBootNodeRequest) XXX_DiscardUnknown() {
xxx_messageInfo_NewNodeBootNodeRequest.DiscardUnknown(m)
}
var xxx_messageInfo_NewNodeBootNodeRequest proto.InternalMessageInfo
// Message of BootNode talking to NewNode.
type BootNodeNewNodeRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BootNodeNewNodeRequest) Reset() { *m = BootNodeNewNodeRequest{} }
func (m *BootNodeNewNodeRequest) String() string { return proto.CompactTextString(m) }
func (*BootNodeNewNodeRequest) ProtoMessage() {}
func (*BootNodeNewNodeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_33c57e4bae7b9afd, []int{2}
}
func (m *BootNodeNewNodeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BootNodeNewNodeRequest.Unmarshal(m, b)
}
func (m *BootNodeNewNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BootNodeNewNodeRequest.Marshal(b, m, deterministic)
}
func (m *BootNodeNewNodeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_BootNodeNewNodeRequest.Merge(m, src)
}
func (m *BootNodeNewNodeRequest) XXX_Size() int {
return xxx_messageInfo_BootNodeNewNodeRequest.Size(m)
}
func (m *BootNodeNewNodeRequest) XXX_DiscardUnknown() {
xxx_messageInfo_BootNodeNewNodeRequest.DiscardUnknown(m)
}
var xxx_messageInfo_BootNodeNewNodeRequest proto.InternalMessageInfo
func init() {
proto.RegisterEnum("message.MessageType", MessageType_name, MessageType_value)
proto.RegisterType((*Message)(nil), "message.Message")
proto.RegisterType((*NewNodeBootNodeRequest)(nil), "message.NewNodeBootNodeRequest")
proto.RegisterType((*BootNodeNewNodeRequest)(nil), "message.BootNodeNewNodeRequest")
}
func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) }
var fileDescriptor_33c57e4bae7b9afd = []byte{
// 253 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x41, 0x4b, 0xc3, 0x40,
0x10, 0x85, 0xbb, 0x6d, 0x31, 0x38, 0xc1, 0xb2, 0x2c, 0xa5, 0xe4, 0x66, 0xe9, 0x29, 0x78, 0xe8,
0xa1, 0xfe, 0x02, 0xa3, 0x01, 0x41, 0x9c, 0x85, 0x50, 0xe9, 0xc1, 0x43, 0xb0, 0x74, 0xf0, 0x64,
0x26, 0x36, 0x2b, 0xa5, 0xff, 0xdc, 0xa3, 0x6c, 0x32, 0x1b, 0x8a, 0xe4, 0xb4, 0x6f, 0xe6, 0xbd,
0xf9, 0x66, 0xd9, 0x85, 0x9b, 0x2f, 0x6a, 0x9a, 0x8f, 0x4f, 0x5a, 0xd7, 0x47, 0x76, 0x6c, 0x22,
0x29, 0x57, 0xbf, 0x0a, 0xa2, 0xd7, 0x4e, 0x9b, 0x14, 0xa6, 0xee, 0x5c, 0x53, 0xa2, 0x96, 0x2a,
0x9d, 0x6d, 0xe6, 0xeb, 0x30, 0x22, 0xfe, 0xf6, 0x5c, 0x53, 0xd1, 0x26, 0xcc, 0x3b, 0x24, 0x15,
0x9d, 0x2a, 0x3e, 0x50, 0xb9, 0x67, 0x76, 0xad, 0x38, 0xd2, 0xf7, 0x0f, 0x35, 0x2e, 0x19, 0x2f,
0x55, 0x1a, 0x6f, 0x6e, 0xfb, 0x69, 0xa4, 0x13, 0xf2, 0x81, 0x32, 0x66, 0xe7, 0xcf, 0xa2, 0x8b,
0x3d, 0x8f, 0x8a, 0x85, 0x20, 0x32, 0x21, 0x88, 0xe3, 0xe1, 0x3d, 0x34, 0x6c, 0x09, 0xf0, 0xc9,
0x3f, 0x78, 0xa0, 0xca, 0x92, 0x0b, 0x78, 0x40, 0x60, 0x47, 0x10, 0x27, 0xbb, 0x86, 0x48, 0x58,
0xab, 0x04, 0x16, 0xc3, 0x77, 0xf3, 0xce, 0x30, 0xf8, 0xae, 0x82, 0xf8, 0xe2, 0x35, 0x4c, 0x0c,
0xd1, 0x1b, 0xbe, 0xa0, 0xdd, 0xa1, 0x1e, 0x99, 0x39, 0x68, 0xcc, 0x77, 0x68, 0x9f, 0xf2, 0x32,
0xb3, 0x76, 0xeb, 0x85, 0x56, 0xbe, 0x1b, 0xaa, 0x52, 0x6c, 0x3d, 0x36, 0x06, 0x66, 0x7d, 0x36,
0x7f, 0x78, 0xb4, 0xa8, 0x27, 0xbe, 0xd7, 0xe9, 0x3e, 0x37, 0xdd, 0x5f, 0xb5, 0xdf, 0x75, 0xff,
0x17, 0x00, 0x00, 0xff, 0xff, 0x37, 0x30, 0xd0, 0xfb, 0xbf, 0x01, 0x00, 0x00,
}

@ -0,0 +1,33 @@
syntax = "proto3";
package message;
enum MessageType {
UNKNOWN = 0;
NEWNODE_BOOTNODE = 1;
BOOTNODE_NEWNODE = 2;
NEWNODE_BEACON = 3;
BEACON_NEWNODE = 4;
}
// This is a universal message for all communication protocols.
// There are different Requests for different message types.
// As we introduce a new type of message, just add a new MessageType and a new type of request in Message.
//
// The request field will be one of the structures corresponding to the MessageType.
message Message {
MessageType type = 1;
oneof request {
NewNodeBootNodeRequest newnode_bootnode_request = 2;
BootNodeNewNodeRequest bootnode_newnode_request = 3;
}
}
// Message of NewNode talking to BootNode.
message NewNodeBootNodeRequest {
}
// Message of BootNode talking to NewNode.
message BootNodeNewNodeRequest {
}
// TODO(minhdoan): refactor and introduce consensus message as one of possible Message.request.
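For illustration, here is a minimal sketch (not part of this change) of building and round-tripping such a Message in Go with the generated types; the import path of the generated message package is assumed.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	msg "github.com/harmony-one/harmony/api/proto/message" // assumed import path of the generated package
)

func main() {
	// Build a Message whose oneof request carries a NewNodeBootNodeRequest.
	m := &msg.Message{
		Type: msg.MessageType_NEWNODE_BOOTNODE,
		Request: &msg.Message_NewnodeBootnodeRequest{
			NewnodeBootnodeRequest: &msg.NewNodeBootNodeRequest{},
		},
	}

	// Marshal to the wire format and unmarshal it back.
	data, err := proto.Marshal(m)
	if err != nil {
		log.Fatal(err)
	}
	decoded := &msg.Message{}
	if err := proto.Unmarshal(data, decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetType(), decoded.GetNewnodeBootnodeRequest() != nil)
}
```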

@ -3,6 +3,7 @@ package node
import (
"bytes"
"encoding/gob"
"fmt"
"log"
"github.com/ethereum/go-ethereum/common"
@ -10,16 +11,12 @@ import (
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/api/proto"
peer "github.com/libp2p/go-libp2p-peer"
)
// MessageType indicates the specific type of message under the Node category
type MessageType byte
// ProtocolVersion is a constant defined as the version of the Harmony protocol
const (
ProtocolVersion = 1
)
// Constant of the top level Message Type exchanged among nodes
const (
Transaction MessageType = iota
@ -57,6 +54,41 @@ const (
Unlock
)
// RoleType defines the role of the node
type RoleType int
// Type of roles of a node
const (
ValidatorRole RoleType = iota
ClientRole
)
func (r RoleType) String() string {
switch r {
case ValidatorRole:
return "Validator"
case ClientRole:
return "Client"
}
return "Unknown"
}
// Info refers to the Peer struct in p2p/peer.go.
// It is basically a simplified version of Peer,
// used for network transport.
type Info struct {
IP string
Port string
PubKey []byte
ValidatorID int
Role RoleType
PeerID peer.ID // Peerstore ID
}
func (info Info) String() string {
return fmt.Sprintf("Info:%v/%v=>%v/%v", info.IP, info.Port, info.ValidatorID, info.PeerID)
}
// BlockMessageType represents the type of messages used for Node/Block
type BlockMessageType int

@ -3,10 +3,10 @@ package node
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
hdb "github.com/harmony-one/harmony/internal/db"
// "fmt"
"math/big"
@ -75,7 +75,7 @@ func TestConstructStopMessage(t *testing.T) {
func TestConstructBlocksSyncMessage(t *testing.T) {
db := hdb.NewMemDatabase()
db := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
root := statedb.IntermediateRoot(false)

@ -1,9 +1,9 @@
package explorer
import (
"context"
"encoding/json"
"fmt"
"log"
"net"
"net/http"
"os"
@ -12,6 +12,8 @@ import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/gorilla/mux"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
)
// Constants for explorer service.
@ -25,6 +27,32 @@ type Service struct {
IP string
Port string
storage *Storage
server *http.Server
}
// New returns explorer service.
func New(selfPeer *p2p.Peer) *Service {
return &Service{
IP: selfPeer.IP,
Port: selfPeer.Port,
}
}
// StartService starts explorer service.
func (s *Service) StartService() {
utils.GetLogInstance().Info("Starting explorer service.")
s.Init(true)
s.server = s.Run()
}
// StopService shuts down the explorer service.
func (s *Service) StopService() {
utils.GetLogInstance().Info("Shutting down explorer service.")
if err := s.server.Shutdown(context.Background()); err != nil {
utils.GetLogInstance().Error("Error when shutting down explorer server", "error", err)
} else {
utils.GetLogInstance().Error("Shutting down explorer server successufully")
}
}
// GetExplorerPort returns the port serving explorer dashboard. This port is explorerPortDifference less than the node port.
@ -32,7 +60,7 @@ func GetExplorerPort(nodePort string) string {
if port, err := strconv.Atoi(nodePort); err == nil {
return fmt.Sprintf("%d", port-explorerPortDifference)
}
Log.Error("error on parsing.")
utils.GetLogInstance().Error("error on parsing.")
return ""
}
@ -42,7 +70,7 @@ func (s *Service) Init(remove bool) {
}
// Run starts serving the explorer.
func (s *Service) Run() {
func (s *Service) Run() *http.Server {
// Init address.
addr := net.JoinHostPort("", GetExplorerPort(s.Port))
@ -60,8 +88,10 @@ func (s *Service) Run() {
s.router.Path("/address").HandlerFunc(s.GetExplorerAddress)
// Do serving now.
fmt.Println("Listening to:", GetExplorerPort(s.Port))
log.Fatal(http.ListenAndServe(addr, s.router))
utils.GetLogInstance().Info("Listening on ", "port: ", GetExplorerPort(s.Port))
server := &http.Server{Addr: addr, Handler: s.router}
go server.ListenAndServe()
return server
}
// GetAccountBlocks returns a list of types.Block to serve the blocks end-point.
@ -80,7 +110,7 @@ func (s *Service) GetAccountBlocks(from, to int) []*types.Block {
}
block := new(types.Block)
if rlp.DecodeBytes(data, block) != nil {
Log.Error("Error on getting from db")
utils.GetLogInstance().Error("Error on getting from db")
os.Exit(1)
}
blocks = append(blocks, block)

@ -6,10 +6,10 @@ import (
"strconv"
"sync"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/db"
"github.com/harmony-one/harmony/log"
)
// Constants for storage.
@ -49,7 +49,7 @@ var once sync.Once
// Storage dumps the block info into leveldb.
type Storage struct {
db *db.LDBDatabase
db *ethdb.LDBDatabase
}
// GetStorageInstance returns the storage instance by using the singleton pattern.
@ -71,13 +71,13 @@ func (storage *Storage) Init(ip, port string, remove bool) {
Log.Error(err.Error())
}
}
if storage.db, err = db.NewLDBDatabase(dbFileName, 0, 0); err != nil {
if storage.db, err = ethdb.NewLDBDatabase(dbFileName, 0, 0); err != nil {
Log.Error(err.Error())
}
}
// GetDB returns the LDBDatabase of the storage.
func (storage *Storage) GetDB() *db.LDBDatabase {
func (storage *Storage) GetDB() *ethdb.LDBDatabase {
return storage.db
}

@ -2,11 +2,12 @@ package downloader
import (
"context"
"encoding/binary"
"fmt"
"log"
"time"
pb "github.com/harmony-one/harmony/api/services/syncing/downloader/proto"
"github.com/harmony-one/harmony/internal/utils"
"google.golang.org/grpc"
)
@ -24,7 +25,7 @@ func ClientSetup(ip, port string) *Client {
var err error
client.conn, err = grpc.Dial(fmt.Sprintf("%s:%s", ip, port), client.opts...)
if err != nil {
log.Fatalf("fail to dial: %v", err)
utils.GetLogInstance().Info("client.go:ClientSetup fail to dial: ", "error", err)
return nil
}
@ -34,17 +35,20 @@ func ClientSetup(ip, port string) *Client {
// Close closes the Client.
func (client *Client) Close() {
client.conn.Close()
err := client.conn.Close()
if err != nil {
utils.GetLogInstance().Info("unable to close connection ")
}
}
// GetBlockHashes gets block hashes from all the peers by calling grpc request.
func (client *Client) GetBlockHashes() *pb.DownloaderResponse {
func (client *Client) GetBlockHashes(startHash []byte) *pb.DownloaderResponse {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
request := &pb.DownloaderRequest{Type: pb.DownloaderRequest_HEADER}
request := &pb.DownloaderRequest{Type: pb.DownloaderRequest_HEADER, BlockHash: startHash}
response, err := client.dlClient.Query(ctx, request)
if err != nil {
log.Fatalf("Error")
utils.GetLogInstance().Info("[SYNC] GetBlockHashes query failed", "error", err)
}
return response
}
@ -61,7 +65,46 @@ func (client *Client) GetBlocks(hashes [][]byte) *pb.DownloaderResponse {
}
response, err := client.dlClient.Query(ctx, request)
if err != nil {
log.Fatalf("Error")
utils.GetLogInstance().Info("[SYNC] downloader/client.go:GetBlocks query failed.", "error", err)
}
return response
}
// Register registers the node's ip/port information with peers so that it will receive newly created blocks in the future.
// hash is the bytes of the "ip:port" string representation.
func (client *Client) Register(hash []byte) *pb.DownloaderResponse {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
request := &pb.DownloaderRequest{Type: pb.DownloaderRequest_REGISTER}
request.PeerHash = make([]byte, len(hash))
copy(request.PeerHash, hash)
response, err := client.dlClient.Query(ctx, request)
if err != nil {
utils.GetLogInstance().Info("[SYNC] client.go:Register failed.", "error", err)
}
return response
}
// PushNewBlock will send the latest verified block to registered nodes
func (client *Client) PushNewBlock(peerID uint32, blockHash []byte, timeout bool) *pb.DownloaderResponse {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
peerHash := make([]byte, 4)
binary.BigEndian.PutUint32(peerHash, peerID)
request := &pb.DownloaderRequest{Type: pb.DownloaderRequest_NEWBLOCK}
request.BlockHash = make([]byte, len(blockHash))
copy(request.BlockHash, blockHash)
request.PeerHash = make([]byte, len(peerHash))
copy(request.PeerHash, peerHash)
if timeout {
request.Type = pb.DownloaderRequest_REGISTERTIMEOUT
}
response, err := client.dlClient.Query(ctx, request)
if err != nil {
utils.GetLogInstance().Info("[SYNC] unable to send new block to unsync node", "error", err)
}
return response
}

@ -6,10 +6,9 @@ package downloader
import (
context "context"
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
@ -21,26 +20,35 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type DownloaderRequest_RequestType int32
const (
DownloaderRequest_HEADER DownloaderRequest_RequestType = 0
DownloaderRequest_BLOCK DownloaderRequest_RequestType = 1
DownloaderRequest_UNKOWN DownloaderRequest_RequestType = 2
DownloaderRequest_HEADER DownloaderRequest_RequestType = 0
DownloaderRequest_BLOCK DownloaderRequest_RequestType = 1
DownloaderRequest_NEWBLOCK DownloaderRequest_RequestType = 2
DownloaderRequest_REGISTER DownloaderRequest_RequestType = 3
DownloaderRequest_REGISTERTIMEOUT DownloaderRequest_RequestType = 4
DownloaderRequest_UNKNOWN DownloaderRequest_RequestType = 5
)
var DownloaderRequest_RequestType_name = map[int32]string{
0: "HEADER",
1: "BLOCK",
2: "UNKOWN",
2: "NEWBLOCK",
3: "REGISTER",
4: "REGISTERTIMEOUT",
5: "UNKNOWN",
}
var DownloaderRequest_RequestType_value = map[string]int32{
"HEADER": 0,
"BLOCK": 1,
"UNKOWN": 2,
"HEADER": 0,
"BLOCK": 1,
"NEWBLOCK": 2,
"REGISTER": 3,
"REGISTERTIMEOUT": 4,
"UNKNOWN": 5,
}
func (x DownloaderRequest_RequestType) String() string {
@ -51,12 +59,42 @@ func (DownloaderRequest_RequestType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_6a99ec95c7ab1ff1, []int{0, 0}
}
type DownloaderResponse_RegisterResponseType int32
const (
DownloaderResponse_SUCCESS DownloaderResponse_RegisterResponseType = 0
DownloaderResponse_FAIL DownloaderResponse_RegisterResponseType = 1
DownloaderResponse_INSYNC DownloaderResponse_RegisterResponseType = 2
)
var DownloaderResponse_RegisterResponseType_name = map[int32]string{
0: "SUCCESS",
1: "FAIL",
2: "INSYNC",
}
var DownloaderResponse_RegisterResponseType_value = map[string]int32{
"SUCCESS": 0,
"FAIL": 1,
"INSYNC": 2,
}
func (x DownloaderResponse_RegisterResponseType) String() string {
return proto.EnumName(DownloaderResponse_RegisterResponseType_name, int32(x))
}
func (DownloaderResponse_RegisterResponseType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_6a99ec95c7ab1ff1, []int{1, 0}
}
// DownloaderRequest is the generic download request.
type DownloaderRequest struct {
// Request type.
Type DownloaderRequest_RequestType `protobuf:"varint,1,opt,name=type,proto3,enum=downloader.DownloaderRequest_RequestType" json:"type,omitempty"`
// The hashes of the blocks we want to download.
Hashes [][]byte `protobuf:"bytes,2,rep,name=hashes,proto3" json:"hashes,omitempty"`
PeerHash []byte `protobuf:"bytes,3,opt,name=peerHash,proto3" json:"peerHash,omitempty"`
BlockHash []byte `protobuf:"bytes,4,opt,name=blockHash,proto3" json:"blockHash,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -101,13 +139,29 @@ func (m *DownloaderRequest) GetHashes() [][]byte {
return nil
}
func (m *DownloaderRequest) GetPeerHash() []byte {
if m != nil {
return m.PeerHash
}
return nil
}
func (m *DownloaderRequest) GetBlockHash() []byte {
if m != nil {
return m.BlockHash
}
return nil
}
// DownloaderResponse is the generic response of DownloaderRequest.
type DownloaderResponse struct {
// payload of Block.
Payload [][]byte `protobuf:"bytes,1,rep,name=payload,proto3" json:"payload,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Payload [][]byte `protobuf:"bytes,1,rep,name=payload,proto3" json:"payload,omitempty"`
// response of registration request
Type DownloaderResponse_RegisterResponseType `protobuf:"varint,2,opt,name=type,proto3,enum=downloader.DownloaderResponse_RegisterResponseType" json:"type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DownloaderResponse) Reset() { *m = DownloaderResponse{} }
@ -142,8 +196,16 @@ func (m *DownloaderResponse) GetPayload() [][]byte {
return nil
}
func (m *DownloaderResponse) GetType() DownloaderResponse_RegisterResponseType {
if m != nil {
return m.Type
}
return DownloaderResponse_SUCCESS
}
func init() {
proto.RegisterEnum("downloader.DownloaderRequest_RequestType", DownloaderRequest_RequestType_name, DownloaderRequest_RequestType_value)
proto.RegisterEnum("downloader.DownloaderResponse_RegisterResponseType", DownloaderResponse_RegisterResponseType_name, DownloaderResponse_RegisterResponseType_value)
proto.RegisterType((*DownloaderRequest)(nil), "downloader.DownloaderRequest")
proto.RegisterType((*DownloaderResponse)(nil), "downloader.DownloaderResponse")
}
@ -151,21 +213,29 @@ func init() {
func init() { proto.RegisterFile("downloader.proto", fileDescriptor_6a99ec95c7ab1ff1) }
var fileDescriptor_6a99ec95c7ab1ff1 = []byte{
// 210 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0xc9, 0x2f, 0xcf,
0xcb, 0xc9, 0x4f, 0x4c, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88,
0x28, 0xcd, 0x61, 0xe4, 0x12, 0x74, 0x81, 0x73, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84,
0x6c, 0xb9, 0x58, 0x4a, 0x2a, 0x0b, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0xf8, 0x8c, 0x34, 0xf5,
0x90, 0x8c, 0xc0, 0x50, 0xac, 0x07, 0xa5, 0x43, 0x2a, 0x0b, 0x52, 0x83, 0xc0, 0xda, 0x84, 0xc4,
0xb8, 0xd8, 0x32, 0x12, 0x8b, 0x33, 0x52, 0x8b, 0x25, 0x98, 0x14, 0x98, 0x35, 0x78, 0x82, 0xa0,
0x3c, 0x25, 0x03, 0x2e, 0x6e, 0x24, 0xc5, 0x42, 0x5c, 0x5c, 0x6c, 0x1e, 0xae, 0x8e, 0x2e, 0xae,
0x41, 0x02, 0x0c, 0x42, 0x9c, 0x5c, 0xac, 0x4e, 0x3e, 0xfe, 0xce, 0xde, 0x02, 0x8c, 0x20, 0xe1,
0x50, 0x3f, 0x6f, 0xff, 0x70, 0x3f, 0x01, 0x26, 0x25, 0x3d, 0x2e, 0x21, 0x64, 0x0b, 0x8b, 0x0b,
0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x24, 0xb8, 0xd8, 0x0b, 0x12, 0x2b, 0x41, 0x82, 0x12, 0x8c, 0x60,
0x0b, 0x60, 0x5c, 0xa3, 0x30, 0x2e, 0x2e, 0x84, 0x7a, 0x21, 0x0f, 0x2e, 0xd6, 0xc0, 0xd2, 0xd4,
0xa2, 0x4a, 0x21, 0x59, 0xbc, 0x3e, 0x90, 0x92, 0xc3, 0x25, 0x0d, 0xb1, 0x4f, 0x89, 0x21, 0x89,
0x0d, 0x1c, 0x72, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xab, 0xfd, 0xd9, 0xfa, 0x4d, 0x01,
0x00, 0x00,
// 337 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x4e, 0xc2, 0x40,
0x10, 0xc6, 0xd9, 0x52, 0xfe, 0x0d, 0x44, 0xd7, 0xd1, 0x98, 0x86, 0xa8, 0x21, 0x3d, 0xe1, 0xa5,
0x07, 0x38, 0x79, 0xf0, 0x80, 0x65, 0x85, 0x06, 0x2c, 0x71, 0x5b, 0x24, 0x1e, 0x8b, 0x6c, 0xc4,
0x48, 0x68, 0xed, 0x96, 0x98, 0xbe, 0x81, 0xcf, 0xe3, 0x13, 0x9a, 0x96, 0x3f, 0x25, 0x51, 0x39,
0x35, 0xbf, 0x6f, 0xba, 0x33, 0xf3, 0x7d, 0xbb, 0x40, 0x67, 0xfe, 0xe7, 0x72, 0xe1, 0x7b, 0x33,
0x11, 0x1a, 0x41, 0xe8, 0x47, 0x3e, 0x42, 0xa6, 0xe8, 0x5f, 0x0a, 0x9c, 0x74, 0x77, 0xc8, 0xc5,
0xc7, 0x4a, 0xc8, 0x08, 0x6f, 0x41, 0x8d, 0xe2, 0x40, 0x68, 0xa4, 0x41, 0x9a, 0x47, 0xad, 0x6b,
0x63, 0xaf, 0xc5, 0xaf, 0x9f, 0x8d, 0xcd, 0xd7, 0x8d, 0x03, 0xc1, 0xd3, 0x63, 0x78, 0x0e, 0xc5,
0xb9, 0x27, 0xe7, 0x42, 0x6a, 0x4a, 0x23, 0xdf, 0xac, 0xf1, 0x0d, 0x61, 0x1d, 0xca, 0x81, 0x10,
0x61, 0xdf, 0x93, 0x73, 0x2d, 0xdf, 0x20, 0xcd, 0x1a, 0xdf, 0x31, 0x5e, 0x40, 0x65, 0xba, 0xf0,
0x5f, 0xde, 0xd3, 0xa2, 0x9a, 0x16, 0x33, 0x41, 0x9f, 0x42, 0x75, 0x6f, 0x0c, 0x02, 0x14, 0xfb,
0xac, 0xd3, 0x65, 0x9c, 0xe6, 0xb0, 0x02, 0x85, 0xbb, 0xe1, 0xc8, 0x1c, 0x50, 0x82, 0x35, 0x28,
0xdb, 0x6c, 0xb2, 0x26, 0x25, 0x21, 0xce, 0x7a, 0x96, 0xe3, 0x32, 0x4e, 0xf3, 0x78, 0x0a, 0xc7,
0x5b, 0x72, 0xad, 0x07, 0x36, 0x1a, 0xbb, 0x54, 0xc5, 0x2a, 0x94, 0xc6, 0xf6, 0xc0, 0x1e, 0x4d,
0x6c, 0x5a, 0xd0, 0xbf, 0x09, 0xe0, 0xbe, 0x3b, 0x19, 0xf8, 0x4b, 0x29, 0x50, 0x83, 0x52, 0xe0,
0xc5, 0x89, 0xa8, 0x91, 0xd4, 0xcd, 0x16, 0xb1, 0xb7, 0x49, 0x49, 0x49, 0x53, 0x6a, 0xff, 0x97,
0xd2, 0xba, 0x8f, 0xc1, 0xc5, 0xeb, 0x9b, 0x8c, 0x32, 0x21, 0xcb, 0x4b, 0xbf, 0x81, 0xb3, 0xbf,
0xaa, 0xc9, 0x7a, 0xce, 0xd8, 0x34, 0x99, 0xe3, 0xd0, 0x1c, 0x96, 0x41, 0xbd, 0xef, 0x58, 0x43,
0x4a, 0x12, 0xf7, 0x96, 0xed, 0x3c, 0xdb, 0x26, 0x55, 0x5a, 0x4f, 0x00, 0xd9, 0x2c, 0xec, 0x43,
0xe1, 0x71, 0x25, 0xc2, 0x18, 0x2f, 0x0f, 0x5e, 0x59, 0xfd, 0xea, 0xf0, 0xae, 0x7a, 0x6e, 0x5a,
0x4c, 0x9f, 0x4a, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xfb, 0x70, 0x9d, 0x3e, 0x02, 0x00,
0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

@ -12,7 +12,10 @@ message DownloaderRequest {
enum RequestType {
HEADER = 0;
BLOCK = 1;
UNKOWN = 2;
NEWBLOCK = 2;
REGISTER = 3;
REGISTERTIMEOUT = 4;
UNKNOWN = 5;
}
// Request type.
@ -20,10 +23,19 @@ message DownloaderRequest {
// The hashes of the blocks we want to download.
repeated bytes hashes = 2;
bytes peerHash = 3;
bytes blockHash = 4;
}
// DownloaderResponse is the generic response of DownloaderRequest.
message DownloaderResponse {
enum RegisterResponseType {
SUCCESS = 0;
FAIL = 1;
INSYNC = 2; // node is now in sync, remove it from the broadcast list
}
// payload of Block.
repeated bytes payload = 1;
// response of registration request
RegisterResponseType type = 2;
}
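As a hedged illustration of the registration semantics above (not code from this change), a broadcaster pushing new blocks to registered peers might drop any peer whose response reports INSYNC; the broadcaster type below and its list of registered clients are hypothetical, while Client.PushNewBlock and the response types come from this change.

```go
package main

import (
	"github.com/harmony-one/harmony/api/services/syncing/downloader"
	pb "github.com/harmony-one/harmony/api/services/syncing/downloader/proto"
)

// broadcaster is a hypothetical holder of registered downloader clients.
type broadcaster struct {
	clients []*downloader.Client
	peerIDs []uint32
}

// pushToRegistered sends a newly verified block hash to every registered peer
// and prunes peers that report they are already in sync (INSYNC).
func (b *broadcaster) pushToRegistered(blockHash []byte) {
	keptClients := b.clients[:0]
	keptIDs := b.peerIDs[:0]
	for i, c := range b.clients {
		resp := c.PushNewBlock(b.peerIDs[i], blockHash, false)
		if resp != nil && resp.Type == pb.DownloaderResponse_INSYNC {
			// The peer has caught up; remove it from the broadcast list.
			continue
		}
		keptClients = append(keptClients, c)
		keptIDs = append(keptIDs, b.peerIDs[i])
	}
	b.clients = keptClients
	b.peerIDs = keptIDs
}

func main() {}
```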

@ -5,9 +5,8 @@ import (
"log"
"net"
"google.golang.org/grpc"
pb "github.com/harmony-one/harmony/api/services/syncing/downloader/proto"
"google.golang.org/grpc"
)
// Constants for downloader server.

@ -4,5 +4,8 @@ import "errors"
// Errors ...
var (
ErrSyncPeerConfigClientNotReady = errors.New("client is not ready")
ErrSyncPeerConfigClientNotReady = errors.New("[SYNC]: client is not ready")
ErrRegistrationFail = errors.New("[SYNC]: registration failed")
ErrGetBlock = errors.New("[SYNC]: get block failed")
ErrGetBlockHash = errors.New("[SYNC]: get blockhash failed")
)

@ -9,5 +9,5 @@ import (
type StateSyncInterface interface {
// Syncing blockchain from other peers.
// The returned channel is the signal of syncing finish.
ProcessStateSyncFromPeers(peers []p2p.Peer, bc *core.BlockChain) (chan struct{}, error)
ProcessStateSyncFromPeers(startHash []byte, peers []p2p.Peer, bc *core.BlockChain) (chan struct{}, error)
}

@ -2,16 +2,25 @@ package syncing
import (
"bytes"
"encoding/binary"
"fmt"
"reflect"
"sort"
"strconv"
"sync"
"time"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/node/worker"
"github.com/Workiva/go-datastructures/queue"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/api/services/syncing/downloader"
"github.com/harmony-one/harmony/log"
pb "github.com/harmony-one/harmony/api/services/syncing/downloader/proto"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/p2p"
)
@ -20,6 +29,8 @@ const (
ConsensusRatio = float64(0.66)
SleepTimeAfterNonConsensusBlockHashes = time.Second * 30
TimesToFail = 5
RegistrationNumber = 3
SyncingPortDifference = 3000
)
// SyncPeerConfig is peer config to sync.
@ -27,7 +38,9 @@ type SyncPeerConfig struct {
ip string
port string
client *downloader.Client
blockHashes [][]byte
blockHashes [][]byte // block hashes received before the node starts syncing
newBlocks []*types.Block // blocks received after the node starts syncing
mux sync.Mutex
}
// GetClient returns client pointer of downloader.Client
@ -49,18 +62,67 @@ type SyncConfig struct {
peers []*SyncPeerConfig
}
// GetStateSync returns the implementation of StateSyncInterface interface.
func GetStateSync() *StateSync {
return &StateSync{}
// CreateStateSync returns the implementation of StateSyncInterface interface.
func CreateStateSync(ip string, port string) *StateSync {
stateSync := &StateSync{}
stateSync.selfip = ip
stateSync.selfport = port
stateSync.commonBlocks = make(map[int]*types.Block)
stateSync.lastMileBlocks = []*types.Block{}
return stateSync
}
// StateSync is the struct that implements StateSyncInterface.
type StateSync struct {
selfip string
selfport string
peerNumber int
activePeerNumber int
blockHeight int
commonBlocks map[int]*types.Block
lastMileBlocks []*types.Block // last mile blocks to catch up with the consensus
syncConfig *SyncConfig
stateSyncTaskQueue *queue.Queue
syncMux sync.Mutex
}
// AddLastMileBlock adds the latest few blocks into the queue for syncing
func (ss *StateSync) AddLastMileBlock(block *types.Block) {
ss.lastMileBlocks = append(ss.lastMileBlocks, block)
}
// CloseConnections closes the grpc connections for the state sync clients
func (ss *StateSync) CloseConnections() {
for _, pc := range ss.syncConfig.peers {
if pc.client != nil {
pc.client.Close()
}
}
}
// GetServicePort returns the service port derived from the syncing port
// TODO: really need to use a unique ID instead of ip/port
func GetServicePort(nodePort string) string {
if port, err := strconv.Atoi(nodePort); err == nil {
return fmt.Sprintf("%d", port+SyncingPortDifference)
}
Log.Warn("unable to get service port")
return ""
}
// AddNewBlock adds a newly received block into the state syncing queue
func (ss *StateSync) AddNewBlock(peerHash []byte, block *types.Block) {
for i, pc := range ss.syncConfig.peers {
pid := utils.GetUniqueIDFromIPPort(pc.ip, GetServicePort(pc.port))
ph := make([]byte, 4)
binary.BigEndian.PutUint32(ph, pid)
if bytes.Compare(ph, peerHash) != 0 {
continue
}
pc.mux.Lock()
pc.newBlocks = append(pc.newBlocks, block)
pc.mux.Unlock()
Log.Debug("[SYNC] new block received", "total", len(ss.syncConfig.peers[i].newBlocks), "blockHeight", block.NumberU64())
}
}
// CreateTestSyncPeerConfig used for testing.
@ -87,46 +149,23 @@ func CompareSyncPeerConfigByblockHashes(a *SyncPeerConfig, b *SyncPeerConfig) in
return 0
}
// GetBlockHashes gets block hashes by calling grpc request to the corresponding peer.
func (peerConfig *SyncPeerConfig) GetBlockHashes() error {
if peerConfig.client == nil {
return ErrSyncPeerConfigClientNotReady
}
response := peerConfig.client.GetBlockHashes()
peerConfig.blockHashes = make([][]byte, len(response.Payload))
for i := range response.Payload {
peerConfig.blockHashes[i] = make([]byte, len(response.Payload[i]))
copy(peerConfig.blockHashes[i], response.Payload[i])
}
return nil
}
// GetBlocks gets blocks by calling grpc request to the corresponding peer.
func (peerConfig *SyncPeerConfig) GetBlocks(hashes [][]byte) ([][]byte, error) {
if peerConfig.client == nil {
return nil, ErrSyncPeerConfigClientNotReady
}
response := peerConfig.client.GetBlocks(hashes)
if response == nil {
return nil, ErrGetBlock
}
return response.Payload, nil
}
// ProcessStateSyncFromPeers used to do state sync.
func (ss *StateSync) ProcessStateSyncFromPeers(peers []p2p.Peer, bc *core.BlockChain) (chan struct{}, error) {
// TODO: Validate peers.
done := make(chan struct{})
go func() {
ss.StartStateSync(peers, bc)
done <- struct{}{}
}()
return done, nil
}
// CreateSyncConfig creates SyncConfig for StateSync object.
func (ss *StateSync) CreateSyncConfig(peers []p2p.Peer) {
Log.Debug("CreateSyncConfig: len of peers", "len", len(peers))
Log.Debug("CreateSyncConfig: len of peers", "peers", peers)
ss.peerNumber = len(peers)
Log.Debug("CreateSyncConfig: hello")
ss.syncConfig = &SyncConfig{
peers: make([]*SyncPeerConfig, ss.peerNumber),
}
@ -135,9 +174,9 @@ func (ss *StateSync) CreateSyncConfig(peers []p2p.Peer) {
ip: peers[id].IP,
port: peers[id].Port,
}
Log.Debug("CreateSyncConfig: peer port to connect", "port", peers[id].Port)
Log.Debug("[SYNC] CreateSyncConfig: peer port to connect", "port", peers[id].Port)
}
Log.Info("syncing: Finished creating SyncConfig.")
Log.Info("[SYNC] syncing: Finished creating SyncConfig.")
}
// MakeConnectionToPeers makes grpc connection to all peers.
@ -229,19 +268,21 @@ func (ss *StateSync) GetBlockHashesConsensusAndCleanUp() bool {
}
// GetConsensusHashes gets all hashes needed to download.
func (ss *StateSync) GetConsensusHashes() bool {
func (ss *StateSync) GetConsensusHashes(startHash []byte) bool {
count := 0
for {
var wg sync.WaitGroup
wg.Add(ss.activePeerNumber)
for id := range ss.syncConfig.peers {
if ss.syncConfig.peers[id].client == nil {
continue
}
wg.Add(1)
go func(peerConfig *SyncPeerConfig) {
defer wg.Done()
response := peerConfig.client.GetBlockHashes()
response := peerConfig.client.GetBlockHashes(startHash)
if response == nil {
return
}
peerConfig.blockHashes = response.Payload
}(ss.syncConfig.peers[id])
}
@ -260,25 +301,17 @@ func (ss *StateSync) GetConsensusHashes() bool {
return true
}
// generateStateSyncTaskQueue builds the queue of block download tasks from the consensus block hashes.
func (ss *StateSync) generateStateSyncTaskQueue(bc *core.BlockChain) {
ss.stateSyncTaskQueue = queue.New(0)
for _, configPeer := range ss.syncConfig.peers {
if configPeer.client != nil {
ss.blockHeight = len(configPeer.blockHashes)
// TODO (minh) rework the syncing for account model.
//bc.Blocks = append(bc.Blocks, make([]*blockchain.Block, ss.blockHeight-len(bc.Blocks))...)
//for id, blockHash := range configPeer.blockHashes {
// if bc.Blocks[id] == nil || !reflect.DeepEqual(bc.Blocks[id].Hash[:], blockHash) {
// ss.stateSyncTaskQueue.Put(SyncBlockTask{index: id, blockHash: blockHash})
// // TODO(minhdoan): Check error
// }
//}
for id, blockHash := range configPeer.blockHashes {
ss.stateSyncTaskQueue.Put(SyncBlockTask{index: id, blockHash: blockHash})
}
break
}
}
Log.Info("syncing: Finished generateStateSyncTaskQueue.")
Log.Info("syncing: Finished generateStateSyncTaskQueue", "length", ss.stateSyncTaskQueue.Len())
}
// downloadBlocks downloads blocks from state sync task queue.
@ -286,6 +319,7 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
// Initialize blockchain
var wg sync.WaitGroup
wg.Add(ss.activePeerNumber)
count := 0
for i := range ss.syncConfig.peers {
if ss.syncConfig.peers[i].client == nil {
continue
@ -293,45 +327,241 @@ func (ss *StateSync) downloadBlocks(bc *core.BlockChain) {
go func(peerConfig *SyncPeerConfig, stateSyncTaskQueue *queue.Queue, bc *core.BlockChain) {
defer wg.Done()
for !stateSyncTaskQueue.Empty() {
task, err := stateSyncTaskQueue.Poll(1, time.Millisecond)
task, err := ss.stateSyncTaskQueue.Poll(1, time.Millisecond)
if err == queue.ErrTimeout {
Log.Debug("[SYNC] ss.stateSyncTaskQueue poll timeout", "error", err)
break
}
syncTask := task[0].(SyncBlockTask)
for {
//id := syncTask.index
_, err := peerConfig.GetBlocks([][]byte{syncTask.blockHash})
if err == nil {
// As of now, only send and ask for one block.
// TODO (minh) rework the syncing for account model.
//bc.Blocks[id], err = blockchain.DeserializeBlock(payload[0])
//_, err = blockchain.DeserializeBlock(payload[0])
if err == nil {
break
}
//id := syncTask.index
payload, err := peerConfig.GetBlocks([][]byte{syncTask.blockHash})
if err != nil {
count++
Log.Debug("[SYNC] GetBlocks failed", "failNumber", count)
if count > TimesToFail {
break
}
ss.stateSyncTaskQueue.Put(syncTask)
continue
}
var blockObj types.Block
// currently only send one block a time
err = rlp.DecodeBytes(payload[0], &blockObj)
if err != nil {
count++
Log.Debug("[SYNC] downloadBlocks: failed to DecodeBytes from received new block")
if count > TimesToFail {
break
}
ss.stateSyncTaskQueue.Put(syncTask)
continue
}
ss.syncMux.Lock()
ss.commonBlocks[syncTask.index] = &blockObj
ss.syncMux.Unlock()
}
}(ss.syncConfig.peers[i], ss.stateSyncTaskQueue, bc)
}
wg.Wait()
Log.Info("syncing: Finished downloadBlocks.")
Log.Info("[SYNC] Finished downloadBlocks.")
}
// CompareBlockByHash compares two blocks by hash; it is used to sort the blocks
func CompareBlockByHash(a *types.Block, b *types.Block) int {
ha := a.Hash()
hb := b.Hash()
return bytes.Compare(ha[:], hb[:])
}
// GetHowManyMaxConsensus returns the index of the first most-common block and how many blocks share that hash
func GetHowManyMaxConsensus(blocks []*types.Block) (int, int) {
// As all peers are sorted by their blockHashes, equal blockHashes should appear together and consecutively.
curCount := 0
curFirstID := -1
maxCount := 0
maxFirstID := -1
for i := range blocks {
if curFirstID == -1 || CompareBlockByHash(blocks[curFirstID], blocks[i]) != 0 {
curCount = 1
curFirstID = i
} else {
curCount++
}
if curCount > maxCount {
maxCount = curCount
maxFirstID = curFirstID
}
}
return maxFirstID, maxCount
}
func (ss *StateSync) getMaxConsensusBlockFromParentHash(parentHash common.Hash) *types.Block {
candidateBlocks := []*types.Block{}
ss.syncMux.Lock()
for id := range ss.syncConfig.peers {
peerConfig := ss.syncConfig.peers[id]
for _, block := range peerConfig.newBlocks {
ph := block.ParentHash()
if bytes.Compare(ph[:], parentHash[:]) == 0 {
candidateBlocks = append(candidateBlocks, block)
break
}
}
}
ss.syncMux.Unlock()
if len(candidateBlocks) == 0 {
return nil
}
// Sort by blockHashes.
sort.Slice(candidateBlocks, func(i, j int) bool {
return CompareBlockByHash(candidateBlocks[i], candidateBlocks[j]) == -1
})
maxFirstID, maxCount := GetHowManyMaxConsensus(candidateBlocks)
Log.Debug("[SYNC] Find block with matching parenthash", "parentHash", parentHash, "hash", candidateBlocks[maxFirstID].Hash(), "maxCount", maxCount)
return candidateBlocks[maxFirstID]
}
func (ss *StateSync) getBlockFromOldBlocksByParentHash(parentHash common.Hash) *types.Block {
for _, block := range ss.commonBlocks {
ph := block.ParentHash()
if bytes.Compare(ph[:], parentHash[:]) == 0 {
return block
}
}
return nil
}
func (ss *StateSync) getBlockFromLastMileBlocksByParentHash(parentHash common.Hash) *types.Block {
for _, block := range ss.lastMileBlocks {
ph := block.ParentHash()
if bytes.Compare(ph[:], parentHash[:]) == 0 {
return block
}
}
return nil
}
func (ss *StateSync) updateBlockAndStatus(block *types.Block, bc *core.BlockChain, worker *worker.Worker) bool {
Log.Info("[SYNC] Current Block", "blockHex", bc.CurrentBlock().Hash().Hex())
_, err := bc.InsertChain([]*types.Block{block})
if err != nil {
Log.Debug("Error adding new block to blockchain", "Error", err)
return false
}
Log.Info("[SYNC] new block added to blockchain", "blockHeight", bc.CurrentBlock().NumberU64(), "blockHex", bc.CurrentBlock().Hash().Hex(), "parentHex", bc.CurrentBlock().ParentHash().Hex())
ss.syncMux.Lock()
worker.UpdateCurrent()
ss.syncMux.Unlock()
return true
}
// generateNewState constructs the most recent state from the downloaded blocks
func (ss *StateSync) generateNewState(bc *core.BlockChain, worker *worker.Worker) {
// update blocks created before node start sync
parentHash := bc.CurrentBlock().Hash()
for {
block := ss.getBlockFromOldBlocksByParentHash(parentHash)
if block == nil {
break
}
ok := ss.updateBlockAndStatus(block, bc, worker)
if !ok {
break
}
parentHash = block.Hash()
}
ss.syncMux.Lock()
ss.commonBlocks = make(map[int]*types.Block)
ss.syncMux.Unlock()
// update blocks after node start sync
parentHash = bc.CurrentBlock().Hash()
for {
block := ss.getMaxConsensusBlockFromParentHash(parentHash)
if block == nil {
break
}
ok := ss.updateBlockAndStatus(block, bc, worker)
if !ok {
break
}
parentHash = block.Hash()
}
ss.syncMux.Lock()
for id := range ss.syncConfig.peers {
ss.syncConfig.peers[id].newBlocks = []*types.Block{}
}
ss.syncMux.Unlock()
// update last mile blocks if any
parentHash = bc.CurrentBlock().Hash()
for {
block := ss.getBlockFromLastMileBlocksByParentHash(parentHash)
if block == nil {
break
}
ok := ss.updateBlockAndStatus(block, bc, worker)
if !ok {
break
}
parentHash = block.Hash()
}
}
// StartStateSync starts state sync.
func (ss *StateSync) StartStateSync(peers []p2p.Peer, bc *core.BlockChain) bool {
// Creates sync config.
ss.CreateSyncConfig(peers)
// Makes connections to peers.
ss.MakeConnectionToPeers()
func (ss *StateSync) StartStateSync(startHash []byte, bc *core.BlockChain, worker *worker.Worker) {
ss.RegisterNodeInfo()
// Gets consensus hashes.
if !ss.GetConsensusHashes() {
return false
if !ss.GetConsensusHashes(startHash) {
Log.Debug("[SYNC] StartStateSync unable to reach consensus on ss.GetConsensusHashes")
return
}
Log.Debug("[SYNC] StartStateSync reach consensus on ss.GetConsensusHashes")
ss.generateStateSyncTaskQueue(bc)
// Download blocks.
if ss.stateSyncTaskQueue.Len() > 0 {
ss.downloadBlocks(bc)
}
return true
ss.generateNewState(bc, worker)
}
func (peerConfig *SyncPeerConfig) registerToBroadcast(peerHash []byte) error {
response := peerConfig.client.Register(peerHash)
if response == nil || response.Type == pb.DownloaderResponse_FAIL {
return ErrRegistrationFail
} else if response.Type == pb.DownloaderResponse_SUCCESS {
return nil
}
return ErrRegistrationFail
}
// RegisterNodeInfo registers the node with peers so it will accept future new-block broadcasts.
// It returns the number of successful registrations.
func (ss *StateSync) RegisterNodeInfo() int {
ss.CleanUpNilPeers()
registrationNumber := RegistrationNumber
Log.Debug("[SYNC] node registration to peers", "registrationNumber", registrationNumber, "activePeerNumber", ss.activePeerNumber)
peerID := utils.GetUniqueIDFromIPPort(ss.selfip, ss.selfport)
peerHash := make([]byte, 4)
binary.BigEndian.PutUint32(peerHash[:], peerID)
count := 0
for id := range ss.syncConfig.peers {
peerConfig := ss.syncConfig.peers[id]
if count >= registrationNumber {
break
}
if peerConfig.client == nil {
continue
}
err := peerConfig.registerToBroadcast(peerHash)
if err != nil {
Log.Debug("[SYNC] register failed to peer", "ip", peerConfig.ip, "port", peerConfig.port, "peerHash", peerHash)
continue
}
Log.Debug("[SYNC] register success", "ip", peerConfig.ip, "port", peerConfig.port)
count++
}
return count
}

@ -0,0 +1,15 @@
### Full state syncing
A node downloads all the missing blocks until it catches up with the block that is currently going through consensus.
### Node states
A node can be in one of the following states:
NodeInit, NodeWaitToJoin, NodeNotInSync, NodeOffline, NodeReadyForConsensus, NodeDoingConsensus
When a node joins the network, it joins its shard and tries to participate in the consensus process. It assumes its status is NodeReadyForConsensus until it finds that it is unable to verify a new block; it then moves its status to NodeNotInSync. After the syncing process finishes, its status becomes NodeReadyForConsensus again. Simply speaking, most of the time its status alternates between these two states.
### Doing syncing
The syncing process consists of 3 parts: download the old blocks whose timestamps are before the state-syncing start time; register with a few peers (full nodes) and accept new blocks whose timestamps are after the state-syncing start time; catch the last mile blocks from the consensus process once the node's latest block is only 1~2 blocks behind the current consensus block.
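A minimal driver sketch of this flow, using the CreateStateSync / StartStateSync API introduced in this change (the exact call sites and ordering in the node code may differ):

```go
package main

import (
	"github.com/harmony-one/harmony/api/services/syncing"
	"github.com/harmony-one/harmony/core"
	"github.com/harmony-one/harmony/core/types"
	"github.com/harmony-one/harmony/node/worker"
	"github.com/harmony-one/harmony/p2p"
)

// runFullSync is a sketch of how a node that finds itself NodeNotInSync could
// drive the syncing process. startHash is the hash of the node's current tip.
func runFullSync(selfIP, selfPort string, startHash []byte, peers []p2p.Peer,
	bc *core.BlockChain, w *worker.Worker, lastMile []*types.Block) {
	ss := syncing.CreateStateSync(selfIP, selfPort)

	// Connect to peers that can serve the old blocks.
	ss.CreateSyncConfig(peers)
	ss.MakeConnectionToPeers()

	// Queue any last-mile blocks already received from the consensus process.
	for _, blk := range lastMile {
		ss.AddLastMileBlock(blk)
	}

	// Register with peers for new blocks, download the old blocks starting
	// from startHash, then replay common, new and last-mile blocks.
	ss.StartStateSync(startHash, bc, w)

	// Release the grpc connections once syncing is done.
	ss.CloseConnections()
}

func main() {}
```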

@ -2,7 +2,7 @@ version: 0.0
os: linux
files:
- source: /
destination: /home/ec2-user/projects/src/harmony-benchmark
destination: /home/ec2-user/projects/src/harmony
hooks:
BeforeInstall:
- location: aws-scripts/say_hello.sh
@ -19,4 +19,4 @@ hooks:
ApplicationStop:
- location: aws-scripts/say_bye.sh
timeout: 10
runas: root
runas: root

@ -6,8 +6,9 @@ import (
"os"
"path"
"github.com/ethereum/go-ethereum/log"
beaconchain "github.com/harmony-one/harmony/internal/beaconchain/libs"
"github.com/harmony-one/harmony/log"
"github.com/harmony-one/harmony/internal/utils"
)
var (
@ -27,18 +28,32 @@ func main() {
ip := flag.String("ip", "127.0.0.1", "ip on which beaconchain listens")
port := flag.String("port", "8081", "port on which beaconchain listens")
versionFlag := flag.Bool("version", false, "Output version info")
resetFlag := flag.String("path", "bc_config.json", "path to file")
flag.Parse()
if *versionFlag {
printVersion(os.Args[0])
}
h := log.StdoutHandler
h := log.StreamHandler(os.Stdout, log.TerminalFormat(false))
log.Root().SetHandler(h)
var bc *beaconchain.BeaconChain
if _, err := os.Stat(*resetFlag); err == nil {
bc, err = beaconchain.LoadBeaconChainInfo(*resetFlag)
if err != nil {
fmt.Fprintf(os.Stderr, "Could not reset beaconchain from file: %+v\n", err)
}
} else {
fmt.Printf("Starting new beaconchain\n")
beaconchain.SetSaveFile(*resetFlag)
priKey, _, _ := utils.GenKeyP2P(*ip, *port)
bc = beaconchain.New(*numShards, *ip, *port, priKey)
}
bc := beaconchain.New(*numShards, *ip, *port)
go bc.SupportRPC()
fmt.Printf("Beacon Chain Started: /ip4/%s/tcp/%v/ipfs/%s\n", *ip, *port, bc.GetID().Pretty())
go bc.SupportRPC()
bc.StartServer()
}

@ -0,0 +1,90 @@
// bootnode provides a peer discovery service for new nodes to connect to the p2p network
package main
import (
"context"
"flag"
"fmt"
"os"
"path"
"github.com/ethereum/go-ethereum/log"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/p2p/p2pimpl"
ds "github.com/ipfs/go-datastore"
dsync "github.com/ipfs/go-datastore/sync"
kaddht "github.com/libp2p/go-libp2p-kad-dht"
)
var (
version string
builtBy string
builtAt string
commit string
)
func printVersion(me string) {
fmt.Fprintf(os.Stderr, "Harmony (C) 2019. %v, version %v-%v (%v %v)\n", path.Base(me), version, commit, builtBy, builtAt)
os.Exit(0)
}
func loggingInit(logFolder, ip, port string) {
// Setup a logger to stdout and log file.
if err := os.MkdirAll(logFolder, 0755); err != nil {
panic(err)
}
logFileName := fmt.Sprintf("./%v/bootnode-%v-%v.log", logFolder, ip, port)
h := log.MultiHandler(
log.StreamHandler(os.Stdout, log.TerminalFormat(false)),
log.Must.FileHandler(logFileName, log.JSONFormat()), // Log to file
)
log.Root().SetHandler(h)
}
func main() {
ip := flag.String("ip", "127.0.0.1", "IP of the node")
port := flag.String("port", "9876", "port of the node.")
logFolder := flag.String("log_folder", "latest", "the folder collecting the logs of this execution")
keyFile := flag.String("key", "./.bnkey", "the private key file of the bootnode")
versionFlag := flag.Bool("version", false, "Output version info")
flag.Parse()
if *versionFlag {
printVersion(os.Args[0])
}
// Logging setup
utils.SetPortAndIP(*port, *ip)
// Init logging.
loggingInit(*logFolder, *ip, *port)
privKey, err := utils.LoadKeyFromFile(*keyFile)
if err != nil {
panic(err)
}
var selfPeer = p2p.Peer{IP: *ip, Port: *port}
host, err := p2pimpl.NewHost(&selfPeer, privKey)
if err != nil {
panic(err)
}
log.Info("bootnode", "BN_MA", fmt.Sprintf("/ip4/%s/tcp/%s/p2p/%s", *ip, *port, host.GetID().Pretty()))
dataStore := dsync.MutexWrap(ds.NewMapDatastore())
dht := kaddht.NewDHT(context.Background(), host.GetP2PHost(), dataStore)
if err := dht.Bootstrap(context.Background()); err != nil {
log.Error("failed to bootstrap DHT")
panic(err)
}
select {}
}

@ -9,6 +9,7 @@ import (
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/harmony-one/harmony/api/client"
proto_node "github.com/harmony-one/harmony/api/proto/node"
"github.com/harmony-one/harmony/cmd/client/txgen/txgen"
@ -16,10 +17,11 @@ import (
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/newnode"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/log"
"github.com/harmony-one/harmony/node"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/p2p/p2pimpl"
peerstore "github.com/libp2p/go-libp2p-peerstore"
multiaddr "github.com/multiformats/go-multiaddr"
)
var (
@ -50,6 +52,8 @@ func main() {
bcIP := flag.String("bc", "127.0.0.1", "IP of the identity chain")
bcPort := flag.String("bc_port", "8081", "port of the identity chain")
bcAddr := flag.String("bc_addr", "", "MultiAddr of the identity chain")
flag.Parse()
if *versionFlag {
@ -59,21 +63,36 @@ func main() {
// Add GOMAXPROCS to achieve max performance.
runtime.GOMAXPROCS(1024)
var clientPeer *p2p.Peer
var bcPeer *p2p.Peer
var shardIDLeaderMap map[uint32]p2p.Peer
priKey, _, err := utils.GenKeyP2P(*ip, *port)
candidateNode := newnode.New(*ip, *port)
BCPeer := p2p.Peer{IP: *bcIP, Port: *bcPort}
candidateNode.ContactBeaconChain(BCPeer)
clientPeer = &p2p.Peer{IP: *ip, Port: *port}
_, pubKey := utils.GenKey(clientPeer.IP, clientPeer.Port)
clientPeer.PubKey = pubKey
if *bcAddr != "" {
// Turn the destination into a multiaddr.
maddr, err := multiaddr.NewMultiaddr(*bcAddr)
if err != nil {
panic(err)
}
shardIDLeaderMap = candidateNode.Leaders
// Extract the peer ID from the multiaddr.
info, err := peerstore.InfoFromP2pAddr(maddr)
if err != nil {
panic(err)
}
if clientPeer == nil {
panic("Client Peer is nil!")
bcPeer = &p2p.Peer{IP: *bcIP, Port: *bcPort, Addrs: info.Addrs, PeerID: info.ID}
} else {
bcPeer = &p2p.Peer{IP: *bcIP, Port: *bcPort}
}
candidateNode := newnode.New(*ip, *port, priKey)
candidateNode.AddPeer(bcPeer)
candidateNode.ContactBeaconChain(*bcPeer)
selfPeer := candidateNode.GetSelfPeer()
selfPeer.PubKey = candidateNode.PubK
shardIDLeaderMap = candidateNode.Leaders
debugPrintShardIDLeaderMap(shardIDLeaderMap)
// Do cross shard tx if there are more than one shard
@ -88,24 +107,24 @@ func main() {
// Setup a logger to stdout and log file.
logFileName := fmt.Sprintf("./%v/txgen.log", *logFolder)
h := log.MultiHandler(
log.StdoutHandler,
log.StreamHandler(os.Stdout, log.TerminalFormat(false)),
log.Must.FileHandler(logFileName, log.LogfmtFormat()), // Log to file
)
log.Root().SetHandler(h)
// Nodes containing blockchain data to mirror the shards' data in the network
nodes := []*node.Node{}
host, err := p2pimpl.NewHost(&selfPeer, priKey)
if err != nil {
panic("unable to new host in txgen")
}
for shardID := range shardIDLeaderMap {
_, pubKey := utils.GenKey(clientPeer.IP, clientPeer.Port)
clientPeer.PubKey = pubKey
host := p2pimpl.NewHost(*clientPeer)
node := node.New(host, &consensus.Consensus{ShardID: shardID}, nil)
// Assign many fake addresses so we have enough address to play with at first
nodes = append(nodes, node)
}
// Client/txgenerator server node setup
host := p2pimpl.NewHost(*clientPeer)
consensusObj := consensus.New(host, "0", nil, p2p.Peer{})
clientNode := node.New(host, consensusObj, nil)
clientNode.Client = client.NewClient(clientNode.GetHost(), &shardIDLeaderMap)
@ -145,15 +164,17 @@ func main() {
for _, leader := range shardIDLeaderMap {
log.Debug("Client Join Shard", "leader", leader)
clientNode.GetHost().AddPeer(&leader)
go clientNode.JoinShard(leader)
// wait for 3 seconds for client to send ping message to leader
time.Sleep(3 * time.Second)
clientNode.StopPing <- struct{}{}
clientNode.State = node.NodeJoinedShard
clientNode.State = node.NodeReadyForConsensus
}
// wait for 1 seconds for client to send ping message to leader
time.Sleep(time.Second)
clientNode.StopPing <- struct{}{}
clientNode.State = node.NodeReadyForConsensus
// Transaction generation process
time.Sleep(5 * time.Second) // wait for nodes to be ready
time.Sleep(2 * time.Second) // wait for nodes to be ready
start := time.Now()
totalTime := float64(*duration)
@ -185,6 +206,8 @@ func main() {
}(shardID, txs)
}
lock.Unlock()
case <-time.After(2 * time.Second):
log.Warn("No new block is received so far")
}
}

@ -16,16 +16,19 @@ import (
"github.com/ethereum/go-ethereum/common"
crypto2 "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/api/client"
clientService "github.com/harmony-one/harmony/api/client/service"
proto_node "github.com/harmony-one/harmony/api/proto/node"
"github.com/harmony-one/harmony/core/types"
libs "github.com/harmony-one/harmony/internal/beaconchain/libs"
"github.com/harmony-one/harmony/internal/beaconchain/rpc"
beaconchain "github.com/harmony-one/harmony/internal/beaconchain/rpc"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/node"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/p2p/p2pimpl"
peer "github.com/libp2p/go-libp2p-peer"
)
var (
@ -49,6 +52,9 @@ type AccountState struct {
// The main wallet program entrance. Note that this wallet program is for demo purposes only. It does not implement
// the secure storage of keys.
func main() {
h := log.StreamHandler(os.Stdout, log.TerminalFormat(false))
log.Root().SetHandler(h)
// Account subcommands
accountImportCommand := flag.NewFlagSet("import", flag.ExitOnError)
accountImportPtr := accountImportCommand.String("privateKey", "", "Specify the private key to import")
@ -289,11 +295,24 @@ func CreateWalletNode() *node.Node {
bcClient := beaconchain.NewClient("54.183.5.66", strconv.Itoa(port+libs.BeaconchainServicePortDiff))
response := bcClient.GetLeaders()
// dummy host for wallet
self := p2p.Peer{IP: "127.0.0.1", Port: "6789"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "6789")
host, err := p2pimpl.NewHost(&self, priKey)
if err != nil {
panic(err)
}
for _, leader := range response.Leaders {
shardIDLeaderMap[leader.ShardId] = p2p.Peer{IP: leader.Ip, Port: leader.Port}
peerID, err := peer.IDB58Decode(leader.PeerID)
if err != nil {
panic(err)
}
leaderPeer := p2p.Peer{IP: leader.Ip, Port: leader.Port, PeerID: peerID}
shardIDLeaderMap[leader.ShardId] = leaderPeer
host.AddPeer(&leaderPeer)
}
host := p2pimpl.NewHost(p2p.Peer{})
walletNode := node.New(host, nil, nil)
walletNode.Client = client.NewClient(walletNode.GetHost(), &shardIDLeaderMap)
return walletNode

@ -1,14 +1,17 @@
package main
import (
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/harmony-one/harmony/api/client"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/node"
"github.com/harmony-one/harmony/p2p"
mock_host "github.com/harmony-one/harmony/p2p/host/mock"
"testing"
"time"
peer "github.com/libp2p/go-libp2p-peer"
)
func TestCreateWalletNode(test *testing.T) {
@ -29,7 +32,9 @@ func TestSubmitTransaction(test *testing.T) {
m.EXPECT().SendMessage(gomock.Any(), gomock.Any()).Times(1)
walletNode := node.New(m, nil, nil)
walletNode.Client = client.NewClient(walletNode.GetHost(), &map[uint32]p2p.Peer{0: p2p.Peer{IP: "1", Port: "2"}})
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9990")
peerID, _ := peer.IDFromPrivateKey(priKey)
walletNode.Client = client.NewClient(walletNode.GetHost(), &map[uint32]p2p.Peer{0: p2p.Peer{IP: "127.0.0.1", Port: "9990", PeerID: peerID}})
SubmitTransaction(&types.Transaction{}, walletNode, 0)

@ -9,15 +9,18 @@ import (
"runtime"
"time"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/internal/attack"
"github.com/harmony-one/harmony/internal/db"
pkg_newnode "github.com/harmony-one/harmony/internal/newnode"
"github.com/harmony-one/harmony/internal/profiler"
"github.com/harmony-one/harmony/log"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/node"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/p2p/p2pimpl"
peerstore "github.com/libp2p/go-libp2p-peerstore"
multiaddr "github.com/multiformats/go-multiaddr"
)
var (
@ -27,7 +30,7 @@ var (
commit string
)
// Constants used by the benchmark.
// Constants used by harmony.
const (
AttackProbability = 20
)
@ -45,14 +48,15 @@ func attackDetermination(attackedMode int) bool {
}
// InitLDBDatabase initializes a LDBDatabase.
func InitLDBDatabase(ip string, port string) (*db.LDBDatabase, error) {
// TODO(minhdoan): Refactor this.
dbFileName := "/tmp/harmony_" + ip + port + ".dat"
var err = os.RemoveAll(dbFileName)
if err != nil {
fmt.Println(err.Error())
func InitLDBDatabase(ip string, port string, freshDB bool) (*ethdb.LDBDatabase, error) {
dbFileName := fmt.Sprintf("./db/harmony_%s_%s", ip, port)
if freshDB {
var err = os.RemoveAll(dbFileName)
if err != nil {
fmt.Println(err.Error())
}
}
return db.NewLDBDatabase(dbFileName, 0, 0)
return ethdb.NewLDBDatabase(dbFileName, 0, 0)
}
func printVersion(me string) {
@ -64,7 +68,7 @@ func loggingInit(logFolder, role, ip, port string, onlyLogTps bool) {
// Setup a logger to stdout and log file.
logFileName := fmt.Sprintf("./%v/%s-%v-%v.log", logFolder, role, ip, port)
h := log.MultiHandler(
log.StdoutHandler,
log.StreamHandler(os.Stdout, log.TerminalFormat(false)),
log.Must.FileHandler(logFileName, log.JSONFormat()), // Log to file
)
if onlyLogTps {
@ -79,7 +83,8 @@ func main() {
port := flag.String("port", "9000", "port of the node.")
logFolder := flag.String("log_folder", "latest", "the folder collecting the logs of this execution")
attackedMode := flag.Int("attacked_mode", 0, "0 means not attacked, 1 means attacked, 2 means being open to be selected as attacked")
dbSupported := flag.Bool("db_supported", false, "false means not db_supported, true means db_supported")
dbSupported := flag.Bool("db_supported", true, "false means not db_supported, true means db_supported")
freshDB := flag.Bool("fresh_db", false, "true means the existing disk based db will be removed")
profile := flag.Bool("profile", false, "Turn on profiling (CPU, Memory).")
metricsReportURL := flag.String("metrics_report_url", "", "If set, reports metrics to this URL.")
versionFlag := flag.Bool("version", false, "Output version info")
@ -88,6 +93,7 @@ func main() {
//This IP belongs to jenkins.harmony.one
bcIP := flag.String("bc", "127.0.0.1", "IP of the identity chain")
bcPort := flag.String("bc_port", "8081", "port of the identity chain")
bcAddr := flag.String("bc_addr", "", "MultiAddr of the identity chain")
//Leader needs to have a minimal number of peers to start consensus
minPeers := flag.Int("min_peers", 100, "Minimal number of Peers in shard")
@ -98,6 +104,9 @@ func main() {
printVersion(os.Args[0])
}
// Logging setup
utils.SetPortAndIP(*port, *ip)
// Add GOMAXPROCS to achieve max performance.
runtime.GOMAXPROCS(1024)
@ -109,18 +118,38 @@ func main() {
var leader p2p.Peer
var selfPeer p2p.Peer
var clientPeer *p2p.Peer
var BCPeer *p2p.Peer
priKey, _, err := utils.GenKeyP2P(*ip, *port)
if *bcAddr != "" {
// Turn the destination into a multiaddr.
maddr, err := multiaddr.NewMultiaddr(*bcAddr)
if err != nil {
panic(err)
}
// Extract the peer ID from the multiaddr.
info, err := peerstore.InfoFromP2pAddr(maddr)
if err != nil {
panic(err)
}
BCPeer = &p2p.Peer{IP: *bcIP, Port: *bcPort, Addrs: info.Addrs, PeerID: info.ID}
} else {
BCPeer = &p2p.Peer{IP: *bcIP, Port: *bcPort}
}
//Use Peer Discovery to get shard/leader/peer/...
candidateNode := pkg_newnode.New(*ip, *port)
BCPeer := p2p.Peer{IP: *bcIP, Port: *bcPort}
candidateNode.ContactBeaconChain(BCPeer)
candidateNode := pkg_newnode.New(*ip, *port, priKey)
candidateNode.AddPeer(BCPeer)
candidateNode.ContactBeaconChain(*BCPeer)
shardID = candidateNode.GetShardID()
leader = candidateNode.GetLeader()
selfPeer = candidateNode.GetSelfPeer()
clientPeer = candidateNode.GetClientPeer()
selfPeer.PubKey = candidateNode.PubK
// fmt.Println(peers, leader, selfPeer, clientPeer, *logFolder, *minPeers) //TODO: to be replaced by a logger later: ak, rl
var role string
if leader.IP == *ip && leader.Port == *port {
role = "leader"
@ -136,13 +165,18 @@ func main() {
loggingInit(*logFolder, role, *ip, *port, *onlyLogTps)
// Initialize leveldb if dbSupported.
var ldb *db.LDBDatabase
var ldb *ethdb.LDBDatabase
if *dbSupported {
ldb, _ = InitLDBDatabase(*ip, *port)
ldb, _ = InitLDBDatabase(*ip, *port, *freshDB)
}
host, err := p2pimpl.NewHost(&selfPeer, priKey)
if err != nil {
panic("unable to new host in harmony")
}
host := p2pimpl.NewHost(selfPeer)
host.AddPeer(&leader)
// Consensus object.
consensus := consensus.New(host, shardID, peers, leader)
consensus.MinPeers = *minPeers
@ -150,17 +184,20 @@ func main() {
// Start Profiler for leader if profile argument is on
if role == "leader" && (*profile || *metricsReportURL != "") {
prof := profiler.GetProfiler()
prof.Config(consensus.Log, shardID, *metricsReportURL)
prof.Config(shardID, *metricsReportURL)
if *profile {
prof.Start()
}
}
// Set logger for the attack model.
attack.GetInstance().SetLogger(consensus.Log)
// Current node.
currentNode := node.New(host, consensus, ldb)
currentNode.Consensus.OfflinePeers = currentNode.OfflinePeers
if role == "leader" {
currentNode.Role = node.ShardLeader
} else {
currentNode.Role = node.ShardValidator
}
// If there is a client configured in the node list.
if clientPeer != nil {
@ -175,10 +212,6 @@ func main() {
if consensus.IsLeader {
currentNode.State = node.NodeLeader
// Let consensus run
go func() {
consensus.WaitForNewBlock(currentNode.BlockChannel)
}()
// Node waiting for consensus readiness to create new block
go func() {
currentNode.WaitForConsensusReady(consensus.ReadySignal)
@ -190,7 +223,7 @@ func main() {
go currentNode.SupportSyncing()
if consensus.IsLeader {
go currentNode.SupportClient()
go currentNode.SupportExplorer()
}
currentNode.AddAndRunServices()
currentNode.StartServer()
}
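The new -bc_addr flag above lets the node dial the identity/beacon chain by libp2p multiaddr instead of a bare IP:port. A minimal standalone sketch of that parsing step, assuming the go-multiaddr and go-libp2p-peerstore packages this code appears to use; peerInfoFromBCAddr is a hypothetical helper, while cmd/harmony.go does the same work inline in main():

package sketch

import (
	peerstore "github.com/libp2p/go-libp2p-peerstore"
	multiaddr "github.com/multiformats/go-multiaddr"
)

// peerInfoFromBCAddr parses a -bc_addr value such as
// "/ip4/127.0.0.1/tcp/8081/p2p/<peer id>" into the peer ID and dialable
// addresses that populate the beacon-chain p2p.Peer above.
func peerInfoFromBCAddr(bcAddr string) (*peerstore.PeerInfo, error) {
	// Turn the destination into a multiaddr.
	maddr, err := multiaddr.NewMultiaddr(bcAddr)
	if err != nil {
		return nil, err
	}
	// Extract the peer ID and addresses from the multiaddr.
	return peerstore.InfoFromP2pAddr(maddr)
}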

@ -2,27 +2,31 @@
package consensus // consensus
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"reflect"
"strconv"
"sync"
"github.com/dedis/kyber"
"github.com/dedis/kyber/sign/schnorr"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
protobuf "github.com/golang/protobuf/proto"
"github.com/harmony-one/bls/ffi/go/bls"
consensus_proto "github.com/harmony-one/harmony/api/consensus"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto"
"github.com/harmony-one/harmony/crypto/pki"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/log"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/p2p/host"
"golang.org/x/crypto/sha3"
proto_node "github.com/harmony-one/harmony/api/proto/node"
proto_discovery "github.com/harmony-one/harmony/api/proto/discovery"
)
// Consensus is the main struct with all states and data related to consensus process.
@ -31,20 +35,12 @@ type Consensus struct {
state State
// Commits collected from validators.
commitments *map[uint32]kyber.Point
finalCommitments *map[uint32]kyber.Point
aggregatedCommitment kyber.Point
aggregatedFinalCommitment kyber.Point
bitmap *crypto.Mask
finalBitmap *crypto.Mask
// Challenges for the validators
challenge [32]byte
finalChallenge [32]byte
// Responses collected from validators
responses *map[uint32]kyber.Scalar
finalResponses *map[uint32]kyber.Scalar
prepareSigs *map[uint32]*bls.Sign
commitSigs *map[uint32]*bls.Sign
aggregatedPrepareSig *bls.Sign
aggregatedCommitSig *bls.Sign
prepareBitmap *bls_cosi.Mask
commitBitmap *bls_cosi.Mask
// map of nodeID to validator Peer object
// FIXME: should use PubKey of p2p.Peer as the hashkey
@ -58,12 +54,12 @@ type Consensus struct {
leader p2p.Peer
// Public keys of the committee including leader and validators
PublicKeys []kyber.Point
PublicKeys []*bls.PublicKey
pubKeyLock sync.Mutex
// private/public keys of current node
priKey kyber.Scalar
pubKey kyber.Point
priKey *bls.SecretKey
pubKey *bls.PublicKey
// Whether I am leader. False means I am validator
IsLeader bool
@ -86,8 +82,6 @@ type Consensus struct {
// Validator specific fields
// Blocks received but not done with consensus yet
blocksReceived map[uint32]*BlockConsensusStatus
// Commitment secret
secret map[uint32]kyber.Scalar
// Signal channel for starting a new consensus process
ReadySignal chan struct{}
@ -97,12 +91,15 @@ type Consensus struct {
// Called when consensus on a new block is done
OnConsensusDone func(*types.Block)
Log log.Logger
// current consensus block, used to check whether the node is out of sync
ConsensusBlock chan *types.Block
// verified block to be broadcast for state sync
VerifiedNewBlock chan *types.Block
uniqueIDInstance *utils.UniqueValidatorID
// The p2p host used to send/receive p2p messages
host host.Host
host p2p.Host
// Signal channel for lost validators
OfflinePeers chan p2p.Peer
@ -123,7 +120,7 @@ type BlockConsensusStatus struct {
}
// New creates a new Consensus object
func New(host host.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer) *Consensus {
func New(host p2p.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer) *Consensus {
consensus := Consensus{}
consensus.host = host
@ -134,43 +131,43 @@ func New(host host.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer) *Con
consensus.IsLeader = false
}
consensus.commitments = &map[uint32]kyber.Point{}
consensus.finalCommitments = &map[uint32]kyber.Point{}
consensus.responses = &map[uint32]kyber.Scalar{}
consensus.finalResponses = &map[uint32]kyber.Scalar{}
consensus.leader = leader
for _, peer := range peers {
consensus.validators.Store(utils.GetUniqueIDFromPeer(peer), peer)
}
consensus.prepareSigs = &map[uint32]*bls.Sign{}
consensus.commitSigs = &map[uint32]*bls.Sign{}
// Initialize cosign bitmap
allPublicKeys := make([]kyber.Point, 0)
allPublicKeys := make([]*bls.PublicKey, 0)
for _, validatorPeer := range peers {
allPublicKeys = append(allPublicKeys, validatorPeer.PubKey)
}
allPublicKeys = append(allPublicKeys, leader.PubKey)
mask, err := crypto.NewMask(crypto.Ed25519Curve, allPublicKeys, consensus.leader.PubKey)
if err != nil {
panic("Failed to create mask")
}
finalMask, err := crypto.NewMask(crypto.Ed25519Curve, allPublicKeys, consensus.leader.PubKey)
if err != nil {
panic("Failed to create final mask")
}
consensus.PublicKeys = allPublicKeys
consensus.bitmap = mask
consensus.finalBitmap = finalMask
consensus.secret = map[uint32]kyber.Scalar{}
prepareBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.PubKey)
commitBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.PubKey)
consensus.prepareBitmap = prepareBitmap
consensus.commitBitmap = commitBitmap
consensus.aggregatedPrepareSig = nil
consensus.aggregatedCommitSig = nil
// For now use socket address as ID
// TODO: populate Id derived from address
consensus.nodeID = utils.GetUniqueIDFromPeer(selfPeer)
// Set private key for myself so that I can sign messages.
consensus.priKey = crypto.Ed25519Curve.Scalar().SetInt64(int64(consensus.nodeID))
consensus.pubKey = pki.GetPublicKeyFromScalar(consensus.priKey)
nodeIDBytes := make([]byte, 32)
binary.LittleEndian.PutUint32(nodeIDBytes, consensus.nodeID)
privateKey := bls.SecretKey{}
err := privateKey.SetLittleEndian(nodeIDBytes)
consensus.priKey = &privateKey
consensus.pubKey = privateKey.GetPublicKey()
consensus.consensusID = 0 // or view Id in the original pbft paper
myShardID, err := strconv.Atoi(ShardID)
@ -192,7 +189,6 @@ func New(host host.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer) *Con
}()
}
consensus.Log = log.New()
consensus.uniqueIDInstance = utils.GetUniqueValidatorIDInstance()
consensus.OfflinePeerList = make([]p2p.Peer, 0)
@ -200,19 +196,92 @@ func New(host host.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer) *Con
return &consensus
}
// checkConsensusMessage checks the basic metadata of a consensus message: its signature, consensus ID, and block hash.
func (consensus *Consensus) checkConsensusMessage(message consensus_proto.Message, publicKey *bls.PublicKey) bool {
consensusID := message.ConsensusId
blockHash := message.BlockHash
// Verify message signature
err := verifyMessageSig(publicKey, message)
if err != nil {
utils.GetLogInstance().Warn("Failed to verify the message signature", "Error", err)
return false
}
// check consensus Id
if consensusID != consensus.consensusID {
utils.GetLogInstance().Warn("Wrong consensus Id", "myConsensusId", consensus.consensusID, "theirConsensusId", consensusID, "consensus", consensus)
return false
}
if !bytes.Equal(blockHash, consensus.blockHash[:]) {
utils.GetLogInstance().Warn("Wrong blockHash", "consensus", consensus)
return false
}
return true
}
// Gets the validator peer based on validator ID.
func (consensus *Consensus) getValidatorPeerByID(validatorID uint32) *p2p.Peer {
v, ok := consensus.validators.Load(validatorID)
if !ok {
utils.GetLogInstance().Warn("Unrecognized validator", "validatorID", validatorID, "consensus", consensus)
return nil
}
value, ok := v.(p2p.Peer)
if !ok {
utils.GetLogInstance().Warn("Invalid validator", "validatorID", validatorID, "consensus", consensus)
return nil
}
return &value
}
// verifyMessageSig verifies that the message signature is valid under the signer's public key.
func verifyMessageSig(signerPubKey *bls.PublicKey, message consensus_proto.Message) error {
signature := message.Signature
message.Signature = nil
messageBytes, err := protobuf.Marshal(&message)
if err != nil {
return err
}
msgSig := bls.Sign{}
err = msgSig.Deserialize(signature)
if err != nil {
return err
}
msgHash := sha256.Sum256(messageBytes)
if !msgSig.VerifyHash(signerPubKey, msgHash[:]) {
return errors.New("failed to verify the signature")
}
return nil
}
// Author returns the author of the block header.
func (consensus *Consensus) Author(header *types.Header) (common.Address, error) {
// TODO: implement this
return common.Address{}, nil
}
// TODO: switch to BLS-based signature
// Sign on the hash of the message
func (consensus *Consensus) signMessage(message []byte) []byte {
signature, err := schnorr.Sign(crypto.Ed25519Curve, consensus.priKey, message)
hash := sha256.Sum256(message)
signature := consensus.priKey.SignHash(hash[:])
return signature.Serialize()
}
// Sign on the consensus message signature field.
func (consensus *Consensus) signConsensusMessage(message *consensus_proto.Message) error {
message.Signature = nil
// TODO: use custom serialization method rather than protobuf
marshaledMessage, err := protobuf.Marshal(message)
if err != nil {
panic("Failed to sign message with Schnorr signature.")
return err
}
return signature
// 64 byte of signature on previous data
signature := consensus.signMessage(marshaledMessage)
message.Signature = signature
return nil
}
// GetValidatorPeers returns list of validator peers.
@ -230,24 +299,37 @@ func (consensus *Consensus) GetValidatorPeers() []p2p.Peer {
return validatorPeers
}
// GetPrepareSigsArray returns the signatures for prepare as an array
func (consensus *Consensus) GetPrepareSigsArray() []*bls.Sign {
sigs := []*bls.Sign{}
for _, sig := range *consensus.prepareSigs {
sigs = append(sigs, sig)
}
return sigs
}
// GetCommitSigsArray returns the signatures for commit as an array
func (consensus *Consensus) GetCommitSigsArray() []*bls.Sign {
sigs := []*bls.Sign{}
for _, sig := range *consensus.commitSigs {
sigs = append(sigs, sig)
}
return sigs
}
// ResetState resets the state of the consensus
func (consensus *Consensus) ResetState() {
consensus.state = Finished
consensus.commitments = &map[uint32]kyber.Point{}
consensus.finalCommitments = &map[uint32]kyber.Point{}
consensus.responses = &map[uint32]kyber.Scalar{}
consensus.finalResponses = &map[uint32]kyber.Scalar{}
mask, _ := crypto.NewMask(crypto.Ed25519Curve, consensus.PublicKeys, consensus.leader.PubKey)
finalMask, _ := crypto.NewMask(crypto.Ed25519Curve, consensus.PublicKeys, consensus.leader.PubKey)
consensus.bitmap = mask
consensus.finalBitmap = finalMask
consensus.bitmap.SetMask([]byte{})
consensus.finalBitmap.SetMask([]byte{})
consensus.aggregatedCommitment = nil
consensus.aggregatedFinalCommitment = nil
consensus.secret = map[uint32]kyber.Scalar{}
consensus.prepareSigs = &map[uint32]*bls.Sign{}
consensus.commitSigs = &map[uint32]*bls.Sign{}
prepareBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.PubKey)
commitBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.PubKey)
consensus.prepareBitmap = prepareBitmap
consensus.commitBitmap = commitBitmap
consensus.aggregatedPrepareSig = nil
consensus.aggregatedCommitSig = nil
// Clear the OfflinePeersList again
consensus.OfflinePeerList = make([]p2p.Peer, 0)
@ -261,8 +343,8 @@ func (consensus *Consensus) String() string {
} else {
duty = "VLD" // validator
}
return fmt.Sprintf("[duty:%s, priKey:%s, ShardID:%v, nodeID:%v, state:%s]",
duty, consensus.priKey.String(), consensus.ShardID, consensus.nodeID, consensus.state)
return fmt.Sprintf("[duty:%s, pubKey:%s, ShardID:%v, nodeID:%v, state:%s]",
duty, hex.EncodeToString(consensus.pubKey.Serialize()), consensus.ShardID, consensus.nodeID, consensus.state)
}
// AddPeers adds new peers into the validator map of the consensus
@ -280,6 +362,7 @@ func (consensus *Consensus) AddPeers(peers []*p2p.Peer) int {
consensus.pubKeyLock.Lock()
consensus.PublicKeys = append(consensus.PublicKeys, peer.PubKey)
consensus.pubKeyLock.Unlock()
utils.GetLogInstance().Debug("[SYNC] new peer added", "pubKey", peer.PubKey, "ip", peer.IP, "port", peer.Port)
}
count++
}
@ -329,7 +412,7 @@ func (consensus *Consensus) RemovePeers(peers []p2p.Peer) int {
// Or the shard won't be able to reach consensus if public keys are mismatched
validators := consensus.GetValidatorPeers()
pong := proto_node.NewPongMessage(validators, consensus.PublicKeys)
pong := proto_discovery.NewPongMessage(validators, consensus.PublicKeys)
buffer := pong.ConstructPongMessage()
host.BroadcastMessageFromLeader(consensus.host, validators, buffer, consensus.OfflinePeers)
@ -341,11 +424,11 @@ func (consensus *Consensus) RemovePeers(peers []p2p.Peer) int {
// DebugPrintPublicKeys prints all the PublicKeys in string format in Consensus
func (consensus *Consensus) DebugPrintPublicKeys() {
for _, k := range consensus.PublicKeys {
str := fmt.Sprintf("%s", k)
consensus.Log.Debug("pk:", "string", str)
str := fmt.Sprintf("%s", hex.EncodeToString(k.Serialize()))
utils.GetLogInstance().Debug("pk:", "string", str)
}
consensus.Log.Debug("PublicKeys:", "#", len(consensus.PublicKeys))
utils.GetLogInstance().Debug("PublicKeys:", "#", len(consensus.PublicKeys))
}
// DebugPrintValidators prints all validator ip/port/key in string format in Consensus
@ -353,18 +436,18 @@ func (consensus *Consensus) DebugPrintValidators() {
count := 0
consensus.validators.Range(func(k, v interface{}) bool {
if p, ok := v.(p2p.Peer); ok {
str2 := fmt.Sprintf("%s", p.PubKey)
consensus.Log.Debug("validator:", "IP", p.IP, "Port", p.Port, "VID", p.ValidatorID, "Key", str2)
str2 := fmt.Sprintf("%s", p.PubKey.Serialize())
utils.GetLogInstance().Debug("validator:", "IP", p.IP, "Port", p.Port, "VID", p.ValidatorID, "Key", str2)
count++
return true
}
return false
})
consensus.Log.Debug("Validators", "#", count)
utils.GetLogInstance().Debug("Validators", "#", count)
}
// UpdatePublicKeys updates the PublicKeys variable, protected by a mutex
func (consensus *Consensus) UpdatePublicKeys(pubKeys []kyber.Point) int {
func (consensus *Consensus) UpdatePublicKeys(pubKeys []*bls.PublicKey) int {
consensus.pubKeyLock.Lock()
// consensus.PublicKeys = make([]kyber.Point, len(pubKeys))
consensus.PublicKeys = append(pubKeys[:0:0], pubKeys...)
@ -426,7 +509,7 @@ func (consensus *Consensus) VerifySeal(chain ChainReader, header *types.Header)
// Finalize implements consensus.Engine, accumulating the block and uncle rewards,
// setting the final state and assembling the block.
func (consensus *Consensus) Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, receipts []*types.Receipt) (*types.Block, error) {
func (consensus *Consensus) Finalize(chain ChainReader, header *types.Header, state *state.DB, txs []*types.Transaction, receipts []*types.Receipt) (*types.Block, error) {
// Accumulate any block and uncle rewards and commit the final state root
// Header seems complete, assemble into a block and return
accumulateRewards(chain.Config(), state, header)
@ -472,7 +555,7 @@ func (consensus *Consensus) Prepare(chain ChainReader, header *types.Header) err
// AccumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header) {
func accumulateRewards(config *params.ChainConfig, state *state.DB, header *types.Header) {
// TODO: implement mining rewards
}
@ -481,7 +564,46 @@ func (consensus *Consensus) GetNodeID() uint32 {
return consensus.nodeID
}
// GetPeerFromID returns the peer for the given peerID; the returned bool is true on success and false on failure
func (consensus *Consensus) GetPeerFromID(peerID uint32) (p2p.Peer, bool) {
v, ok := consensus.validators.Load(peerID)
if !ok {
return p2p.Peer{}, false
}
value, ok := v.(p2p.Peer)
if !ok {
return p2p.Peer{}, false
}
return value, true
}
// SendMessage sends message thru p2p host to peer.
func (consensus *Consensus) SendMessage(peer p2p.Peer, message []byte) {
host.SendMessage(consensus.host, peer, message, nil)
}
// Populates the common basic fields for all consensus messages.
func (consensus *Consensus) populateMessageFields(message *consensus_proto.Message) {
// 4 byte consensus id
message.ConsensusId = consensus.consensusID
// 32 byte block hash
message.BlockHash = consensus.blockHash[:]
// 4 byte sender id
message.SenderId = uint32(consensus.nodeID)
}
// Signs the consensus message and returns the marshaled message.
func (consensus *Consensus) signAndMarshalConsensusMessage(message *consensus_proto.Message) ([]byte, error) {
err := consensus.signConsensusMessage(message)
if err != nil {
return []byte{}, err
}
marshaledMessage, err := protobuf.Marshal(message)
if err != nil {
return []byte{}, err
}
return marshaledMessage, nil
}
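The consensus.go changes above swap the Schnorr/kyber key material for BLS: a placeholder secret key is derived from the node ID, messages are signed over a SHA-256 digest of their protobuf encoding, and receivers verify with the sender's public key. A minimal sketch of that flow, using only the bls calls that appear in this diff (SetLittleEndian, GetPublicKey, SignHash, VerifyHash) and assuming the BLS library has been initialized elsewhere, as the repo's setup code is expected to do; signAndVerify is a hypothetical helper:

package sketch

import (
	"crypto/sha256"
	"encoding/binary"

	"github.com/harmony-one/bls/ffi/go/bls"
)

// signAndVerify mirrors New(), signMessage() and verifyMessageSig(): derive
// the placeholder secret key from the node ID, sign the SHA-256 hash of the
// payload, then verify the signature against the matching public key.
func signAndVerify(nodeID uint32, payload []byte) bool {
	nodeIDBytes := make([]byte, 32)
	binary.LittleEndian.PutUint32(nodeIDBytes, nodeID)
	priKey := bls.SecretKey{}
	if err := priKey.SetLittleEndian(nodeIDBytes); err != nil {
		return false
	}
	hash := sha256.Sum256(payload)
	sig := priKey.SignHash(hash[:])
	return sig.VerifyHash(priKey.GetPublicKey(), hash[:])
}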

@ -60,7 +60,7 @@ type Engine interface {
// and assembles the final block.
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
Finalize(chain ChainReader, header *types.Header, state *state.DB, txs []*types.Transaction,
receipts []*types.Receipt) (*types.Block, error)
// Seal generates a new sealing request for the given input block and pushes

@ -1,23 +1,19 @@
package consensus
import (
"bytes"
"encoding/hex"
"errors"
"strconv"
"time"
"github.com/dedis/kyber"
"github.com/dedis/kyber/sign/schnorr"
"github.com/ethereum/go-ethereum/rlp"
protobuf "github.com/golang/protobuf/proto"
"github.com/harmony-one/bls/ffi/go/bls"
consensus_proto "github.com/harmony-one/harmony/api/consensus"
"github.com/harmony-one/harmony/api/services/explorer"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/profiler"
"github.com/harmony-one/harmony/log"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p/host"
)
@ -30,53 +26,60 @@ var (
)
// WaitForNewBlock waits for the next new block to run consensus on
func (consensus *Consensus) WaitForNewBlock(blockChannel chan *types.Block) {
consensus.Log.Debug("Waiting for block", "consensus", consensus)
for { // keep waiting for new blocks
newBlock := <-blockChannel
// TODO: think about potential race condition
c := consensus.RemovePeers(consensus.OfflinePeerList)
if c > 0 {
consensus.Log.Debug("WaitForNewBlock", "removed peers", c)
}
func (consensus *Consensus) WaitForNewBlock(blockChannel chan *types.Block, stopChan chan struct{}, stoppedChan chan struct{}) {
go func() {
defer close(stoppedChan)
for {
select {
default:
utils.GetLogInstance().Debug("Waiting for block", "consensus", consensus)
// keep waiting for new blocks
newBlock := <-blockChannel
// TODO: think about potential race condition
c := consensus.RemovePeers(consensus.OfflinePeerList)
if c > 0 {
utils.GetLogInstance().Debug("WaitForNewBlock", "removed peers", c)
}
for !consensus.HasEnoughValidators() {
consensus.Log.Debug("Not enough validators", "# Validators", len(consensus.PublicKeys))
time.Sleep(waitForEnoughValidators * time.Millisecond)
}
for !consensus.HasEnoughValidators() {
utils.GetLogInstance().Debug("Not enough validators", "# Validators", len(consensus.PublicKeys))
time.Sleep(waitForEnoughValidators * time.Millisecond)
}
startTime = time.Now()
consensus.Log.Debug("STARTING CONSENSUS", "numTxs", len(newBlock.Transactions()), "consensus", consensus, "startTime", startTime, "publicKeys", len(consensus.PublicKeys))
for consensus.state == Finished {
// time.Sleep(500 * time.Millisecond)
consensus.ResetState()
consensus.startConsensus(newBlock)
break
startTime = time.Now()
utils.GetLogInstance().Debug("STARTING CONSENSUS", "numTxs", len(newBlock.Transactions()), "consensus", consensus, "startTime", startTime, "publicKeys", len(consensus.PublicKeys))
for { // Wait until last consensus is finished
if consensus.state == Finished {
consensus.ResetState()
consensus.startConsensus(newBlock)
break
}
time.Sleep(500 * time.Millisecond)
}
case <-stopChan:
return
}
}
}
}()
}
// ProcessMessageLeader dispatches consensus messages for the leader.
func (consensus *Consensus) ProcessMessageLeader(payload []byte) {
message := consensus_proto.Message{}
err := message.XXX_Unmarshal(payload)
err := protobuf.Unmarshal(payload, &message)
if err != nil {
consensus.Log.Error("Failed to unmarshal message payload.", "err", err, "consensus", consensus)
utils.GetLogInstance().Error("Failed to unmarshal message payload.", "err", err, "consensus", consensus)
}
switch message.Type {
case consensus_proto.MessageType_PREPARE:
consensus.processPrepareMessage(message)
case consensus_proto.MessageType_COMMIT:
consensus.processCommitMessage(message, ChallengeDone)
case consensus_proto.MessageType_RESPONSE:
consensus.processResponseMessage(message, CollectiveSigDone)
case consensus_proto.MessageType_FINAL_COMMIT:
consensus.processCommitMessage(message, FinalChallengeDone)
case consensus_proto.MessageType_FINAL_RESPONSE:
consensus.processResponseMessage(message, Finished)
consensus.processCommitMessage(message)
default:
consensus.Log.Error("Unexpected message type", "msgType", message.Type, "consensus", consensus)
utils.GetLogInstance().Error("Unexpected message type", "msgType", message.Type, "consensus", consensus)
}
}
@ -86,328 +89,186 @@ func (consensus *Consensus) startConsensus(newBlock *types.Block) {
blockHash := newBlock.Hash()
copy(consensus.blockHash[:], blockHash[:])
consensus.Log.Debug("Start encoding block")
utils.GetLogInstance().Debug("Start encoding block")
// prepare message and broadcast to validators
encodedBlock, err := rlp.EncodeToBytes(newBlock)
if err != nil {
consensus.Log.Debug("Failed encoding block")
utils.GetLogInstance().Debug("Failed encoding block")
return
}
consensus.block = encodedBlock
utils.GetLogInstance().Debug("Stop encoding block")
consensus.Log.Debug("Stop encoding block")
msgToSend := consensus.constructAnnounceMessage()
// Set state to AnnounceDone
consensus.state = AnnounceDone
consensus.commitByLeader(true)
host.BroadcastMessageFromLeader(consensus.host, consensus.GetValidatorPeers(), msgToSend, consensus.OfflinePeers)
}
// commitByLeader makes the leader commit to the message itself before receiving other validators' commits
func (consensus *Consensus) commitByLeader(firstRound bool) {
// Generate leader's own commitment
secret, commitment := crypto.Commit(crypto.Ed25519Curve)
consensus.secret[consensus.consensusID] = secret
if firstRound {
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
(*consensus.commitments)[consensus.nodeID] = commitment
consensus.bitmap.SetKey(consensus.pubKey, true)
} else {
(*consensus.finalCommitments)[consensus.nodeID] = commitment
consensus.finalBitmap.SetKey(consensus.pubKey, true)
}
// Leader signs the block hash itself
(*consensus.prepareSigs)[consensus.nodeID] = consensus.priKey.SignHash(consensus.blockHash[:])
host.BroadcastMessageFromLeader(consensus.host, consensus.GetValidatorPeers(), msgToSend, consensus.OfflinePeers)
}
// processCommitMessage processes the commit message sent from validators
func (consensus *Consensus) processCommitMessage(message consensus_proto.Message, targetState State) {
consensusID := message.ConsensusId
blockHash := message.BlockHash
// processPrepareMessage processes the prepare message sent from validators
func (consensus *Consensus) processPrepareMessage(message consensus_proto.Message) {
validatorID := message.SenderId
commitment := message.Payload
signature := message.Signature
prepareSig := message.Payload
// Verify signature
v, ok := consensus.validators.Load(validatorID)
if !ok {
consensus.Log.Warn("Received message from unrecognized validator", "validatorID", validatorID, "consensus", consensus)
return
}
value, ok := v.(p2p.Peer)
if !ok {
consensus.Log.Warn("Invalid validator", "validatorID", validatorID, "consensus", consensus)
return
}
prepareSigs := consensus.prepareSigs
prepareBitmap := consensus.prepareBitmap
message.Signature = nil
messageBytes, err := message.XXX_Marshal([]byte{}, true)
if err != nil {
consensus.Log.Warn("Failed to marshal the announce message", "error", err)
}
if schnorr.Verify(crypto.Ed25519Curve, value.PubKey, messageBytes, signature) != nil {
consensus.Log.Warn("Received message with invalid signature", "validatorKey", consensus.leader.PubKey, "consensus", consensus)
return
}
// check consensus Id
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
if consensusID != consensus.consensusID {
consensus.Log.Warn("Received Commit with wrong consensus Id", "myConsensusId", consensus.consensusID, "theirConsensusId", consensusID, "consensus", consensus)
validatorPeer := consensus.getValidatorPeerByID(validatorID)
if !consensus.checkConsensusMessage(message, validatorPeer.PubKey) {
utils.GetLogInstance().Debug("Failed to check the validator message", "validatorID", validatorID)
return
}
if !bytes.Equal(blockHash, consensus.blockHash[:]) {
consensus.Log.Warn("Received Commit with wrong blockHash", "myConsensusId", consensus.consensusID, "theirConsensusId", consensusID, "consensus", consensus)
// proceed only when the message has not been received before
_, ok := (*prepareSigs)[validatorID]
if ok {
utils.GetLogInstance().Debug("Already received prepare message from the validator", "validatorID", validatorID)
return
}
commitments := consensus.commitments // targetState == ChallengeDone
bitmap := consensus.bitmap
if targetState == FinalChallengeDone {
commitments = consensus.finalCommitments
bitmap = consensus.finalBitmap
if len((*prepareSigs)) >= ((len(consensus.PublicKeys)*2)/3 + 1) {
utils.GetLogInstance().Debug("Received additional new prepare message", "validatorID", validatorID)
return
}
// proceed only when the message has not been received before
_, ok = (*commitments)[validatorID]
shouldProcess := !ok
if len((*commitments)) >= ((len(consensus.PublicKeys)*2)/3 + 1) {
shouldProcess = false
}
if shouldProcess {
point := crypto.Ed25519Curve.Point()
point.UnmarshalBinary(commitment)
(*commitments)[validatorID] = point
consensus.Log.Debug("Received new commit message", "num", len(*commitments), "validatorID", validatorID, "PublicKeys", len(consensus.PublicKeys))
// Set the bitmap to indicate this validator signed.
bitmap.SetKey(value.PubKey, true)
// Check BLS signature for the multi-sig
var sign bls.Sign
err := sign.Deserialize(prepareSig)
if err != nil {
utils.GetLogInstance().Error("Failed to deserialize bls signature", "validatorID", validatorID)
return
}
if !shouldProcess {
consensus.Log.Debug("Received additional new commit message", "validatorID", validatorID)
if !sign.VerifyHash(validatorPeer.PubKey, consensus.blockHash[:]) {
utils.GetLogInstance().Error("Received invalid BLS signature", "validatorID", validatorID)
return
}
if len((*commitments)) >= ((len(consensus.PublicKeys)*2)/3+1) && consensus.state < targetState {
consensus.Log.Debug("Enough commitments received with signatures", "num", len(*commitments), "state", consensus.state)
// Broadcast challenge
msgTypeToSend := consensus_proto.MessageType_CHALLENGE // targetState == ChallengeDone
if targetState == FinalChallengeDone {
msgTypeToSend = consensus_proto.MessageType_FINAL_CHALLENGE
}
utils.GetLogInstance().Debug("Received new prepare signature", "numReceivedSoFar", len(*prepareSigs), "validatorID", validatorID, "PublicKeys", len(consensus.PublicKeys))
(*prepareSigs)[validatorID] = &sign
prepareBitmap.SetKey(validatorPeer.PubKey, true) // Set the bitmap indicating that this validator signed.
msgToSend, challengeScalar, aggCommitment := consensus.constructChallengeMessage(msgTypeToSend)
bytes, err := challengeScalar.MarshalBinary()
if err != nil {
log.Error("Failed to serialize challenge")
}
if msgTypeToSend == consensus_proto.MessageType_CHALLENGE {
copy(consensus.challenge[:], bytes)
consensus.aggregatedCommitment = aggCommitment
} else if msgTypeToSend == consensus_proto.MessageType_FINAL_CHALLENGE {
copy(consensus.finalChallenge[:], bytes)
consensus.aggregatedFinalCommitment = aggCommitment
}
targetState := PreparedDone
if len((*prepareSigs)) >= ((len(consensus.PublicKeys)*2)/3+1) && consensus.state < targetState {
utils.GetLogInstance().Debug("Enough prepares received with signatures", "num", len(*prepareSigs), "state", consensus.state)
// Add leader's response
consensus.responseByLeader(challengeScalar, targetState == ChallengeDone)
// Broadcast challenge message
// Construct and broadcast prepared message
msgToSend, aggSig := consensus.constructPreparedMessage()
consensus.aggregatedPrepareSig = aggSig
host.BroadcastMessageFromLeader(consensus.host, consensus.GetValidatorPeers(), msgToSend, consensus.OfflinePeers)
// Set state to targetState (ChallengeDone or FinalChallengeDone)
// Set state to targetState
consensus.state = targetState
}
}
// responseByLeader generates the leader's own response to the challenge before receiving other validators' responses
func (consensus *Consensus) responseByLeader(challenge kyber.Scalar, firstRound bool) {
// Generate leader's own response
response, err := crypto.Response(crypto.Ed25519Curve, consensus.priKey, consensus.secret[consensus.consensusID], challenge)
if err == nil {
if firstRound {
(*consensus.responses)[consensus.nodeID] = response
consensus.bitmap.SetKey(consensus.pubKey, true)
} else {
(*consensus.finalResponses)[consensus.nodeID] = response
consensus.finalBitmap.SetKey(consensus.pubKey, true)
}
} else {
log.Warn("leader failed to generate response", "err", err)
// Leader signs the multi-sig and bitmap (for the commit phase)
multiSigAndBitmap := append(aggSig.Serialize(), prepareBitmap.Bitmap...)
(*consensus.commitSigs)[consensus.nodeID] = consensus.priKey.SignHash(multiSigAndBitmap)
}
}
// Processes the response message sent from validators
func (consensus *Consensus) processResponseMessage(message consensus_proto.Message, targetState State) {
consensusID := message.ConsensusId
blockHash := message.BlockHash
// Processes the commit message sent from validators
func (consensus *Consensus) processCommitMessage(message consensus_proto.Message) {
validatorID := message.SenderId
response := message.Payload
signature := message.Signature
commitSig := message.Payload
shouldProcess := true
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
// check consensus Id
if consensusID != consensus.consensusID {
shouldProcess = false
consensus.Log.Warn("Received Response with wrong consensus Id", "myConsensusId", consensus.consensusID, "theirConsensusId", consensusID, "consensus", consensus)
}
validatorPeer := consensus.getValidatorPeerByID(validatorID)
if !bytes.Equal(blockHash, consensus.blockHash[:]) {
consensus.Log.Warn("Received Response with wrong blockHash", "myConsensusId", consensus.consensusID, "theirConsensusId", consensusID, "consensus", consensus)
if !consensus.checkConsensusMessage(message, validatorPeer.PubKey) {
utils.GetLogInstance().Debug("Failed to check the validator message", "validatorID", validatorID)
return
}
// Verify signature
v, ok := consensus.validators.Load(validatorID)
if !ok {
consensus.Log.Warn("Received message from unrecognized validator", "validatorID", validatorID, "consensus", consensus)
commitSigs := consensus.commitSigs
commitBitmap := consensus.commitBitmap
// proceed only when the message has not been received before
_, ok := (*commitSigs)[validatorID]
if ok {
utils.GetLogInstance().Debug("Already received commit message from the validator", "validatorID", validatorID)
return
}
value, ok := v.(p2p.Peer)
if !ok {
consensus.Log.Warn("Invalid validator", "validatorID", validatorID, "consensus", consensus)
if len((*commitSigs)) >= ((len(consensus.PublicKeys)*2)/3 + 1) {
utils.GetLogInstance().Debug("Received additional new commit message", "validatorID", strconv.Itoa(int(validatorID)))
return
}
message.Signature = nil
messageBytes, err := message.XXX_Marshal([]byte{}, true)
// Verify that the signature on the prepare multi-sig and bitmap is correct
var sign bls.Sign
err := sign.Deserialize(commitSig)
if err != nil {
consensus.Log.Warn("Failed to marshal the announce message", "error", err)
utils.GetLogInstance().Debug("Failed to deserialize bls signature", "validatorID", validatorID)
return
}
if schnorr.Verify(crypto.Ed25519Curve, value.PubKey, messageBytes, signature) != nil {
consensus.Log.Warn("Received message with invalid signature", "validatorKey", consensus.leader.PubKey, "consensus", consensus)
aggSig := bls_cosi.AggregateSig(consensus.GetPrepareSigsArray())
if !sign.VerifyHash(validatorPeer.PubKey, append(aggSig.Serialize(), consensus.prepareBitmap.Bitmap...)) {
utils.GetLogInstance().Error("Received invalid BLS signature", "validatorID", validatorID)
return
}
commitments := consensus.commitments // targetState == CollectiveSigDone
responses := consensus.responses
bitmap := consensus.bitmap
if targetState == Finished {
commitments = consensus.finalCommitments
responses = consensus.finalResponses
bitmap = consensus.finalBitmap
}
utils.GetLogInstance().Debug("Received new commit message", "numReceivedSoFar", len(*commitSigs), "validatorID", strconv.Itoa(int(validatorID)))
(*commitSigs)[validatorID] = &sign
// Set the bitmap indicating that this validator signed.
commitBitmap.SetKey(validatorPeer.PubKey, true)
// proceed only when the message has not been received before
_, ok = (*responses)[validatorID]
shouldProcess = shouldProcess && !ok
targetState := CommittedDone
if len(*commitSigs) >= ((len(consensus.PublicKeys)*2)/3+1) && consensus.state != targetState {
utils.GetLogInstance().Info("Enough commits received!", "num", len(*commitSigs), "state", consensus.state)
if len((*responses)) >= ((len(consensus.PublicKeys)*2)/3 + 1) {
shouldProcess = false
}
// Construct and broadcast committed message
msgToSend, aggSig := consensus.constructCommittedMessage()
consensus.aggregatedCommitSig = aggSig
host.BroadcastMessageFromLeader(consensus.host, consensus.GetValidatorPeers(), msgToSend, consensus.OfflinePeers)
if shouldProcess {
// verify the response matches the received commit
responseScalar := crypto.Ed25519Curve.Scalar()
responseScalar.UnmarshalBinary(response)
err := consensus.verifyResponse(commitments, responseScalar, validatorID)
var blockObj types.Block
err := rlp.DecodeBytes(consensus.block, &blockObj)
if err != nil {
consensus.Log.Warn("leader failed to verify the response", "error", err, "VID", strconv.Itoa(int(validatorID)))
shouldProcess = false
} else {
(*responses)[validatorID] = responseScalar
consensus.Log.Debug("Received new response message", "num", len(*responses), "validatorID", strconv.Itoa(int(validatorID)))
// Set the bitmap to indicate this validator signed.
bitmap.SetKey(value.PubKey, true)
utils.GetLogInstance().Debug("failed to construct the new block after consensus")
}
}
if !shouldProcess {
consensus.Log.Debug("Received new response message", "validatorID", strconv.Itoa(int(validatorID)))
return
}
threshold := 2
if targetState == Finished {
threshold = 1
}
if len(*responses) >= ((len(consensus.PublicKeys)*threshold)/3+1) && consensus.state != targetState {
if len(*responses) >= ((len(consensus.PublicKeys)*threshold)/3+1) && consensus.state != targetState {
consensus.Log.Debug("Enough responses received with signatures", "num", len(*responses), "state", consensus.state)
// Aggregate responses
responseScalars := []kyber.Scalar{}
for _, val := range *responses {
responseScalars = append(responseScalars, val)
}
aggregatedResponse, err := crypto.AggregateResponses(crypto.Ed25519Curve, responseScalars)
if err != nil {
log.Error("Failed to aggregate responses")
return
}
aggregatedCommitment := consensus.aggregatedCommitment
if targetState == Finished {
aggregatedCommitment = consensus.aggregatedFinalCommitment
}
collectiveSigAndBitmap, err := crypto.Sign(crypto.Ed25519Curve, aggregatedCommitment, aggregatedResponse, bitmap)
if err != nil {
log.Error("Failed to create collective signature")
return
}
log.Info("CollectiveSig and Bitmap created.", "size", len(collectiveSigAndBitmap))
collectiveSig := [64]byte{}
copy(collectiveSig[:], collectiveSigAndBitmap[:64])
bitmap := collectiveSigAndBitmap[64:]
// Set state to CollectiveSigDone or Finished
consensus.state = targetState
if consensus.state != Finished {
// Start the second round of Cosi
msgToSend := consensus.constructCollectiveSigMessage(collectiveSig, bitmap)
host.BroadcastMessageFromLeader(consensus.host, consensus.GetValidatorPeers(), msgToSend, consensus.OfflinePeers)
consensus.commitByLeader(false)
} else {
var blockObj types.Block
err = rlp.DecodeBytes(consensus.block, &blockObj)
if err != nil {
consensus.Log.Debug("failed to construct the new block after consensus")
}
// Sign the block
copy(blockObj.Header().PrepareSignature[:], consensus.aggregatedPrepareSig.Serialize()[:])
copy(blockObj.Header().PrepareBitmap[:], consensus.prepareBitmap.Bitmap)
copy(blockObj.Header().CommitSignature[:], consensus.aggregatedCommitSig.Serialize()[:])
copy(blockObj.Header().CommitBitmap[:], consensus.commitBitmap.Bitmap)
// Sign the block
copy(blockObj.Header().Signature[:], collectiveSig[:])
copy(blockObj.Header().Bitmap[:], bitmap)
consensus.OnConsensusDone(&blockObj)
consensus.OnConsensusDone(&blockObj)
consensus.state = targetState
consensus.reportMetrics(blockObj)
select {
case consensus.VerifiedNewBlock <- &blockObj:
default:
utils.GetLogInstance().Info("[SYNC] consensus verified block send to chan failed", "blockHash", blockObj.Hash())
}
// Dump new block into level db.
explorer.GetStorageInstance(consensus.leader.IP, consensus.leader.Port, true).Dump(&blockObj, consensus.consensusID)
consensus.reportMetrics(blockObj)
// Reset state to Finished, and clear other data.
consensus.ResetState()
consensus.consensusID++
// Dump new block into level db.
explorer.GetStorageInstance(consensus.leader.IP, consensus.leader.Port, true).Dump(&blockObj, consensus.consensusID)
consensus.Log.Debug("HOORAY!!! CONSENSUS REACHED!!!", "consensusID", consensus.consensusID, "numOfSignatures", len(*responses))
// Reset state to Finished, and clear other data.
consensus.ResetState()
consensus.consensusID++
// TODO: remove this temporary delay
time.Sleep(500 * time.Millisecond)
// Send signal to Node so the new block can be added and new round of consensus can be triggered
consensus.ReadySignal <- struct{}{}
}
}
}
}
utils.GetLogInstance().Debug("HOORAY!!! CONSENSUS REACHED!!!", "consensusID", consensus.consensusID, "numOfSignatures", len(*commitSigs))
func (consensus *Consensus) verifyResponse(commitments *map[uint32]kyber.Point, response kyber.Scalar, validatorID uint32) error {
if response.Equal(crypto.Ed25519Curve.Scalar()) {
return errors.New("response is zero valued")
}
_, ok := (*commitments)[validatorID]
if !ok {
return errors.New("no commit is received for the validator")
// TODO: remove this temporary delay
time.Sleep(500 * time.Millisecond)
// Send signal to Node so the new block can be added and new round of consensus can be triggered
consensus.ReadySignal <- struct{}{}
}
return nil
}
func (consensus *Consensus) reportMetrics(block types.Block) {
@ -415,7 +276,7 @@ func (consensus *Consensus) reportMetrics(block types.Block) {
timeElapsed := endTime.Sub(startTime)
numOfTxs := len(block.Transactions())
tps := float64(numOfTxs) / timeElapsed.Seconds()
consensus.Log.Info("TPS Report",
utils.GetLogInstance().Info("TPS Report",
"numOfTXs", numOfTxs,
"startTime", startTime,
"endTime", endTime,
@ -435,7 +296,7 @@ func (consensus *Consensus) reportMetrics(block types.Block) {
txHashes = append(txHashes, hex.EncodeToString(txHash[:]))
}
metrics := map[string]interface{}{
"key": consensus.pubKey.String(),
"key": hex.EncodeToString(consensus.pubKey.Serialize()),
"tps": tps,
"txCount": numOfTxs,
"nodeCount": len(consensus.PublicKeys) + 1,

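Both leader paths above (processPrepareMessage and processCommitMessage) gate the phase change on the same quorum rule: signatures from at least two thirds of the committee plus one. A tiny illustrative helper; enoughSigs is hypothetical and the leader code inlines the expression:

package sketch

// enoughSigs reports whether numSigs reaches the 2*n/3+1 threshold used in
// processPrepareMessage and processCommitMessage for a committee of numKeys
// public keys. For example, a 4-key committee needs 3 signatures:
// enoughSigs(2, 4) == false, enoughSigs(3, 4) == true.
func enoughSigs(numSigs, numKeys int) bool {
	return numSigs >= numKeys*2/3+1
}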
@ -3,11 +3,11 @@ package consensus
import (
"bytes"
"github.com/dedis/kyber"
"github.com/harmony-one/bls/ffi/go/bls"
consensus_proto "github.com/harmony-one/harmony/api/consensus"
"github.com/harmony-one/harmony/api/proto"
"github.com/harmony-one/harmony/crypto"
"github.com/harmony-one/harmony/log"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/utils"
)
// Constructs the announce message
@ -15,159 +15,68 @@ func (consensus *Consensus) constructAnnounceMessage() []byte {
message := consensus_proto.Message{}
message.Type = consensus_proto.MessageType_ANNOUNCE
// 4 byte consensus id
message.ConsensusId = consensus.consensusID
// 32 byte block hash
message.BlockHash = consensus.blockHash[:]
// 4 byte sender id
message.SenderId = uint32(consensus.nodeID)
consensus.populateMessageFields(&message)
// n byte of block header
message.Payload = consensus.block
marshaledMessage, err := message.XXX_Marshal([]byte{}, true)
if err != nil {
consensus.Log.Debug("Failed to marshal Announce message", "error", err)
}
// 64 byte of signature on previous data
signature := consensus.signMessage(marshaledMessage)
message.Signature = signature
message.Payload = consensus.block // TODO: send only block header in the announce phase.
marshaledMessage, err = message.XXX_Marshal([]byte{}, true)
marshaledMessage, err := consensus.signAndMarshalConsensusMessage(&message)
if err != nil {
consensus.Log.Debug("Failed to marshal Announce message", "error", err)
utils.GetLogInstance().Error("Failed to sign and marshal the Announce message", "error", err)
}
consensus.Log.Info("New Announce", "NodeID", consensus.nodeID, "bitmap", consensus.bitmap)
return proto.ConstructConsensusMessage(marshaledMessage)
}
// Construct the challenge message, returning the challenge message in bytes, the challenge scalar and the aggregated commitment point.
func (consensus *Consensus) constructChallengeMessage(msgType consensus_proto.MessageType) ([]byte, kyber.Scalar, kyber.Point) {
// Construct the prepared message, returning prepared message in bytes.
func (consensus *Consensus) constructPreparedMessage() ([]byte, *bls.Sign) {
message := consensus_proto.Message{}
message.Type = msgType
message.Type = consensus_proto.MessageType_PREPARED
// 4 byte consensus id
message.ConsensusId = consensus.consensusID
// 32 byte block hash
message.BlockHash = consensus.blockHash[:]
// 4 byte sender id
message.SenderId = uint32(consensus.nodeID)
consensus.populateMessageFields(&message)
//// Payload
buffer := bytes.NewBuffer([]byte{})
commitmentsMap := consensus.commitments // msgType == Challenge
bitmap := consensus.bitmap
if msgType == consensus_proto.MessageType_FINAL_CHALLENGE {
commitmentsMap = consensus.finalCommitments
bitmap = consensus.finalBitmap
}
// 48 bytes aggregated signature
aggSig := bls_cosi.AggregateSig(consensus.GetPrepareSigsArray())
buffer.Write(aggSig.Serialize())
// 33 byte aggregated commit
commitments := make([]kyber.Point, 0)
for _, val := range *commitmentsMap {
commitments = append(commitments, val)
}
aggCommitment, aggCommitmentBytes := getAggregatedCommit(commitments)
buffer.Write(aggCommitmentBytes)
// 33 byte aggregated key
buffer.Write(getAggregatedKey(bitmap))
// 32 byte challenge
challengeScalar := getChallenge(aggCommitment, bitmap.AggregatePublic, message.BlockHash)
bytes, err := challengeScalar.MarshalBinary()
if err != nil {
log.Error("Failed to serialize challenge")
}
buffer.Write(bytes)
// Bitmap
buffer.Write(consensus.prepareBitmap.Bitmap)
message.Payload = buffer.Bytes()
//// END Payload
marshaledMessage, err := message.XXX_Marshal([]byte{}, true)
marshaledMessage, err := consensus.signAndMarshalConsensusMessage(&message)
if err != nil {
consensus.Log.Debug("Failed to marshal Challenge message", "error", err)
utils.GetLogInstance().Error("Failed to sign and marshal the Prepared message", "error", err)
}
// 64 byte of signature on previous data
signature := consensus.signMessage(marshaledMessage)
message.Signature = signature
marshaledMessage, err = message.XXX_Marshal([]byte{}, true)
if err != nil {
consensus.Log.Debug("Failed to marshal Challenge message", "error", err)
}
consensus.Log.Info("New Challenge", "NodeID", consensus.nodeID, "bitmap", consensus.bitmap)
return proto.ConstructConsensusMessage(marshaledMessage), challengeScalar, aggCommitment
return proto.ConstructConsensusMessage(marshaledMessage), aggSig
}
// Construct the collective signature message
func (consensus *Consensus) constructCollectiveSigMessage(collectiveSig [64]byte, bitmap []byte) []byte {
// Construct the committed message, returning committed message in bytes.
func (consensus *Consensus) constructCommittedMessage() ([]byte, *bls.Sign) {
message := consensus_proto.Message{}
message.Type = consensus_proto.MessageType_COLLECTIVE_SIG
// 4 byte consensus id
message.ConsensusId = consensus.consensusID
message.Type = consensus_proto.MessageType_COMMITTED
// 32 byte block hash
message.BlockHash = consensus.blockHash[:]
// 4 byte sender id
message.SenderId = uint32(consensus.nodeID)
consensus.populateMessageFields(&message)
//// Payload
buffer := bytes.NewBuffer([]byte{})
// 64 byte collective signature
buffer.Write(collectiveSig[:])
// 48 bytes aggregated signature
aggSig := bls_cosi.AggregateSig(consensus.GetCommitSigsArray())
buffer.Write(aggSig.Serialize())
// N byte bitmap
buffer.Write(bitmap)
// Bitmap
buffer.Write(consensus.commitBitmap.Bitmap)
message.Payload = buffer.Bytes()
//// END Payload
marshaledMessage, err := message.XXX_Marshal([]byte{}, true)
if err != nil {
consensus.Log.Debug("Failed to marshal Challenge message", "error", err)
}
// 64 byte of signature on previous data
signature := consensus.signMessage(marshaledMessage)
message.Signature = signature
marshaledMessage, err = message.XXX_Marshal([]byte{}, true)
if err != nil {
consensus.Log.Debug("Failed to marshal Challenge message", "error", err)
}
consensus.Log.Info("New CollectiveSig", "NodeID", consensus.nodeID, "bitmap", consensus.bitmap)
return proto.ConstructConsensusMessage(marshaledMessage)
}
func getAggregatedCommit(commitments []kyber.Point) (commitment kyber.Point, bytes []byte) {
aggCommitment := crypto.AggregateCommitmentsOnly(crypto.Ed25519Curve, commitments)
bytes, err := aggCommitment.MarshalBinary()
if err != nil {
panic("Failed to deserialize the aggregated commitment")
}
return aggCommitment, append(bytes[:], byte(0))
}
func getAggregatedKey(bitmap *crypto.Mask) []byte {
bytes, err := bitmap.AggregatePublic.MarshalBinary()
if err != nil {
panic("Failed to deserialize the aggregated key")
}
return append(bytes[:], byte(0))
}
func getChallenge(aggCommitment, aggKey kyber.Point, message []byte) kyber.Scalar {
challenge, err := crypto.Challenge(crypto.Ed25519Curve, aggCommitment, aggKey, message)
marshaledMessage, err := consensus.signAndMarshalConsensusMessage(&message)
if err != nil {
log.Error("Failed to generate challenge")
utils.GetLogInstance().Error("Failed to sign and marshal the Committed message", "error", err)
}
return challenge
return proto.ConstructConsensusMessage(marshaledMessage), aggSig
}
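In the constructors above, the PREPARED payload is the 48-byte aggregated BLS signature over the block hash followed by the prepare bitmap, and those exact bytes are what validators sign during the commit phase. A short sketch of that layout; preparedPayload is a hypothetical helper, and bls_cosi.AggregateSig is assumed to return a *bls.Sign as the surrounding code implies:

package sketch

import (
	"github.com/harmony-one/bls/ffi/go/bls"
	bls_cosi "github.com/harmony-one/harmony/crypto/bls"
)

// preparedPayload builds <48-byte aggregated signature> || <bitmap>, the same
// layout constructPreparedMessage writes into Payload and processCommitMessage
// later verifies each commit signature against.
func preparedPayload(prepareSigs []*bls.Sign, prepareBitmap *bls_cosi.Mask) []byte {
	aggSig := bls_cosi.AggregateSig(prepareSigs)
	return append(aggSig.Serialize(), prepareBitmap.Bitmap...)
}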

@ -3,50 +3,54 @@ package consensus
import (
"testing"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p/p2pimpl"
consensus_proto "github.com/harmony-one/harmony/api/consensus"
"github.com/harmony-one/harmony/crypto"
"github.com/harmony-one/harmony/crypto/pki"
"github.com/harmony-one/harmony/p2p"
)
func TestConstructAnnounceMessage(test *testing.T) {
leader := p2p.Peer{IP: "1", Port: "2"}
validator := p2p.Peer{IP: "3", Port: "5"}
host := p2pimpl.NewHost(leader)
leader := p2p.Peer{IP: "127.0.0.1", Port: "19999"}
validator := p2p.Peer{IP: "127.0.0.1", Port: "55555"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
test.Fatalf("newhost failure: %v", err)
}
consensus := New(host, "0", []p2p.Peer{leader, validator}, leader)
consensus.blockHash = [32]byte{}
msg := consensus.constructAnnounceMessage()
if len(msg) != 105 {
if len(msg) != 93 {
test.Errorf("Annouce message is not constructed in the correct size: %d", len(msg))
}
}
func TestConstructChallengeMessage(test *testing.T) {
leaderPriKey := crypto.Ed25519Curve.Scalar()
priKeyInBytes := crypto.HashSha256("12")
leaderPriKey.UnmarshalBinary(priKeyInBytes[:])
leaderPubKey := pki.GetPublicKeyFromScalar(leaderPriKey)
leader := p2p.Peer{IP: "1", Port: "2", PubKey: leaderPubKey}
validatorPriKey := crypto.Ed25519Curve.Scalar()
priKeyInBytes = crypto.HashSha256("12")
validatorPriKey.UnmarshalBinary(priKeyInBytes[:])
validatorPubKey := pki.GetPublicKeyFromScalar(leaderPriKey)
validator := p2p.Peer{IP: "3", Port: "5", PubKey: validatorPubKey}
host := p2pimpl.NewHost(leader)
func TestConstructPreparedMessage(test *testing.T) {
leaderPriKey, leaderPubKey := utils.GenKeyBLS("127.0.0.1", "6000")
leader := p2p.Peer{IP: "127.0.0.1", Port: "6000", PubKey: leaderPubKey}
validatorPriKey, validatorPubKey := utils.GenKeyBLS("127.0.0.1", "5555")
validator := p2p.Peer{IP: "127.0.0.1", Port: "5555", PubKey: validatorPubKey}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
test.Fatalf("newhost failure: %v", err)
}
consensus := New(host, "0", []p2p.Peer{leader, validator}, leader)
consensus.blockHash = [32]byte{}
(*consensus.commitments)[0] = leaderPubKey
(*consensus.commitments)[1] = validatorPubKey
consensus.bitmap.SetKey(leaderPubKey, true)
consensus.bitmap.SetKey(validatorPubKey, true)
msg, _, _ := consensus.constructChallengeMessage(consensus_proto.MessageType_CHALLENGE)
message := "test string"
(*consensus.prepareSigs)[0] = leaderPriKey.Sign(message)
(*consensus.prepareSigs)[1] = validatorPriKey.Sign(message)
consensus.prepareBitmap.SetKey(leaderPubKey, true)
consensus.prepareBitmap.SetKey(validatorPubKey, true)
msg, _ := consensus.constructPreparedMessage()
if len(msg) != 205 {
if len(msg) != 144 {
test.Errorf("Challenge message is not constructed in the correct size: %d", len(msg))
}
}

@ -1,33 +1,48 @@
package consensus
import (
"fmt"
"testing"
"time"
"crypto/sha256"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/mock/gomock"
"github.com/harmony-one/harmony/crypto"
protobuf "github.com/golang/protobuf/proto"
consensus_proto "github.com/harmony-one/harmony/api/consensus"
"github.com/harmony-one/harmony/core/types"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/utils"
mock_host "github.com/harmony-one/harmony/p2p/host/mock"
"github.com/stretchr/testify/assert"
"testing"
"time"
"github.com/harmony-one/harmony/p2p/p2pimpl"
consensus_proto "github.com/harmony-one/harmony/api/consensus"
"github.com/harmony-one/harmony/p2p"
)
func TestProcessMessageLeaderCommit(test *testing.T) {
var (
ip = "127.0.0.1"
blockHash = sha256.Sum256([]byte("test"))
)
func TestProcessMessageLeaderPrepare(test *testing.T) {
ctrl := gomock.NewController(test)
defer ctrl.Finish()
leader := p2p.Peer{IP: "1", Port: "2"}
_, leader.PubKey = utils.GenKey(leader.IP, leader.Port)
leader := p2p.Peer{IP: ip, Port: "7777"}
_, leader.PubKey = utils.GenKeyBLS(leader.IP, leader.Port)
validators := make([]p2p.Peer, 3)
hosts := make([]p2p.Host, 3)
validator1 := p2p.Peer{IP: "3", Port: "4", ValidatorID: 1}
_, validator1.PubKey = utils.GenKey(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "5", Port: "6", ValidatorID: 2}
_, validator2.PubKey = utils.GenKey(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "7", Port: "8", ValidatorID: 3}
_, validator3.PubKey = utils.GenKey(validator3.IP, validator3.Port)
for i := 0; i < 3; i++ {
port := fmt.Sprintf("%d", 7788+i)
validators[i] = p2p.Peer{IP: ip, Port: port, ValidatorID: i + 1}
_, validators[i].PubKey = utils.GenKeyBLS(validators[i].IP, validators[i].Port)
}
m := mock_host.NewMockHost(ctrl)
// Set up expectations on the mock p2p host.
@ -35,77 +50,136 @@ func TestProcessMessageLeaderCommit(test *testing.T) {
m.EXPECT().GetSelfPeer().Return(leader)
m.EXPECT().SendMessage(gomock.Any(), gomock.Any()).Times(3)
consensusLeader := New(m, "0", []p2p.Peer{validator1, validator2, validator3}, leader)
consensusLeader.blockHash = [32]byte{}
consensusLeader := New(m, "0", validators, leader)
consensusLeader.blockHash = blockHash
consensusValidator1 := New(p2pimpl.NewHost(validator1), "0", []p2p.Peer{validator1, validator2, validator3}, leader)
consensusValidator1.blockHash = [32]byte{}
_, msg := consensusValidator1.constructCommitMessage(consensus_proto.MessageType_COMMIT)
consensusLeader.ProcessMessageLeader(msg[1:])
consensusValidators := make([]*Consensus, 3)
for i := 0; i < 3; i++ {
priKey, _, _ := utils.GenKeyP2P(validators[i].IP, validators[i].Port)
host, err := p2pimpl.NewHost(&validators[i], priKey)
if err != nil {
test.Fatalf("newhost error: %v", err)
}
hosts[i] = host
consensusValidator2 := New(p2pimpl.NewHost(validator2), "0", []p2p.Peer{validator1, validator2, validator3}, leader)
consensusValidator2.blockHash = [32]byte{}
_, msg = consensusValidator2.constructCommitMessage(consensus_proto.MessageType_COMMIT)
consensusLeader.ProcessMessageLeader(msg[1:])
consensusValidators[i] = New(hosts[i], "0", validators, leader)
consensusValidators[i].blockHash = blockHash
msg := consensusValidators[i].constructPrepareMessage()
consensusLeader.ProcessMessageLeader(msg[1:])
}
consensusValidator3 := New(p2pimpl.NewHost(validator3), "0", []p2p.Peer{validator1, validator2, validator3}, leader)
consensusValidator3.blockHash = [32]byte{}
_, msg = consensusValidator3.constructCommitMessage(consensus_proto.MessageType_COMMIT)
consensusLeader.ProcessMessageLeader(msg[1:])
assert.Equal(test, ChallengeDone, consensusLeader.state)
assert.Equal(test, PreparedDone, consensusLeader.state)
time.Sleep(1 * time.Second)
}
func TestProcessMessageLeaderResponse(test *testing.T) {
func TestProcessMessageLeaderPrepareInvalidSignature(test *testing.T) {
ctrl := gomock.NewController(test)
defer ctrl.Finish()
leader := p2p.Peer{IP: "1", Port: "2"}
_, leader.PubKey = utils.GenKey(leader.IP, leader.Port)
leader := p2p.Peer{IP: ip, Port: "7777"}
_, leader.PubKey = utils.GenKeyBLS(leader.IP, leader.Port)
validators := make([]p2p.Peer, 3)
hosts := make([]p2p.Host, 3)
validator1 := p2p.Peer{IP: "3", Port: "4", ValidatorID: 1}
_, validator1.PubKey = utils.GenKey(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "5", Port: "6", ValidatorID: 2}
_, validator2.PubKey = utils.GenKey(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "7", Port: "8", ValidatorID: 3}
_, validator3.PubKey = utils.GenKey(validator3.IP, validator3.Port)
for i := 0; i < 3; i++ {
port := fmt.Sprintf("%d", 7788+i)
validators[i] = p2p.Peer{IP: ip, Port: port, ValidatorID: i + 1}
_, validators[i].PubKey = utils.GenKeyBLS(validators[i].IP, validators[i].Port)
}
m := mock_host.NewMockHost(ctrl)
// Set up expectations on the mock p2p host.
m.EXPECT().GetSelfPeer().Return(leader)
m.EXPECT().SendMessage(gomock.Any(), gomock.Any()).Times(6)
m.EXPECT().SendMessage(gomock.Any(), gomock.Any()).Times(0)
consensusLeader := New(m, "0", validators, leader)
consensusLeader.blockHash = blockHash
consensusValidators := make([]*Consensus, 3)
for i := 0; i < 3; i++ {
priKey, _, _ := utils.GenKeyP2P(validators[i].IP, validators[i].Port)
host, err := p2pimpl.NewHost(&validators[i], priKey)
if err != nil {
test.Fatalf("newhost error: %v", err)
}
hosts[i] = host
consensusValidators[i] = New(hosts[i], "0", validators, leader)
consensusValidators[i].blockHash = blockHash
msg := consensusValidators[i].constructPrepareMessage()
message := consensus_proto.Message{}
protobuf.Unmarshal(msg[1:], &message)
// Put invalid signature
message.Signature = consensusValidators[i].signMessage([]byte("random string"))
msg, _ = protobuf.Marshal(&message)
consensusLeader.ProcessMessageLeader(msg[1:])
}
assert.Equal(test, Finished, consensusLeader.state)
consensusLeader := New(m, "0", []p2p.Peer{validator1, validator2, validator3}, leader)
consensusLeader.blockHash = [32]byte{}
consensusValidator1 := New(p2pimpl.NewHost(validator1), "0", []p2p.Peer{validator1, validator2, validator3}, leader)
consensusValidator1.blockHash = [32]byte{}
_, msg := consensusValidator1.constructCommitMessage(consensus_proto.MessageType_COMMIT)
consensusLeader.ProcessMessageLeader(msg[1:])
time.Sleep(1 * time.Second)
}
consensusValidator2 := New(p2pimpl.NewHost(validator2), "0", []p2p.Peer{validator1, validator2, validator3}, leader)
consensusValidator2.blockHash = [32]byte{}
_, msg = consensusValidator2.constructCommitMessage(consensus_proto.MessageType_COMMIT)
consensusLeader.ProcessMessageLeader(msg[1:])
func TestProcessMessageLeaderCommit(test *testing.T) {
ctrl := gomock.NewController(test)
defer ctrl.Finish()
consensusValidator3 := New(p2pimpl.NewHost(validator3), "0", []p2p.Peer{validator1, validator2, validator3}, leader)
consensusValidator3.blockHash = [32]byte{}
_, msg = consensusValidator3.constructCommitMessage(consensus_proto.MessageType_COMMIT)
consensusLeader.ProcessMessageLeader(msg[1:])
leader := p2p.Peer{IP: ip, Port: "8889"}
_, leader.PubKey = utils.GenKeyBLS(leader.IP, leader.Port)
msg = consensusValidator1.constructResponseMessage(consensus_proto.MessageType_RESPONSE, crypto.Ed25519Curve.Scalar().One())
consensusLeader.ProcessMessageLeader(msg[1:])
validators := make([]p2p.Peer, 3)
hosts := make([]p2p.Host, 3)
msg = consensusValidator2.constructResponseMessage(consensus_proto.MessageType_RESPONSE, crypto.Ed25519Curve.Scalar().One())
consensusLeader.ProcessMessageLeader(msg[1:])
for i := 0; i < 3; i++ {
port := fmt.Sprintf("%d", 8788+i)
validators[i] = p2p.Peer{IP: ip, Port: port, ValidatorID: i + 1}
_, validators[i].PubKey = utils.GenKeyBLS(validators[i].IP, validators[i].Port)
}
msg = consensusValidator3.constructResponseMessage(consensus_proto.MessageType_RESPONSE, crypto.Ed25519Curve.Scalar().One())
consensusLeader.ProcessMessageLeader(msg[1:])
m := mock_host.NewMockHost(ctrl)
// Set up the mock host expectations for this test.
m.EXPECT().GetSelfPeer().Return(leader)
m.EXPECT().SendMessage(gomock.Any(), gomock.Any()).Times(3)
assert.Equal(test, CollectiveSigDone, consensusLeader.state)
for i := 0; i < 3; i++ {
priKey, _, _ := utils.GenKeyP2P(validators[i].IP, validators[i].Port)
host, err := p2pimpl.NewHost(&validators[i], priKey)
if err != nil {
test.Fatalf("newhost error: %v", err)
}
hosts[i] = host
}
consensusLeader := New(m, "0", validators, leader)
consensusLeader.state = PreparedDone
consensusLeader.blockHash = blockHash
consensusLeader.OnConsensusDone = func(newBlock *types.Block) {}
consensusLeader.block, _ = rlp.EncodeToBytes(types.NewBlock(&types.Header{}, nil, nil))
(*consensusLeader.prepareSigs)[consensusLeader.nodeID] = consensusLeader.priKey.SignHash(consensusLeader.blockHash[:])
aggSig := bls_cosi.AggregateSig(consensusLeader.GetPrepareSigsArray())
multiSigAndBitmap := append(aggSig.Serialize(), consensusLeader.prepareBitmap.Bitmap...)
consensusLeader.aggregatedPrepareSig = aggSig
consensusValidators := make([]*Consensus, 3)
go func() {
<-consensusLeader.ReadySignal
<-consensusLeader.ReadySignal
}()
for i := 0; i < 3; i++ {
consensusValidators[i] = New(hosts[i], "0", validators, leader)
consensusValidators[i].blockHash = blockHash
msg := consensusValidators[i].constructCommitMessage(multiSigAndBitmap)
consensusLeader.ProcessMessageLeader(msg[1:])
}
assert.Equal(test, Finished, consensusLeader.state)
time.Sleep(1 * time.Second)
}
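
Note: the aggregation step this test exercises is easier to see in isolation. Below is a minimal sketch of how the prepare-phase multi-signature and bitmap payload is assembled, mirroring the multiSigAndBitmap value built above; the parameter names and types (sigs, mask, *bls_cosi.Mask) are assumptions inferred from the bls_cosi usage elsewhere in this diff, not definitions from the source.

import (
	"github.com/harmony-one/bls/ffi/go/bls"
	bls_cosi "github.com/harmony-one/harmony/crypto/bls"
)

// buildPreparedPayload aggregates collected prepare signatures into one BLS
// multi-signature and appends the signer bitmap, as the leader does before
// broadcasting the PREPARED message (sketch only).
func buildPreparedPayload(sigs []*bls.Sign, mask *bls_cosi.Mask) []byte {
	aggSig := bls_cosi.AggregateSig(sigs)             // one 48-byte BLS multi-signature
	return append(aggSig.Serialize(), mask.Bitmap...) // multi-sig followed by the bitmap
}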

@ -7,13 +7,10 @@ type State int
const (
Finished State = iota
AnnounceDone
PrepareDone
PreparedDone
CommitDone
ChallengeDone
ResponseDone
CollectiveSigDone
FinalCommitDone
FinalChallengeDone
FinalResponseDone
CommittedDone
)
// Returns string name for the State enum
@ -21,15 +18,12 @@ func (state State) String() string {
names := [...]string{
"Finished",
"AnnounceDone",
"PrepareDone",
"PreparedDone",
"CommitDone",
"ChallengeDone",
"ResponseDone",
"CollectiveSigDone",
"FinalCommitDone",
"FinalChallengeDone",
"FinalResponseDone"}
"CommittedDone"}
if state < Finished || state > FinalResponseDone {
if state < Finished || state > CommittedDone {
return "Unknown"
}
return names[state]
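
Since the removed and added state names are interleaved in the hunks above, here is how the consensus states and the String method read after this change. This is a sketch reconstructed from the added lines; verify against the source if exact ordering matters.

// Consensus states after the switch to the two-round prepare/commit flow.
const (
	Finished State = iota
	AnnounceDone
	PrepareDone
	PreparedDone
	CommitDone
	CommittedDone
)

// String returns the name of the State value.
func (state State) String() string {
	names := [...]string{
		"Finished", "AnnounceDone", "PrepareDone",
		"PreparedDone", "CommitDone", "CommittedDone"}
	if state < Finished || state > CommittedDone {
		return "Unknown"
	}
	return names[state]
}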

@ -1,17 +1,23 @@
package consensus
import (
"bytes"
"testing"
consensus_proto "github.com/harmony-one/harmony/api/consensus"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/p2p/p2pimpl"
)
func TestNew(test *testing.T) {
leader := p2p.Peer{IP: "1", Port: "2"}
validator := p2p.Peer{IP: "3", Port: "5"}
host := p2pimpl.NewHost(leader)
leader := p2p.Peer{IP: "127.0.0.1", Port: "9902"}
validator := p2p.Peer{IP: "127.0.0.1", Port: "9905"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
test.Fatalf("newhost failure: %v", err)
}
consensus := New(host, "0", []p2p.Peer{leader, validator}, leader)
if consensus.consensusID != 0 {
test.Errorf("Consensus Id is initialized to the wrong value: %d", consensus.consensusID)
@ -25,29 +31,33 @@ func TestNew(test *testing.T) {
test.Error("Consensus ReadySignal should be initialized")
}
if consensus.leader != leader {
if consensus.leader.IP != leader.IP || consensus.leader.Port != leader.Port {
test.Error("Consensus Leader is set to wrong Peer")
}
}
func TestRemovePeers(t *testing.T) {
_, pk1 := utils.GenKey("1", "1")
_, pk2 := utils.GenKey("2", "2")
_, pk3 := utils.GenKey("3", "3")
_, pk4 := utils.GenKey("4", "4")
_, pk5 := utils.GenKey("5", "5")
_, pk1 := utils.GenKeyBLS("1", "1")
_, pk2 := utils.GenKeyBLS("2", "2")
_, pk3 := utils.GenKeyBLS("3", "3")
_, pk4 := utils.GenKeyBLS("4", "4")
_, pk5 := utils.GenKeyBLS("5", "5")
p1 := p2p.Peer{IP: "1", Port: "1", PubKey: pk1}
p2 := p2p.Peer{IP: "2", Port: "2", PubKey: pk2}
p3 := p2p.Peer{IP: "3", Port: "3", PubKey: pk3}
p4 := p2p.Peer{IP: "4", Port: "4", PubKey: pk4}
p1 := p2p.Peer{IP: "127.0.0.1", Port: "19901", PubKey: pk1}
p2 := p2p.Peer{IP: "127.0.0.1", Port: "19902", PubKey: pk2}
p3 := p2p.Peer{IP: "127.0.0.1", Port: "19903", PubKey: pk3}
p4 := p2p.Peer{IP: "127.0.0.1", Port: "19904", PubKey: pk4}
peers := []p2p.Peer{p1, p2, p3, p4}
peerRemove := []p2p.Peer{p1, p2}
leader := p2p.Peer{IP: "127.0.0.1", Port: "9000", PubKey: pk5}
host := p2pimpl.NewHost(leader)
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
t.Fatalf("newhost failure: %v", err)
}
consensus := New(host, "0", peers, leader)
// consensus.DebugPrintPublicKeys()
@ -57,3 +67,75 @@ func TestRemovePeers(t *testing.T) {
consensus.DebugPrintPublicKeys()
}
}
func TestGetPeerFromID(t *testing.T) {
leader := p2p.Peer{IP: "127.0.0.1", Port: "9902"}
validator := p2p.Peer{IP: "127.0.0.1", Port: "9905"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
t.Fatalf("newhost failure: %v", err)
}
consensus := New(host, "0", []p2p.Peer{leader, validator}, leader)
leaderID := utils.GetUniqueIDFromIPPort(leader.IP, leader.Port)
validatorID := utils.GetUniqueIDFromIPPort(validator.IP, validator.Port)
l, _ := consensus.GetPeerFromID(leaderID)
v, _ := consensus.GetPeerFromID(validatorID)
if l.IP != leader.IP || l.Port != leader.Port {
t.Errorf("leader IP not equal")
}
if v.IP != validator.IP || v.Port != validator.Port {
t.Errorf("validator IP not equal")
}
}
func TestPopulateMessageFields(t *testing.T) {
leader := p2p.Peer{IP: "127.0.0.1", Port: "9902"}
validator := p2p.Peer{IP: "127.0.0.1", Port: "9905"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
t.Fatalf("newhost failure: %v", err)
}
consensus := New(host, "0", []p2p.Peer{leader, validator}, leader)
consensus.consensusID = 2
consensus.blockHash = blockHash
consensus.nodeID = 3
msg := consensus_proto.Message{}
consensus.populateMessageFields(&msg)
if msg.ConsensusId != 2 {
t.Errorf("Consensus ID is not populated correctly")
}
if !bytes.Equal(msg.BlockHash[:], blockHash[:]) {
t.Errorf("Block hash is not populated correctly")
}
if msg.SenderId != 3 {
t.Errorf("Sender ID is not populated correctly")
}
}
func TestSignAndMarshalConsensusMessage(t *testing.T) {
leader := p2p.Peer{IP: "127.0.0.1", Port: "9902"}
validator := p2p.Peer{IP: "127.0.0.1", Port: "9905"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
t.Fatalf("newhost failure: %v", err)
}
consensus := New(host, "0", []p2p.Peer{leader, validator}, leader)
consensus.consensusID = 2
consensus.blockHash = blockHash
consensus.nodeID = 3
msg := consensus_proto.Message{}
marshaledMessage, err := consensus.signAndMarshalConsensusMessage(&msg)
if err != nil || len(marshaledMessage) == 0 {
t.Errorf("Failed to sign and marshal the message: %s", err)
}
if len(msg.Signature) == 0 {
t.Error("No signature is signed on the consensus message.")
}
}

@ -1,319 +1,241 @@
package consensus
import (
"bytes"
"github.com/harmony-one/bls/ffi/go/bls"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/dedis/kyber/sign/schnorr"
"github.com/ethereum/go-ethereum/rlp"
protobuf "github.com/golang/protobuf/proto"
consensus_proto "github.com/harmony-one/harmony/api/consensus"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto"
"github.com/harmony-one/harmony/internal/attack"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/log"
)
// ProcessMessageValidator dispatches validator's consensus message.
func (consensus *Consensus) ProcessMessageValidator(payload []byte) {
message := consensus_proto.Message{}
err := message.XXX_Unmarshal(payload)
err := protobuf.Unmarshal(payload, &message)
if err != nil {
consensus.Log.Error("Failed to unmarshal message payload.", "err", err, "consensus", consensus)
utils.GetLogInstance().Error("Failed to unmarshal message payload.", "err", err, "consensus", consensus)
}
switch message.Type {
case consensus_proto.MessageType_ANNOUNCE:
consensus.processAnnounceMessage(message)
case consensus_proto.MessageType_CHALLENGE:
consensus.processChallengeMessage(message, ResponseDone)
case consensus_proto.MessageType_FINAL_CHALLENGE:
consensus.processChallengeMessage(message, FinalResponseDone)
case consensus_proto.MessageType_COLLECTIVE_SIG:
consensus.processCollectiveSigMessage(message)
case consensus_proto.MessageType_PREPARED:
consensus.processPreparedMessage(message)
case consensus_proto.MessageType_COMMITTED:
consensus.processCommittedMessage(message)
default:
consensus.Log.Error("Unexpected message type", "msgType", message.Type, "consensus", consensus)
utils.GetLogInstance().Error("Unexpected message type", "msgType", message.Type, "consensus", consensus)
}
}
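
The hunk above interleaves the removed CoSi cases (CHALLENGE, FINAL_CHALLENGE, COLLECTIVE_SIG) with the added ones; after the change, the validator-side dispatch reduces to the following sketch, reconstructed from the added lines.

switch message.Type {
case consensus_proto.MessageType_ANNOUNCE:
	consensus.processAnnounceMessage(message)
case consensus_proto.MessageType_PREPARED:
	consensus.processPreparedMessage(message)
case consensus_proto.MessageType_COMMITTED:
	consensus.processCommittedMessage(message)
default:
	utils.GetLogInstance().Error("Unexpected message type", "msgType", message.Type, "consensus", consensus)
}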
// Processes the announce message sent from the leader
func (consensus *Consensus) processAnnounceMessage(message consensus_proto.Message) {
consensus.Log.Info("Received Announce Message", "nodeID", consensus.nodeID)
utils.GetLogInstance().Info("Received Announce Message", "nodeID", consensus.nodeID)
consensusID := message.ConsensusId
blockHash := message.BlockHash
leaderID := message.SenderId
block := message.Payload
signature := message.Signature
copy(consensus.blockHash[:], blockHash[:])
consensus.block = block
// Verify block data
// check leader Id
myLeaderID := utils.GetUniqueIDFromPeer(consensus.leader)
if leaderID != myLeaderID {
consensus.Log.Warn("Received message from wrong leader", "myLeaderID", myLeaderID, "receivedLeaderId", leaderID, "consensus", consensus)
return
}
// Verify signature
message.Signature = nil
messageBytes, err := message.XXX_Marshal([]byte{}, true)
if err != nil {
consensus.Log.Warn("Failed to marshal the announce message", "error", err)
}
if schnorr.Verify(crypto.Ed25519Curve, consensus.leader.PubKey, messageBytes, signature) != nil {
consensus.Log.Warn("Received message with invalid signature", "leaderKey", consensus.leader.PubKey, "consensus", consensus)
if !consensus.checkConsensusMessage(message, consensus.leader.PubKey) {
utils.GetLogInstance().Debug("Failed to check the leader message")
return
}
// check block header is valid
var blockObj types.Block
err = rlp.DecodeBytes(block, &blockObj)
err := rlp.DecodeBytes(block, &blockObj)
if err != nil {
consensus.Log.Warn("Unparseable block header data", "error", err)
utils.GetLogInstance().Warn("Unparseable block header data", "error", err)
return
}
consensus.block = block
// Add block to received block cache
consensus.mutex.Lock()
consensus.blocksReceived[consensusID] = &BlockConsensusStatus{block, consensus.state}
consensus.mutex.Unlock()
// Add attack model of IncorrectResponse.
// Add attack model of IncorrectResponse
if attack.GetInstance().IncorrectResponse() {
consensus.Log.Warn("IncorrectResponse attacked")
return
}
// check block hash
hash := blockObj.Hash()
if !bytes.Equal(blockHash[:], hash[:]) {
consensus.Log.Warn("Block hash doesn't match", "consensus", consensus)
utils.GetLogInstance().Warn("IncorrectResponse attacked")
return
}
// check block data (transactions
// check block data transactions
if !consensus.BlockVerifier(&blockObj) {
consensus.Log.Warn("Block content is not verified successfully", "consensus", consensus)
utils.GetLogInstance().Warn("Block content is not verified successfully", "consensus", consensus)
return
}
// Commit and store the commit
secret, msgToSend := consensus.constructCommitMessage(consensus_proto.MessageType_COMMIT)
consensus.secret[consensusID] = secret
// Construct and send prepare message
msgToSend := consensus.constructPrepareMessage()
consensus.SendMessage(consensus.leader, msgToSend)
// consensus.Log.Warn("Sending Commit to leader", "state", targetState)
// Set state to CommitDone
consensus.state = CommitDone
consensus.state = PrepareDone
}
// Processes the challenge message sent from the leader
func (consensus *Consensus) processChallengeMessage(message consensus_proto.Message, targetState State) {
consensus.Log.Info("Received Challenge Message", "nodeID", consensus.nodeID)
// Processes the prepared message sent from the leader
func (consensus *Consensus) processPreparedMessage(message consensus_proto.Message) {
utils.GetLogInstance().Info("Received Prepared Message", "nodeID", consensus.nodeID)
consensusID := message.ConsensusId
blockHash := message.BlockHash
leaderID := message.SenderId
messagePayload := message.Payload
signature := message.Signature
//#### Read payload data
// TODO: use BLS-based multi-sig
offset := 0
// 33 byte of aggregated commit
aggreCommit := messagePayload[offset : offset+33]
offset += 33
// 48 byte of multi-sig
multiSig := messagePayload[offset : offset+48]
offset += 48
// 33 byte of aggregated key
aggreKey := messagePayload[offset : offset+33]
offset += 33
// 32 byte of challenge
challenge := messagePayload[offset : offset+32]
offset += 32
// bitmap
bitmap := messagePayload[offset:]
//#### END Read payload data
// Update readyByConsensus for attack.
attack.GetInstance().UpdateConsensusReady(consensusID)
// Verify block data and the aggregated signatures
// check leader Id
myLeaderID := utils.GetUniqueIDFromPeer(consensus.leader)
if uint32(leaderID) != myLeaderID {
consensus.Log.Warn("Received message from wrong leader", "myLeaderID", myLeaderID, "receivedLeaderId", leaderID, "consensus", consensus)
return
}
// Verify signature
message.Signature = nil
messageBytes, err := message.XXX_Marshal([]byte{}, true)
if err != nil {
consensus.Log.Warn("Failed to marshal the announce message", "error", err)
}
if schnorr.Verify(crypto.Ed25519Curve, consensus.leader.PubKey, messageBytes, signature) != nil {
consensus.Log.Warn("Received message with invalid signature", "leaderKey", consensus.leader.PubKey, "consensus", consensus)
if !consensus.checkConsensusMessage(message, consensus.leader.PubKey) {
utils.GetLogInstance().Debug("Failed to check the leader message")
return
}
// Add attack model of IncorrectResponse.
if attack.GetInstance().IncorrectResponse() {
consensus.Log.Warn("IncorrectResponse attacked")
utils.GetLogInstance().Warn("IncorrectResponse attacked")
return
}
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
// check block hash
if !bytes.Equal(blockHash[:], consensus.blockHash[:]) {
consensus.Log.Warn("Block hash doesn't match", "consensus", consensus)
return
}
aggCommitment := crypto.Ed25519Curve.Point()
aggCommitment.UnmarshalBinary(aggreCommit[:32])
aggKey := crypto.Ed25519Curve.Point()
aggKey.UnmarshalBinary(aggreKey[:32])
reconstructedChallenge, err := crypto.Challenge(crypto.Ed25519Curve, aggCommitment, aggKey, blockHash)
// Verify the multi-sig for prepare phase
deserializedMultiSig := bls.Sign{}
err := deserializedMultiSig.Deserialize(multiSig)
if err != nil {
log.Error("Failed to reconstruct the challenge from commits and keys")
utils.GetLogInstance().Warn("Failed to deserialize the multi signature for prepare phase", "Error", err, "leader ID", leaderID)
return
}
// For now, simply return the private key of this node.
receivedChallenge := crypto.Ed25519Curve.Scalar()
err = receivedChallenge.UnmarshalBinary(challenge)
if err != nil {
log.Error("Failed to deserialize challenge", "err", err)
return
}
if !reconstructedChallenge.Equal(receivedChallenge) {
log.Error("The challenge doesn't match the commitments and keys")
return
}
response, err := crypto.Response(crypto.Ed25519Curve, consensus.priKey, consensus.secret[consensusID], receivedChallenge)
if err != nil {
log.Warn("validator failed to generate response", "err", err, "priKey", consensus.priKey, "nodeID", consensus.nodeID, "secret", consensus.secret[consensusID])
mask, err := bls_cosi.NewMask(consensus.PublicKeys, nil)
mask.SetMask(bitmap)
if !deserializedMultiSig.VerifyHash(mask.AggregatePublic, blockHash) || err != nil {
utils.GetLogInstance().Warn("Failed to verify the multi signature for prepare phase", "Error", err, "leader ID", leaderID)
return
}
consensus.aggregatedPrepareSig = &deserializedMultiSig
consensus.prepareBitmap = mask
msgTypeToSend := consensus_proto.MessageType_RESPONSE
if targetState == FinalResponseDone {
msgTypeToSend = consensus_proto.MessageType_FINAL_RESPONSE
}
msgToSend := consensus.constructResponseMessage(msgTypeToSend, response)
// Construct and send the commit message
multiSigAndBitmap := append(multiSig, bitmap...)
msgToSend := consensus.constructCommitMessage(multiSigAndBitmap)
consensus.SendMessage(consensus.leader, msgToSend)
// consensus.Log.Warn("Sending Response to leader", "state", targetState)
// Set state to target state (ResponseDone, FinalResponseDone)
consensus.state = targetState
if consensus.state == FinalResponseDone {
// TODO: the block catch up logic is a temporary workaround for full failure node catchup. Implement the full node catchup logic
// The logic is to roll up to the latest blocks one by one to try catching up with the leader.
for {
val, ok := consensus.blocksReceived[consensus.consensusID]
if ok {
delete(consensus.blocksReceived, consensus.consensusID)
consensus.blockHash = [32]byte{}
delete(consensus.secret, consensusID)
consensus.consensusID = consensusID + 1 // roll up one by one, until the next block is not received yet.
var blockObj types.Block
err := rlp.DecodeBytes(val.block, &blockObj)
if err != nil {
consensus.Log.Warn("Unparseable block header data", "error", err)
return
}
if err != nil {
consensus.Log.Debug("failed to construct the new block after consensus")
}
// check block data (transactions)
if !consensus.BlockVerifier(&blockObj) {
consensus.Log.Debug("[WARNING] Block content is not verified successfully", "consensusID", consensus.consensusID)
return
}
consensus.Log.Info("Finished Response. Adding block to chain", "numTx", len(blockObj.Transactions()))
consensus.OnConsensusDone(&blockObj)
} else {
break
}
}
}
consensus.state = CommitDone
}
// Processes the collective signature message sent from the leader
func (consensus *Consensus) processCollectiveSigMessage(message consensus_proto.Message) {
// Processes the committed message sent from the leader
func (consensus *Consensus) processCommittedMessage(message consensus_proto.Message) {
utils.GetLogInstance().Warn("Received Committed Message", "nodeID", consensus.nodeID)
consensusID := message.ConsensusId
blockHash := message.BlockHash
leaderID := message.SenderId
messagePayload := message.Payload
signature := message.Signature
//#### Read payload data
collectiveSig := messagePayload[0:64]
bitmap := messagePayload[64:]
//#### END: Read payload data
// Verify block data
// check leader Id
myLeaderID := utils.GetUniqueIDFromPeer(consensus.leader)
if uint32(leaderID) != myLeaderID {
consensus.Log.Warn("Received message from wrong leader", "myLeaderID", myLeaderID, "receivedLeaderId", leaderID, "consensus", consensus)
return
}
offset := 0
// 48 byte of multi-sig
multiSig := messagePayload[offset : offset+48]
offset += 48
// Verify signature
message.Signature = nil
messageBytes, err := message.XXX_Marshal([]byte{}, true)
if err != nil {
consensus.Log.Warn("Failed to marshal the announce message", "error", err)
}
if schnorr.Verify(crypto.Ed25519Curve, consensus.leader.PubKey, messageBytes, signature) != nil {
consensus.Log.Warn("Received message with invalid signature", "leaderKey", consensus.leader.PubKey, "consensus", consensus)
return
}
// bitmap
bitmap := messagePayload[offset:]
//#### END Read payload data
// Verify collective signature
err = crypto.Verify(crypto.Ed25519Curve, consensus.PublicKeys, blockHash, append(collectiveSig, bitmap...), crypto.NewThresholdPolicy((2*len(consensus.PublicKeys)/3)+1))
if err != nil {
consensus.Log.Warn("Failed to verify the collective sig message", "consensusID", consensusID, "err", err, "bitmap", bitmap, "NodeID", consensus.nodeID, "#PK", len(consensus.PublicKeys))
// Update readyByConsensus for attack.
attack.GetInstance().UpdateConsensusReady(consensusID)
if !consensus.checkConsensusMessage(message, consensus.leader.PubKey) {
utils.GetLogInstance().Debug("Failed to check the leader message")
return
}
// Add attack model of IncorrectResponse.
if attack.GetInstance().IncorrectResponse() {
consensus.Log.Warn("IncorrectResponse attacked")
utils.GetLogInstance().Warn("IncorrectResponse attacked")
return
}
// check consensus Id
if consensusID != consensus.consensusID {
consensus.Log.Warn("Received message with wrong consensus Id", "myConsensusId", consensus.consensusID, "theirConsensusId", consensusID, "consensus", consensus)
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
// Verify the multi-sig for commit phase
deserializedMultiSig := bls.Sign{}
err := deserializedMultiSig.Deserialize(multiSig)
if err != nil {
utils.GetLogInstance().Warn("Failed to deserialize the multi signature for commit phase", "Error", err, "leader ID", leaderID)
return
}
// check block hash
if !bytes.Equal(blockHash[:], consensus.blockHash[:]) {
consensus.Log.Warn("Block hash doesn't match", "consensus", consensus)
mask, err := bls_cosi.NewMask(consensus.PublicKeys, nil)
mask.SetMask(bitmap)
prepareMultiSigAndBitmap := append(consensus.aggregatedPrepareSig.Serialize(), consensus.prepareBitmap.Bitmap...)
if !deserializedMultiSig.VerifyHash(mask.AggregatePublic, prepareMultiSigAndBitmap) || err != nil {
utils.GetLogInstance().Warn("Failed to verify the multi signature for commit phase", "Error", err, "leader ID", leaderID)
return
}
consensus.aggregatedCommitSig = &deserializedMultiSig
consensus.commitBitmap = mask
secret, msgToSend := consensus.constructCommitMessage(consensus_proto.MessageType_FINAL_COMMIT)
// Store the commitment secret
consensus.secret[consensusID] = secret
consensus.state = CommittedDone
// TODO: the block catch up logic is a temporary workaround for full failure node catchup. Implement the full node catchup logic
// The logic is to roll up to the latest blocks one by one to try catching up with the leader.
for {
val, ok := consensus.blocksReceived[consensus.consensusID]
if ok {
delete(consensus.blocksReceived, consensus.consensusID)
consensus.SendMessage(consensus.leader, msgToSend)
consensus.blockHash = [32]byte{}
consensus.consensusID = consensusID + 1 // roll up one by one, until the next block is not received yet.
// Set state to CommitDone
consensus.state = FinalCommitDone
var blockObj types.Block
err := rlp.DecodeBytes(val.block, &blockObj)
if err != nil {
utils.GetLogInstance().Warn("Unparseable block header data", "error", err)
return
}
if err != nil {
utils.GetLogInstance().Debug("failed to construct the new block after consensus")
}
// check block data (transactions)
if !consensus.BlockVerifier(&blockObj) {
utils.GetLogInstance().Debug("[WARNING] Block content is not verified successfully", "consensusID", consensus.consensusID)
return
}
// Put the signatures into the block
copy(blockObj.Header().PrepareSignature[:], consensus.aggregatedPrepareSig.Serialize()[:])
copy(blockObj.Header().PrepareBitmap[:], consensus.prepareBitmap.Bitmap)
copy(blockObj.Header().CommitSignature[:], consensus.aggregatedCommitSig.Serialize()[:])
copy(blockObj.Header().CommitBitmap[:], consensus.commitBitmap.Bitmap)
utils.GetLogInstance().Info("Adding block to chain", "numTx", len(blockObj.Transactions()))
consensus.OnConsensusDone(&blockObj)
consensus.ResetState()
select {
case consensus.VerifiedNewBlock <- &blockObj:
default:
utils.GetLogInstance().Info("[SYNC] consensus verified block send to chan failed", "blockHash", blockObj.Hash())
continue
}
} else {
break
}
}
}
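
To make the payload handling above concrete: both the PREPARED and COMMITTED payloads carry a 48-byte serialized BLS multi-signature followed by the signer bitmap. The sketch below shows the verification performed for the commit phase; pubKeys and prepareSigAndBitmap are assumed inputs that the real method reads from the Consensus struct, and the Mask type name is inferred from the bls_cosi.NewMask usage in this diff.

import (
	"github.com/harmony-one/bls/ffi/go/bls"
	bls_cosi "github.com/harmony-one/harmony/crypto/bls"
)

// verifyCommittedPayload splits the payload into multi-signature and bitmap,
// then verifies the commit multi-sig over the prepare-phase multi-sig||bitmap.
func verifyCommittedPayload(payload []byte, pubKeys []*bls.PublicKey, prepareSigAndBitmap []byte) bool {
	if len(payload) <= 48 {
		return false
	}
	var commitSig bls.Sign
	if err := commitSig.Deserialize(payload[:48]); err != nil {
		return false
	}
	mask, err := bls_cosi.NewMask(pubKeys, nil)
	if err != nil {
		return false
	}
	mask.SetMask(payload[48:])
	return commitSig.VerifyHash(mask.AggregatePublic, prepareSigAndBitmap)
}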

@ -1,81 +1,47 @@
package consensus
import (
"github.com/dedis/kyber"
consensus_proto "github.com/harmony-one/harmony/api/consensus"
"github.com/harmony-one/harmony/api/proto"
"github.com/harmony-one/harmony/crypto"
"github.com/harmony-one/harmony/internal/utils"
)
// Construct the commit message to send to leader (assumption the consensus data is already verified)
func (consensus *Consensus) constructCommitMessage(msgType consensus_proto.MessageType) (secret kyber.Scalar, commitMsg []byte) {
// Construct the prepare message to send to the leader (assuming the consensus data is already verified)
func (consensus *Consensus) constructPrepareMessage() []byte {
message := consensus_proto.Message{}
message.Type = msgType
message.Type = consensus_proto.MessageType_PREPARE
// 4 byte consensus id
message.ConsensusId = consensus.consensusID
consensus.populateMessageFields(&message)
// 32 byte block hash
message.BlockHash = consensus.blockHash[:]
// 4 byte sender id
message.SenderId = uint32(consensus.nodeID)
// 32 byte of commit
secret, commitment := crypto.Commit(crypto.Ed25519Curve)
bytes, err := commitment.MarshalBinary()
if err != nil {
consensus.Log.Debug("Failed to marshal commit", "error", err)
// 48 byte of bls signature
sign := consensus.priKey.SignHash(message.BlockHash)
if sign != nil {
message.Payload = sign.Serialize()
}
message.Payload = bytes
marshaledMessage, err := message.XXX_Marshal([]byte{}, true)
marshaledMessage, err := consensus.signAndMarshalConsensusMessage(&message)
if err != nil {
consensus.Log.Debug("Failed to marshal Announce message", "error", err)
utils.GetLogInstance().Error("Failed to sign and marshal the Prepare message", "error", err)
}
// 64 byte of signature on previous data
signature := consensus.signMessage(marshaledMessage)
message.Signature = signature
marshaledMessage, err = message.XXX_Marshal([]byte{}, true)
if err != nil {
consensus.Log.Debug("Failed to marshal Announce message", "error", err)
}
return secret, proto.ConstructConsensusMessage(marshaledMessage)
return proto.ConstructConsensusMessage(marshaledMessage)
}
// Construct the response message to send to leader (assumption the consensus data is already verified)
func (consensus *Consensus) constructResponseMessage(msgType consensus_proto.MessageType, response kyber.Scalar) []byte {
// Construct the commit message which contains the signature on the multi-sig of the prepare phase.
func (consensus *Consensus) constructCommitMessage(multiSigAndBitmap []byte) []byte {
message := consensus_proto.Message{}
message.Type = msgType
// 4 byte consensus id
message.ConsensusId = consensus.consensusID
message.Type = consensus_proto.MessageType_COMMIT
// 32 byte block hash
message.BlockHash = consensus.blockHash[:]
consensus.populateMessageFields(&message)
// 4 byte sender id
message.SenderId = uint32(consensus.nodeID)
bytes, err := response.MarshalBinary()
if err != nil {
consensus.Log.Debug("Failed to marshal response", "error", err)
}
message.Payload = bytes
marshaledMessage, err := message.XXX_Marshal([]byte{}, true)
if err != nil {
consensus.Log.Debug("Failed to marshal Announce message", "error", err)
// 48 byte of bls signature
sign := consensus.priKey.SignHash(multiSigAndBitmap)
if sign != nil {
message.Payload = sign.Serialize()
}
// 64 byte of signature on previous data
signature := consensus.signMessage(marshaledMessage)
message.Signature = signature
marshaledMessage, err = message.XXX_Marshal([]byte{}, true)
marshaledMessage, err := consensus.signAndMarshalConsensusMessage(&message)
if err != nil {
consensus.Log.Debug("Failed to marshal Announce message", "error", err)
utils.GetLogInstance().Error("Failed to sign and marshal the Commit message", "error", err)
}
return proto.ConstructConsensusMessage(marshaledMessage)
}

@ -5,33 +5,40 @@ import (
"github.com/harmony-one/harmony/p2p/p2pimpl"
consensus_proto "github.com/harmony-one/harmony/api/consensus"
"github.com/harmony-one/harmony/crypto"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
)
func TestConstructCommitMessage(test *testing.T) {
leader := p2p.Peer{IP: "1", Port: "2"}
validator := p2p.Peer{IP: "3", Port: "5"}
host := p2pimpl.NewHost(leader)
func TestConstructPrepareMessage(test *testing.T) {
leader := p2p.Peer{IP: "127.0.0.1", Port: "9992"}
validator := p2p.Peer{IP: "127.0.0.1", Port: "9995"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
test.Fatalf("newhost failure: %v", err)
}
consensus := New(host, "0", []p2p.Peer{leader, validator}, leader)
consensus.blockHash = [32]byte{}
_, msg := consensus.constructCommitMessage(consensus_proto.MessageType_COMMIT)
msg := consensus.constructPrepareMessage()
if len(msg) != 139 {
test.Errorf("Commit message is not constructed in the correct size: %d", len(msg))
if len(msg) != 93 {
test.Errorf("Prepare message is not constructed in the correct size: %d", len(msg))
}
}
func TestConstructResponseMessage(test *testing.T) {
leader := p2p.Peer{IP: "1", Port: "2"}
validator := p2p.Peer{IP: "3", Port: "5"}
host := p2pimpl.NewHost(leader)
func TestConstructCommitMessage(test *testing.T) {
leader := p2p.Peer{IP: "127.0.0.1", Port: "9902"}
validator := p2p.Peer{IP: "127.0.0.1", Port: "9905"}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
test.Fatalf("newhost failure: %v", err)
}
consensus := New(host, "0", []p2p.Peer{leader, validator}, leader)
consensus.blockHash = [32]byte{}
msg := consensus.constructResponseMessage(consensus_proto.MessageType_RESPONSE, crypto.Ed25519Curve.Scalar())
msg := consensus.constructCommitMessage([]byte("random string"))
if len(msg) != 139 {
test.Errorf("Response message is not constructed in the correct size: %d", len(msg))
if len(msg) != 143 {
test.Errorf("Commit message is not constructed in the correct size: %d", len(msg))
}
}

@ -2,13 +2,15 @@ package consensus
import (
"encoding/hex"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/harmony-one/harmony/core/types"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/utils"
mock_host "github.com/harmony-one/harmony/p2p/host/mock"
"github.com/stretchr/testify/assert"
"testing"
"time"
"github.com/harmony-one/harmony/p2p/p2pimpl"
@ -20,15 +22,15 @@ func TestProcessMessageValidatorAnnounce(test *testing.T) {
ctrl := gomock.NewController(test)
defer ctrl.Finish()
leader := p2p.Peer{IP: "1", Port: "2"}
_, leader.PubKey = utils.GenKey(leader.IP, leader.Port)
leader := p2p.Peer{IP: "127.0.0.1", Port: "9982"}
_, leader.PubKey = utils.GenKeyBLS(leader.IP, leader.Port)
validator1 := p2p.Peer{IP: "3", Port: "4", ValidatorID: 1}
_, validator1.PubKey = utils.GenKey(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "5", Port: "6", ValidatorID: 2}
_, validator2.PubKey = utils.GenKey(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "7", Port: "8", ValidatorID: 3}
_, validator3.PubKey = utils.GenKey(validator3.IP, validator3.Port)
validator1 := p2p.Peer{IP: "127.0.0.1", Port: "9984", ValidatorID: 1}
_, validator1.PubKey = utils.GenKeyBLS(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "127.0.0.1", Port: "9986", ValidatorID: 2}
_, validator2.PubKey = utils.GenKeyBLS(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "127.0.0.1", Port: "9988", ValidatorID: 3}
_, validator3.PubKey = utils.GenKeyBLS(validator3.IP, validator3.Port)
m := mock_host.NewMockHost(ctrl)
// Set up the mock host expectations for this test.
@ -36,10 +38,15 @@ func TestProcessMessageValidatorAnnounce(test *testing.T) {
m.EXPECT().GetSelfPeer().Return(leader)
m.EXPECT().SendMessage(gomock.Any(), gomock.Any()).Times(1)
consensusLeader := New(p2pimpl.NewHost(leader), "0", []p2p.Peer{validator1, validator2, validator3}, leader)
blockBytes, err := hex.DecodeString("f90461f90222a0f7007987c6f26b20cbd6384e3587445eca556beb6716f8eb6a2f590ce8ed3925940000000000000000000000000000000000000000a0f4f2f8416b65c98890630b105f016370abaab236c92faf7fc73a13d037958c52a0db025c6f785698feb447b509908fe488486062e4607afaae85c3336692445b01a03688be0d6b3d0651911204b4539e11096045cacbb676401e2655653823014c8cb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008001850254a0e6f88303295d845c2e4f0e80a00000000000000000000000000000000000000000000000000000000000000000880000000000000000840000000080b842000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f90238f9023580808083081650808b069e10de76676d08000000b901db6080604052678ac7230489e8000060015560028054600160a060020a031916331790556101aa806100316000396000f3fe608060405260043610610045577c0100000000000000000000000000000000000000000000000000000000600035046327c78c42811461004a5780634ddd108a1461008c575b600080fd5b34801561005657600080fd5b5061008a6004803603602081101561006d57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff166100b3565b005b34801561009857600080fd5b506100a1610179565b60408051918252519081900360200190f35b60025473ffffffffffffffffffffffffffffffffffffffff1633146100d757600080fd5b600154303110156100e757600080fd5b73ffffffffffffffffffffffffffffffffffffffff811660009081526020819052604090205460ff161561011a57600080fd5b73ffffffffffffffffffffffffffffffffffffffff8116600081815260208190526040808220805460ff1916600190811790915554905181156108fc0292818181858888f19350505050158015610175573d6000803e3d6000fd5b5050565b30319056fea165627a7a723058203e799228fee2fa7c5d15e71c04267a0cc2687c5eff3b48b98f21f355e1064ab300291ba0a87b9130f7f127af3a713a270610da48d56dedc9501e624bdfe04871859c88f3a05a94b087c05c6395825c5fc35d5ce96b2e61f0ce5f2d67b28f9b2d1178fa90f0c0")
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
test.Fatalf("newhost failure: %v", err)
}
consensusLeader := New(host, "0", []p2p.Peer{validator1, validator2, validator3}, leader)
blockBytes, err := hex.DecodeString("f90242f9023da00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a02b418211410ee3e75b32abd925bbeba215172afa509d65c1953d4b4e505a4a2aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808080a000000000000000000000000000000000000000000000000000000000000000008800000000000000008400000001b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080c0c0")
consensusLeader.block = blockBytes
hashBytes, err := hex.DecodeString("2e002b2b91a08b6e94d21200103828d9f2ae7cd9eb0c26d2679966699486dee1")
hashBytes, err := hex.DecodeString("a0b3344bd84d41e59b8d84857196080dc8bf91df2787ed5e3e7d65bf8a8cea050b")
copy(consensusLeader.blockHash[:], hashBytes[:])
@ -60,24 +67,84 @@ func TestProcessMessageValidatorAnnounce(test *testing.T) {
copy(consensusValidator1.blockHash[:], hashBytes[:])
consensusValidator1.processAnnounceMessage(message)
assert.Equal(test, PrepareDone, consensusValidator1.state)
time.Sleep(1 * time.Second)
}
func TestProcessMessageValidatorPrepared(test *testing.T) {
ctrl := gomock.NewController(test)
defer ctrl.Finish()
leader := p2p.Peer{IP: "127.0.0.1", Port: "7782"}
_, leader.PubKey = utils.GenKeyBLS(leader.IP, leader.Port)
validator1 := p2p.Peer{IP: "127.0.0.1", Port: "7784", ValidatorID: 1}
_, validator1.PubKey = utils.GenKeyBLS(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "127.0.0.1", Port: "7786", ValidatorID: 2}
_, validator2.PubKey = utils.GenKeyBLS(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "127.0.0.1", Port: "7788", ValidatorID: 3}
_, validator3.PubKey = utils.GenKeyBLS(validator3.IP, validator3.Port)
m := mock_host.NewMockHost(ctrl)
// Set up the mock host expectations for this test.
m.EXPECT().GetSelfPeer().Return(leader)
m.EXPECT().SendMessage(gomock.Any(), gomock.Any()).Times(2)
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
test.Fatalf("newhost failure: %v", err)
}
consensusLeader := New(host, "0", []p2p.Peer{validator1, validator2, validator3}, leader)
blockBytes, err := hex.DecodeString("f90242f9023da00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a02b418211410ee3e75b32abd925bbeba215172afa509d65c1953d4b4e505a4a2aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808080a000000000000000000000000000000000000000000000000000000000000000008800000000000000008400000001b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080c0c0")
consensusLeader.block = blockBytes
hashBytes, err := hex.DecodeString("a0b3344bd84d41e59b8d84857196080dc8bf91df2787ed5e3e7d65bf8a8cea050b")
copy(consensusLeader.blockHash[:], hashBytes[:])
announceMsg := consensusLeader.constructAnnounceMessage()
(*consensusLeader.prepareSigs)[consensusLeader.nodeID] = consensusLeader.priKey.SignHash(consensusLeader.blockHash[:])
preparedMsg, _ := consensusLeader.constructPreparedMessage()
if err != nil {
test.Errorf("Failed to unmarshal message payload")
}
consensusValidator1 := New(m, "0", []p2p.Peer{validator1, validator2, validator3}, leader)
consensusValidator1.BlockVerifier = func(block *types.Block) bool {
return true
}
message := consensus_proto.Message{}
err = message.XXX_Unmarshal(announceMsg[1:])
copy(consensusValidator1.blockHash[:], hashBytes[:])
consensusValidator1.processAnnounceMessage(message)
message = consensus_proto.Message{}
err = message.XXX_Unmarshal(preparedMsg[1:])
consensusValidator1.processPreparedMessage(message)
assert.Equal(test, CommitDone, consensusValidator1.state)
time.Sleep(1 * time.Second)
}
func TestProcessMessageValidatorChallenge(test *testing.T) {
func TestProcessMessageValidatorCommitted(test *testing.T) {
ctrl := gomock.NewController(test)
defer ctrl.Finish()
leader := p2p.Peer{IP: "1", Port: "2"}
_, leader.PubKey = utils.GenKey(leader.IP, leader.Port)
leader := p2p.Peer{IP: "127.0.0.1", Port: "7782"}
_, leader.PubKey = utils.GenKeyBLS(leader.IP, leader.Port)
validator1 := p2p.Peer{IP: "3", Port: "4", ValidatorID: 1}
_, validator1.PubKey = utils.GenKey(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "5", Port: "6", ValidatorID: 2}
_, validator2.PubKey = utils.GenKey(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "7", Port: "8", ValidatorID: 3}
_, validator3.PubKey = utils.GenKey(validator3.IP, validator3.Port)
validator1 := p2p.Peer{IP: "127.0.0.1", Port: "7784", ValidatorID: 1}
_, validator1.PubKey = utils.GenKeyBLS(validator1.IP, validator1.Port)
validator2 := p2p.Peer{IP: "127.0.0.1", Port: "7786", ValidatorID: 2}
_, validator2.PubKey = utils.GenKeyBLS(validator2.IP, validator2.Port)
validator3 := p2p.Peer{IP: "127.0.0.1", Port: "7788", ValidatorID: 3}
_, validator3.PubKey = utils.GenKeyBLS(validator3.IP, validator3.Port)
m := mock_host.NewMockHost(ctrl)
// Set up the mock host expectations for this test.
@ -85,15 +152,27 @@ func TestProcessMessageValidatorChallenge(test *testing.T) {
m.EXPECT().GetSelfPeer().Return(leader)
m.EXPECT().SendMessage(gomock.Any(), gomock.Any()).Times(2)
consensusLeader := New(p2pimpl.NewHost(leader), "0", []p2p.Peer{validator1, validator2, validator3}, leader)
blockBytes, err := hex.DecodeString("f90461f90222a0f7007987c6f26b20cbd6384e3587445eca556beb6716f8eb6a2f590ce8ed3925940000000000000000000000000000000000000000a0f4f2f8416b65c98890630b105f016370abaab236c92faf7fc73a13d037958c52a0db025c6f785698feb447b509908fe488486062e4607afaae85c3336692445b01a03688be0d6b3d0651911204b4539e11096045cacbb676401e2655653823014c8cb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008001850254a0e6f88303295d845c2e4f0e80a00000000000000000000000000000000000000000000000000000000000000000880000000000000000840000000080b842000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f90238f9023580808083081650808b069e10de76676d08000000b901db6080604052678ac7230489e8000060015560028054600160a060020a031916331790556101aa806100316000396000f3fe608060405260043610610045577c0100000000000000000000000000000000000000000000000000000000600035046327c78c42811461004a5780634ddd108a1461008c575b600080fd5b34801561005657600080fd5b5061008a6004803603602081101561006d57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff166100b3565b005b34801561009857600080fd5b506100a1610179565b60408051918252519081900360200190f35b60025473ffffffffffffffffffffffffffffffffffffffff1633146100d757600080fd5b600154303110156100e757600080fd5b73ffffffffffffffffffffffffffffffffffffffff811660009081526020819052604090205460ff161561011a57600080fd5b73ffffffffffffffffffffffffffffffffffffffff8116600081815260208190526040808220805460ff1916600190811790915554905181156108fc0292818181858888f19350505050158015610175573d6000803e3d6000fd5b5050565b30319056fea165627a7a723058203e799228fee2fa7c5d15e71c04267a0cc2687c5eff3b48b98f21f355e1064ab300291ba0a87b9130f7f127af3a713a270610da48d56dedc9501e624bdfe04871859c88f3a05a94b087c05c6395825c5fc35d5ce96b2e61f0ce5f2d67b28f9b2d1178fa90f0c0")
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2pimpl.NewHost(&leader, priKey)
if err != nil {
test.Fatalf("newhost failure: %v", err)
}
consensusLeader := New(host, "0", []p2p.Peer{validator1, validator2, validator3}, leader)
blockBytes, err := hex.DecodeString("f90242f9023da00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a02b418211410ee3e75b32abd925bbeba215172afa509d65c1953d4b4e505a4a2aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808080a000000000000000000000000000000000000000000000000000000000000000008800000000000000008400000001b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080c0c0")
consensusLeader.block = blockBytes
hashBytes, err := hex.DecodeString("2e002b2b91a08b6e94d21200103828d9f2ae7cd9eb0c26d2679966699486dee1")
hashBytes, err := hex.DecodeString("a0b3344bd84d41e59b8d84857196080dc8bf91df2787ed5e3e7d65bf8a8cea050b")
copy(consensusLeader.blockHash[:], hashBytes[:])
commitMsg := consensusLeader.constructAnnounceMessage()
challengeMsg, _, _ := consensusLeader.constructChallengeMessage(consensus_proto.MessageType_CHALLENGE)
announceMsg := consensusLeader.constructAnnounceMessage()
(*consensusLeader.prepareSigs)[consensusLeader.nodeID] = consensusLeader.priKey.SignHash(consensusLeader.blockHash[:])
preparedMsg, _ := consensusLeader.constructPreparedMessage()
aggSig := bls_cosi.AggregateSig(consensusLeader.GetPrepareSigsArray())
multiSigAndBitmap := append(aggSig.Serialize(), consensusLeader.prepareBitmap.Bitmap...)
(*consensusLeader.commitSigs)[consensusLeader.nodeID] = consensusLeader.priKey.SignHash(multiSigAndBitmap)
committedMsg, _ := consensusLeader.constructCommittedMessage()
if err != nil {
test.Errorf("Failed to unmarshal message payload")
@ -103,17 +182,22 @@ func TestProcessMessageValidatorChallenge(test *testing.T) {
consensusValidator1.BlockVerifier = func(block *types.Block) bool {
return true
}
consensusValidator1.OnConsensusDone = func(newBlock *types.Block) {}
message := consensus_proto.Message{}
err = message.XXX_Unmarshal(commitMsg[1:])
err = message.XXX_Unmarshal(announceMsg[1:])
copy(consensusValidator1.blockHash[:], hashBytes[:])
consensusValidator1.processAnnounceMessage(message)
message = consensus_proto.Message{}
err = message.XXX_Unmarshal(challengeMsg[1:])
consensusValidator1.processChallengeMessage(message, ResponseDone)
err = message.XXX_Unmarshal(preparedMsg[1:])
consensusValidator1.processPreparedMessage(message)
message = consensus_proto.Message{}
err = message.XXX_Unmarshal(committedMsg[1:])
consensusValidator1.processCommittedMessage(message)
assert.Equal(test, ResponseDone, consensusValidator1.state)
assert.Equal(test, Finished, consensusValidator1.state)
time.Sleep(1 * time.Second)
}

@ -74,7 +74,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// transition, such as amount of used gas, the receipt roots and the state root
// itself. ValidateState returns nil if the validation succeeds, and an error otherwise.
func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error {
func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *state.DB, receipts types.Receipts, usedGas uint64) error {
header := block.Header()
if block.GasUsed() != usedGas {
return fmt.Errorf("invalid gas used (remote: %d local: %d)", block.GasUsed(), usedGas)

@ -31,19 +31,19 @@ import (
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm"
hdb "github.com/harmony-one/harmony/internal/db"
"github.com/harmony-one/harmony/internal/trie"
"github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru"
)
var (
@ -93,9 +93,9 @@ type BlockChain struct {
chainConfig *params.ChainConfig // Chain & network configuration
cacheConfig *CacheConfig // Cache configuration for pruning
db hdb.Database // Low level persistent database to store final content in
triegc *prque.Prque // Priority queue mapping block numbers to tries to gc
gcproc time.Duration // Accumulates canonical block processing for trie dumping
db ethdb.Database // Low level persistent database to store final content in
triegc *prque.Prque // Priority queue mapping block numbers to tries to gc
gcproc time.Duration // Accumulates canonical block processing for trie dumping
hc *HeaderChain
rmLogsFeed event.Feed
@ -139,7 +139,7 @@ type BlockChain struct {
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db hdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = &CacheConfig{
TrieNodeLimit: 256 * 1024 * 1024,
@ -401,12 +401,12 @@ func (bc *BlockChain) Processor() Processor {
}
// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
func (bc *BlockChain) State() (*state.DB, error) {
return bc.StateAt(bc.CurrentBlock().Root())
}
// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
func (bc *BlockChain) StateAt(root common.Hash) (*state.DB, error) {
return state.New(root, bc.stateCache)
}
@ -855,7 +855,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
stats.processed++
if batch.ValueSize() >= hdb.IdealBatchSize {
if batch.ValueSize() >= ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return 0, err
}
@ -913,7 +913,7 @@ func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (e
}
// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.DB) (status WriteStatus, err error) {
bc.wg.Add(1)
defer bc.wg.Done()
@ -961,7 +961,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
)
if nodes > limit || imgs > 4*1024*1024 {
triedb.Cap(limit - hdb.IdealBatchSize)
triedb.Cap(limit - ethdb.IdealBatchSize)
}
// Find the next state trie we need to commit
header := bc.GetHeaderByNumber(current - triesInMemory)
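
Across these core changes, state.StateDB is renamed to state.DB and the database type moves from the internal hdb/db packages back to go-ethereum's ethdb.Database. Call sites only change in the type name; a sketch of an updated caller follows, assuming GetBalance keeps its usual go-ethereum signature (the helper name is illustrative).

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/core/state"
)

// currentBalance reads a balance from the head state; note the *state.DB type
// where *state.StateDB was used before this change.
func currentBalance(bc *BlockChain, addr common.Address) (*big.Int, error) {
	var stateDB *state.DB
	stateDB, err := bc.State() // State now returns (*state.DB, error)
	if err != nil {
		return nil, err
	}
	return stateDB.GetBalance(addr), nil
}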

@ -21,12 +21,12 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm"
hdb "github.com/harmony-one/harmony/internal/db"
)
// BlockGen creates blocks for testing.
@ -36,7 +36,7 @@ type BlockGen struct {
parent *types.Block
chain []*types.Block
header *types.Header
statedb *state.StateDB
statedb *state.DB
gasPool *GasPool
txs []*types.Transaction
@ -161,13 +161,13 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
// Blocks created by GenerateChain do not contain valid proof of work
// values. Inserting them into BlockChain requires use of FakePow or
// a similar non-validating proof of work implementation.
func GenerateChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db hdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
func GenerateChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
if config == nil {
config = params.TestChainConfig
}
blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
chainreader := &fakeChainReader{config: config}
genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
genblock := func(i int, parent *types.Block, statedb *state.DB) (*types.Block, types.Receipts) {
b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
b.header = makeHeader(chainreader, parent, statedb, b.engine)
@ -216,7 +216,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
return blocks, receipts
}
func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.DB, engine consensus.Engine) *types.Header {
var time *big.Int
if parent.Time() == nil {
time = big.NewInt(10)
@ -241,7 +241,7 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S
}
// makeHeaderChain creates a deterministic chain of headers rooted at parent.
func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db hdb.Database, seed int) []*types.Header {
func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Header {
blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, engine, db, seed)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@ -251,7 +251,7 @@ func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db hd
}
// makeBlockChain creates a deterministic chain of blocks rooted at parent.
func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db hdb.Database, seed int) []*types.Block {
func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Block {
blocks, _ := GenerateChain(params.TestChainConfig, parent, engine, db, n, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
})

@ -23,16 +23,18 @@ import (
"errors"
"fmt"
"math/big"
"os"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
hdb "github.com/harmony-one/harmony/internal/db"
"github.com/harmony-one/harmony/internal/utils"
)
//go:generate gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go
@ -149,7 +151,7 @@ func (e *GenesisMismatchError) Error() string {
// error is a *params.ConfigCompatError and the new, unwritten config is returned.
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db hdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@ -219,9 +221,10 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
// ToBlock creates the genesis block and writes state of a genesis specification
// to the given database (or discards it if nil).
func (g *Genesis) ToBlock(db hdb.Database) *types.Block {
func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
if db == nil {
db = hdb.NewMemDatabase()
utils.GetLogInstance().Error("db should be initialized")
os.Exit(1)
}
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
for addr, account := range g.Alloc {
@ -261,7 +264,7 @@ func (g *Genesis) ToBlock(db hdb.Database) *types.Block {
// Commit writes the block and state of a genesis specification to the database.
// The block is committed as the canonical head block.
func (g *Genesis) Commit(db hdb.Database) (*types.Block, error) {
func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
block := g.ToBlock(db)
if block.Number().Sign() != 0 {
return nil, fmt.Errorf("can't commit genesis block with number > 0")
@ -283,7 +286,7 @@ func (g *Genesis) Commit(db hdb.Database) (*types.Block, error) {
// MustCommit writes the genesis block and state to db, panicking on error.
// The block is committed as the canonical head block.
func (g *Genesis) MustCommit(db hdb.Database) *types.Block {
func (g *Genesis) MustCommit(db ethdb.Database) *types.Block {
block, err := g.Commit(db)
if err != nil {
panic(err)
@ -292,7 +295,7 @@ func (g *Genesis) MustCommit(db hdb.Database) *types.Block {
}
// GenesisBlockForTesting creates and writes a block in which addr has the given wei balance.
func GenesisBlockForTesting(db hdb.Database, addr common.Address, balance *big.Int) *types.Block {
func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int) *types.Block {
g := Genesis{Alloc: GenesisAlloc{addr: {Balance: balance}}}
return g.MustCommit(db)
}
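For reference, a minimal sketch (not part of this change) of committing a genesis spec through the ethdb-backed API this diff switches to; the faucet address and balance are made up for illustration.
// Assumed imports: math/big, go-ethereum common/ethdb/params, harmony-one/harmony/core.
memdb := ethdb.NewMemDatabase()
faucet := common.BytesToAddress([]byte("faucet")) // arbitrary example address
gspec := core.Genesis{
	Config: params.TestChainConfig,
	Alloc:  core.GenesisAlloc{faucet: {Balance: big.NewInt(1000000000)}},
}
genesis := gspec.MustCommit(memdb) // writes block + state, sets the canonical head
_ = genesis.Hash()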

@ -27,13 +27,13 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/db"
"github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru"
)
const (
@ -50,7 +50,7 @@ const (
type HeaderChain struct {
config *params.ChainConfig
chainDb db.Database
chainDb ethdb.Database
genesisHeader *types.Header
currentHeader atomic.Value // Current head of the header chain (may be above the block chain!)
@ -70,7 +70,7 @@ type HeaderChain struct {
// getValidator should return the parent's validator
// procInterrupt points to the parent's interrupt semaphore
// wg points to the parent's shutdown wait group
func NewHeaderChain(chainDb db.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
headerCache, _ := lru.New(headerCacheLimit)
tdCache, _ := lru.New(tdCacheLimit)
numberCache, _ := lru.New(numberCacheLimit)

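A sketch (not in the diff) of constructing a HeaderChain over the ethdb-backed database; it assumes a genesis block was committed first and leaves the actual consensus engine to the caller.
// Assumed imports: go-ethereum ethdb/params, harmony-one/harmony core and consensus.
memdb := ethdb.NewMemDatabase()
new(core.Genesis).MustCommit(memdb) // HeaderChain needs a genesis header in the db
var engine consensus.Engine         // assumed: supplied by the node elsewhere
hc, err := core.NewHeaderChain(memdb, params.TestChainConfig, engine, func() bool { return false })
if err != nil {
	panic(err)
}
_ = hc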
@ -21,8 +21,8 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/harmony-one/harmony/internal/db"
"github.com/harmony-one/harmony/internal/trie"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
lru "github.com/hashicorp/golang-lru"
)
@ -68,14 +68,14 @@ type Trie interface {
Hash() common.Hash
NodeIterator(startKey []byte) trie.NodeIterator
GetKey([]byte) []byte // TODO(fjl): remove this when SecureTrie is removed
Prove(key []byte, fromLevel uint, proofDb db.Putter) error
Prove(key []byte, fromLevel uint, proofDb ethdb.Putter) error
}
// NewDatabase creates a backing store for state. The returned database is safe for
// concurrent use and retains cached trie nodes in memory. The pool is an optional
// intermediate trie-node memory pool between the low level storage layer and the
// high level trie abstraction.
func NewDatabase(db db.Database) Database {
func NewDatabase(db ethdb.Database) Database {
csc, _ := lru.New(codeSizeCacheSize)
return &cachingDB{
db: trie.NewDatabase(db),
@ -173,6 +173,6 @@ func (m cachedTrie) Commit(onleaf trie.LeafCallback) (common.Hash, error) {
return root, err
}
func (m cachedTrie) Prove(key []byte, fromLevel uint, proofDb db.Putter) error {
func (m cachedTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.Putter) error {
return m.SecureTrie.Prove(key, fromLevel, proofDb)
}
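A small sketch (not in the diff) of the intended pattern behind NewDatabase: build one caching backing store and reuse it across state instances; the account address is arbitrary.
// Assumed imports: math/big, go-ethereum common/ethdb, harmony-one/harmony/core/state.
memdb := ethdb.NewMemDatabase()
backing := state.NewDatabase(memdb) // shared trie-node and code-size caches
st, _ := state.New(common.Hash{}, backing)
st.AddBalance(common.BytesToAddress([]byte("acct")), big.NewInt(1))
root, _ := st.Commit(false)
// Re-opening at the committed root goes through the same cached backing store.
st2, _ := state.New(root, backing)
_ = st2.GetBalance(common.BytesToAddress([]byte("acct")))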

@ -41,8 +41,8 @@ type Dump struct {
Accounts map[string]DumpAccount `json:"accounts"`
}
// RawDump returns Dump from given StateDB.
func (stateDB *StateDB) RawDump() Dump {
// RawDump returns Dump from given DB.
func (stateDB *DB) RawDump() Dump {
dump := Dump{
Root: fmt.Sprintf("%x", stateDB.trie.Hash()),
Accounts: make(map[string]DumpAccount),
@ -75,7 +75,7 @@ func (stateDB *StateDB) RawDump() Dump {
}
// Dump dumps into []byte.
func (stateDB *StateDB) Dump() []byte {
func (stateDB *DB) Dump() []byte {
json, err := json.MarshalIndent(stateDB.RawDump(), "", " ")
if err != nil {
fmt.Println("dump err", err)

@ -26,7 +26,7 @@ import (
// reverted on demand.
type journalEntry interface {
// revert undoes the changes introduced by this journal entry.
revert(*StateDB)
revert(*DB)
// dirtied returns the Ethereum address modified by this journal entry.
dirtied() *common.Address
@ -57,7 +57,7 @@ func (j *journal) append(entry journalEntry) {
// revert undoes a batch of journalled modifications along with any reverted
// dirty handling too.
func (j *journal) revert(statedb *StateDB, snapshot int) {
func (j *journal) revert(statedb *DB, snapshot int) {
for i := len(j.entries) - 1; i >= snapshot; i-- {
// Undo the changes made by the operation
j.entries[i].revert(statedb)
@ -90,7 +90,7 @@ type (
account *common.Address
}
resetObjectChange struct {
prev *stateObject
prev *Object
}
suicideChange struct {
account *common.Address
@ -133,7 +133,7 @@ type (
}
)
func (ch createObjectChange) revert(s *StateDB) {
func (ch createObjectChange) revert(s *DB) {
delete(s.stateObjects, *ch.account)
delete(s.stateObjectsDirty, *ch.account)
}
@ -142,7 +142,7 @@ func (ch createObjectChange) dirtied() *common.Address {
return ch.account
}
func (ch resetObjectChange) revert(s *StateDB) {
func (ch resetObjectChange) revert(s *DB) {
s.setStateObject(ch.prev)
}
@ -150,7 +150,7 @@ func (ch resetObjectChange) dirtied() *common.Address {
return nil
}
func (ch suicideChange) revert(s *StateDB) {
func (ch suicideChange) revert(s *DB) {
obj := s.getStateObject(*ch.account)
if obj != nil {
obj.suicided = ch.prev
@ -164,14 +164,14 @@ func (ch suicideChange) dirtied() *common.Address {
var ripemd = common.HexToAddress("0000000000000000000000000000000000000003")
func (ch touchChange) revert(s *StateDB) {
func (ch touchChange) revert(s *DB) {
}
func (ch touchChange) dirtied() *common.Address {
return ch.account
}
func (ch balanceChange) revert(s *StateDB) {
func (ch balanceChange) revert(s *DB) {
s.getStateObject(*ch.account).setBalance(ch.prev)
}
@ -179,7 +179,7 @@ func (ch balanceChange) dirtied() *common.Address {
return ch.account
}
func (ch nonceChange) revert(s *StateDB) {
func (ch nonceChange) revert(s *DB) {
s.getStateObject(*ch.account).setNonce(ch.prev)
}
@ -187,7 +187,7 @@ func (ch nonceChange) dirtied() *common.Address {
return ch.account
}
func (ch codeChange) revert(s *StateDB) {
func (ch codeChange) revert(s *DB) {
s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode)
}
@ -195,7 +195,7 @@ func (ch codeChange) dirtied() *common.Address {
return ch.account
}
func (ch storageChange) revert(s *StateDB) {
func (ch storageChange) revert(s *DB) {
s.getStateObject(*ch.account).setState(ch.key, ch.prevalue)
}
@ -203,7 +203,7 @@ func (ch storageChange) dirtied() *common.Address {
return ch.account
}
func (ch refundChange) revert(s *StateDB) {
func (ch refundChange) revert(s *DB) {
s.refund = ch.prev
}
@ -211,7 +211,7 @@ func (ch refundChange) dirtied() *common.Address {
return nil
}
func (ch addLogChange) revert(s *StateDB) {
func (ch addLogChange) revert(s *DB) {
logs := s.logs[ch.txhash]
if len(logs) == 1 {
delete(s.logs, ch.txhash)
@ -225,7 +225,7 @@ func (ch addLogChange) dirtied() *common.Address {
return nil
}
func (ch addPreimageChange) revert(s *StateDB) {
func (ch addPreimageChange) revert(s *DB) {
delete(s.preimages, ch.hash)
}
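The journal entries above are what make DB.Snapshot/RevertToSnapshot able to undo a batch of writes. A minimal sketch of that round trip (not in the diff; the address is arbitrary):
// Assumed imports: math/big, go-ethereum common/ethdb, harmony-one/harmony/core/state.
st, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
addr := common.BytesToAddress([]byte("acct"))
st.SetBalance(addr, big.NewInt(1))
snap := st.Snapshot()             // remembers the current journal length
st.SetBalance(addr, big.NewInt(42))
st.RevertToSnapshot(snap)         // replays the journal entries in reverse
_ = st.GetBalance(addr)           // 1 again: balanceChange restored the previous value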

@ -23,14 +23,14 @@ import (
)
type account struct {
stateObject *stateObject
stateObject *Object
nstart uint64
nonces []bool
}
// ManagedState is the managed state.
type ManagedState struct {
*StateDB
*DB
mu sync.RWMutex
@ -38,18 +38,18 @@ type ManagedState struct {
}
// ManageState returns a new managed state with the statedb as its backing layer

func ManageState(statedb *StateDB) *ManagedState {
func ManageState(statedb *DB) *ManagedState {
return &ManagedState{
StateDB: statedb.Copy(),
DB: statedb.Copy(),
accounts: make(map[common.Address]*account),
}
}
// SetState sets the backing layer of the managed state
func (ms *ManagedState) SetState(statedb *StateDB) {
func (ms *ManagedState) SetState(statedb *DB) {
ms.mu.Lock()
defer ms.mu.Unlock()
ms.StateDB = statedb
ms.DB = statedb
}
// RemoveNonce removes the nonce from the managed state and all future pending nonces
@ -94,7 +94,7 @@ func (ms *ManagedState) GetNonce(addr common.Address) uint64 {
account := ms.getAccount(addr)
return uint64(len(account.nonces)) + account.nstart
}
return ms.StateDB.GetNonce(addr)
return ms.DB.GetNonce(addr)
}
// SetNonce sets the new canonical nonce for the managed state
@ -128,7 +128,7 @@ func (ms *ManagedState) getAccount(addr common.Address) *account {
} else {
// Always make sure the state account nonce isn't actually higher
// than the tracked one.
so := ms.StateDB.getStateObject(addr)
so := ms.DB.getStateObject(addr)
if so != nil && uint64(len(account.nonces))+account.nstart < so.Nonce() {
ms.accounts[addr] = newAccount(so)
}
@ -138,6 +138,6 @@ func (ms *ManagedState) getAccount(addr common.Address) *account {
return ms.accounts[addr]
}
func newAccount(so *stateObject) *account {
func newAccount(so *Object) *account {
return &account{so, so.Nonce(), nil}
}
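A sketch (not in the diff) of how ManagedState hands out pending nonces on top of the renamed state.DB; the address and nonce values are arbitrary.
// Assumed imports: go-ethereum common/ethdb, harmony-one/harmony/core/state.
st, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
ms := state.ManageState(st)
addr := common.BytesToAddress([]byte("acct"))
ms.SetNonce(addr, 10)  // seed the tracked canonical nonce
n := ms.NewNonce(addr) // 10: next usable nonce, now marked pending
_ = ms.NewNonce(addr)  // 11: the next call advances past the pending one
_ = n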

@ -20,16 +20,16 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/harmony-one/harmony/internal/db"
"github.com/ethereum/go-ethereum/ethdb"
)
var addr = common.BytesToAddress([]byte("test"))
func create() (*ManagedState, *account) {
statedb, _ := New(common.Hash{}, NewDatabase(db.NewMemDatabase()))
statedb, _ := New(common.Hash{}, NewDatabase(ethdb.NewMemDatabase()))
ms := ManageState(statedb)
ms.StateDB.SetNonce(addr, 100)
ms.accounts[addr] = newAccount(ms.StateDB.getStateObject(addr))
ms.DB.SetNonce(addr, 100)
ms.accounts[addr] = newAccount(ms.DB.getStateObject(addr))
return ms, ms.accounts[addr]
}
@ -89,7 +89,7 @@ func TestRemoteNonceChange(t *testing.T) {
account.nonces = append(account.nonces, nn...)
ms.NewNonce(addr)
ms.StateDB.stateObjects[addr].data.Nonce = 200
ms.DB.stateObjects[addr].data.Nonce = 200
nonce := ms.NewNonce(addr)
if nonce != 200 {
t.Error("expected nonce after remote update to be", 200, "got", nonce)
@ -97,7 +97,7 @@ func TestRemoteNonceChange(t *testing.T) {
ms.NewNonce(addr)
ms.NewNonce(addr)
ms.NewNonce(addr)
ms.StateDB.stateObjects[addr].data.Nonce = 200
ms.DB.stateObjects[addr].data.Nonce = 200
nonce = ms.NewNonce(addr)
if nonce != 204 {
t.Error("expected nonce after remote update to be", 204, "got", nonce)
@ -115,7 +115,7 @@ func TestSetNonce(t *testing.T) {
}
addr[0] = 1
ms.StateDB.SetNonce(addr, 1)
ms.DB.SetNonce(addr, 1)
if ms.GetNonce(addr) != 1 {
t.Error("Expected nonce of 1, got", ms.GetNonce(addr))

@ -56,23 +56,23 @@ func (storage Storage) Copy() Storage {
return cpy
}
// stateObject represents an Ethereum account which is being modified.
// Object represents an Ethereum account which is being modified.
//
// The usage pattern is as follows:
// First you need to obtain a state object.
// Account values can be accessed and modified through the object.
// Finally, call CommitTrie to write the modified storage trie into a database.
type stateObject struct {
type Object struct {
address common.Address
addrHash common.Hash // hash of ethereum address of the account
data Account
db *StateDB
db *DB
// DB error.
// State objects are used by the consensus core and VM which are
// unable to deal with database-level errors. Any error that occurs
// during a database read is memoized here and will eventually be returned
// by StateDB.Commit.
// by DB.Commit.
dbErr error
// Write caches.
@ -91,7 +91,7 @@ type stateObject struct {
}
// empty returns whether the account is considered empty.
func (s *stateObject) empty() bool {
func (s *Object) empty() bool {
return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
}
@ -105,14 +105,14 @@ type Account struct {
}
// newObject creates a state object.
func newObject(db *StateDB, address common.Address, data Account) *stateObject {
func newObject(db *DB, address common.Address, data Account) *Object {
if data.Balance == nil {
data.Balance = new(big.Int)
}
if data.CodeHash == nil {
data.CodeHash = emptyCodeHash
}
return &stateObject{
return &Object{
db: db,
address: address,
addrHash: crypto.Keccak256Hash(address[:]),
@ -123,22 +123,22 @@ func newObject(db *StateDB, address common.Address, data Account) *stateObject {
}
// EncodeRLP implements rlp.Encoder.
func (s *stateObject) EncodeRLP(w io.Writer) error {
func (s *Object) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, s.data)
}
// setError remembers the first non-nil error it is called with.
func (s *stateObject) setError(err error) {
func (s *Object) setError(err error) {
if s.dbErr == nil {
s.dbErr = err
}
}
func (s *stateObject) markSuicided() {
func (s *Object) markSuicided() {
s.suicided = true
}
func (s *stateObject) touch() {
func (s *Object) touch() {
s.db.journal.append(touchChange{
account: &s.address,
})
@ -149,7 +149,7 @@ func (s *stateObject) touch() {
}
}
func (s *stateObject) getTrie(db Database) Trie {
func (s *Object) getTrie(db Database) Trie {
if s.trie == nil {
var err error
s.trie, err = db.OpenStorageTrie(s.addrHash, s.data.Root)
@ -162,7 +162,7 @@ func (s *stateObject) getTrie(db Database) Trie {
}
// GetState retrieves a value from the account storage trie.
func (s *stateObject) GetState(db Database, key common.Hash) common.Hash {
func (s *Object) GetState(db Database, key common.Hash) common.Hash {
// If we have a dirty value for this state entry, return it
value, dirty := s.dirtyStorage[key]
if dirty {
@ -173,7 +173,7 @@ func (s *stateObject) GetState(db Database, key common.Hash) common.Hash {
}
// GetCommittedState retrieves a value from the committed account storage trie.
func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
func (s *Object) GetCommittedState(db Database, key common.Hash) common.Hash {
// If we have the original value cached, return that
value, cached := s.originStorage[key]
if cached {
@ -197,7 +197,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// SetState updates a value in account storage.
func (s *stateObject) SetState(db Database, key, value common.Hash) {
func (s *Object) SetState(db Database, key, value common.Hash) {
// If the new value is the same as old, don't set
prev := s.GetState(db, key)
if prev == value {
@ -212,12 +212,12 @@ func (s *stateObject) SetState(db Database, key, value common.Hash) {
s.setState(key, value)
}
func (s *stateObject) setState(key, value common.Hash) {
func (s *Object) setState(key, value common.Hash) {
s.dirtyStorage[key] = value
}
// updateTrie writes cached storage modifications into the object's storage trie.
func (s *stateObject) updateTrie(db Database) Trie {
func (s *Object) updateTrie(db Database) Trie {
tr := s.getTrie(db)
for key, value := range s.dirtyStorage {
delete(s.dirtyStorage, key)
@ -240,14 +240,14 @@ func (s *stateObject) updateTrie(db Database) Trie {
}
// UpdateRoot sets the trie root to the current root hash of
func (s *stateObject) updateRoot(db Database) {
func (s *Object) updateRoot(db Database) {
s.updateTrie(db)
s.data.Root = s.trie.Hash()
}
// CommitTrie the storage trie of the object to db.
// This updates the trie root.
func (s *stateObject) CommitTrie(db Database) error {
func (s *Object) CommitTrie(db Database) error {
s.updateTrie(db)
if s.dbErr != nil {
return s.dbErr
@ -261,7 +261,7 @@ func (s *stateObject) CommitTrie(db Database) error {
// AddBalance adds amount to the account's balance.
// It is used to add funds to the destination account of a transfer.
func (s *stateObject) AddBalance(amount *big.Int) {
func (s *Object) AddBalance(amount *big.Int) {
// EIP158: We must check emptiness for the objects such that the account
// clearing (0,0,0 objects) can take effect.
if amount.Sign() == 0 {
@ -276,14 +276,15 @@ func (s *stateObject) AddBalance(amount *big.Int) {
// SubBalance removes amount from the account's balance.
// It is used to remove funds from the origin account of a transfer.
func (s *stateObject) SubBalance(amount *big.Int) {
func (s *Object) SubBalance(amount *big.Int) {
if amount.Sign() == 0 {
return
}
s.SetBalance(new(big.Int).Sub(s.Balance(), amount))
}
func (s *stateObject) SetBalance(amount *big.Int) {
// SetBalance sets the account balance to the given amount.
func (s *Object) SetBalance(amount *big.Int) {
s.db.journal.append(balanceChange{
account: &s.address,
prev: new(big.Int).Set(s.data.Balance),
@ -291,14 +292,15 @@ func (s *stateObject) SetBalance(amount *big.Int) {
s.setBalance(amount)
}
func (s *stateObject) setBalance(amount *big.Int) {
func (s *Object) setBalance(amount *big.Int) {
s.data.Balance = amount
}
// Return the gas back to the origin. Used by the Virtual machine or Closures
func (s *stateObject) ReturnGas(gas *big.Int) {}
// ReturnGas returns the gas back to the origin.
// Used by the Virtual machine or Closures
func (s *Object) ReturnGas(gas *big.Int) {}
func (s *stateObject) deepCopy(db *StateDB) *stateObject {
func (s *Object) deepCopy(db *DB) *Object {
stateObject := newObject(db, s.address, s.data)
if s.trie != nil {
stateObject.trie = db.db.CopyTrie(s.trie)
@ -316,13 +318,13 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
// Attribute accessors
//
// Returns the address of the contract/account
func (s *stateObject) Address() common.Address {
// Address returns the address of the contract/account.
func (s *Object) Address() common.Address {
return s.address
}
// Code returns the contract code associated with this object, if any.
func (s *stateObject) Code(db Database) []byte {
func (s *Object) Code(db Database) []byte {
if s.code != nil {
return s.code
}
@ -337,7 +339,8 @@ func (s *stateObject) Code(db Database) []byte {
return code
}
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
// SetCode sets the object's contract code to the given code.
func (s *Object) SetCode(codeHash common.Hash, code []byte) {
prevcode := s.Code(s.db.db)
s.db.journal.append(codeChange{
account: &s.address,
@ -347,13 +350,14 @@ func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
s.setCode(codeHash, code)
}
func (s *stateObject) setCode(codeHash common.Hash, code []byte) {
func (s *Object) setCode(codeHash common.Hash, code []byte) {
s.code = code
s.data.CodeHash = codeHash[:]
s.dirtyCode = true
}
func (s *stateObject) SetNonce(nonce uint64) {
// SetNonce sets the account's nonce to the given nonce value.
func (s *Object) SetNonce(nonce uint64) {
s.db.journal.append(nonceChange{
account: &s.address,
prev: s.data.Nonce,
@ -361,25 +365,28 @@ func (s *stateObject) SetNonce(nonce uint64) {
s.setNonce(nonce)
}
func (s *stateObject) setNonce(nonce uint64) {
func (s *Object) setNonce(nonce uint64) {
s.data.Nonce = nonce
}
func (s *stateObject) CodeHash() []byte {
// CodeHash returns the hash of the account's contract code.
func (s *Object) CodeHash() []byte {
return s.data.CodeHash
}
func (s *stateObject) Balance() *big.Int {
// Balance returns the account balance.
func (s *Object) Balance() *big.Int {
return s.data.Balance
}
func (s *stateObject) Nonce() uint64 {
// Nonce returns the account nonce.
func (s *Object) Nonce() uint64 {
return s.data.Nonce
}
// Never called, but must be present to allow stateObject to be used
// Value is never called, but must be present to allow Object to be used
// as a vm.Account interface that also satisfies the vm.ContractRef
// interface. Interfaces are awesome.
func (s *stateObject) Value() *big.Int {
panic("Value on stateObject should never be called")
func (s *Object) Value() *big.Int {
panic("Value on Object should never be called")
}
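A sketch (not in the diff) of the usage pattern the Object comment above describes, expressed through the renamed DB/Object API; the address, storage key, and value are arbitrary.
// Assumed imports: math/big, go-ethereum common/ethdb, harmony-one/harmony/core/state.
st, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
obj := st.GetOrNewStateObject(common.BytesToAddress([]byte("acct"))) // 1. obtain a state object
obj.SetBalance(big.NewInt(1000))                                     // 2. modify account values through it
obj.SetState(st.Database(), common.Hash{1}, common.Hash{2})         //    including its storage trie
if err := obj.CommitTrie(st.Database()); err != nil {                // 3. write the modified storage trie
	panic(err)
}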

@ -23,13 +23,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/harmony-one/harmony/internal/db"
"github.com/ethereum/go-ethereum/ethdb"
checker "gopkg.in/check.v1"
)
type StateSuite struct {
db *db.MemDatabase
state *StateDB
db *ethdb.MemDatabase
state *DB
}
var _ = checker.Suite(&StateSuite{})
@ -87,7 +87,7 @@ func (s *StateSuite) TestDump(c *checker.C) {
}
func (s *StateSuite) SetUpTest(c *checker.C) {
s.db = db.NewMemDatabase()
s.db = ethdb.NewMemDatabase()
s.state, _ = New(common.Hash{}, NewDatabase(s.db))
}
@ -141,7 +141,7 @@ func (s *StateSuite) TestSnapshotEmpty(c *checker.C) {
// use testing instead of checker because checker does not support
// printing/logging in tests (-check.vv does not work)
func TestSnapshot2(t *testing.T) {
state, _ := New(common.Hash{}, NewDatabase(db.NewMemDatabase()))
state, _ := New(common.Hash{}, NewDatabase(ethdb.NewMemDatabase()))
stateobjaddr0 := toAddr([]byte("so0"))
stateobjaddr1 := toAddr([]byte("so1"))
@ -196,7 +196,7 @@ func TestSnapshot2(t *testing.T) {
}
}
func compareStateObjects(so0, so1 *stateObject, t *testing.T) {
func compareStateObjects(so0, so1 *Object, t *testing.T) {
if so0.Address() != so1.Address() {
t.Fatalf("Address mismatch: have %v, want %v", so0.address, so1.address)
}

@ -52,24 +52,24 @@ func (n *proofList) Put(key []byte, value []byte) error {
return nil
}
// StateDB within the ethereum protocol are used to store anything
// DB within the ethereum protocol is used to store anything
// within the merkle trie. The DB takes care of caching and storing
// nested states. It's the general query interface to retrieve:
// * Contracts
// * Accounts
type StateDB struct {
type DB struct {
db Database
trie Trie
// This map holds 'live' objects, which will get modified while processing a state transition.
stateObjects map[common.Address]*stateObject
stateObjects map[common.Address]*Object
stateObjectsDirty map[common.Address]struct{}
// DB error.
// State objects are used by the consensus core and VM which are
// unable to deal with database-level errors. Any error that occurs
// during a database read is memoized here and will eventually be returned
// by StateDB.Commit.
// by DB.Commit.
dbErr error
// The refund counter, also used by state transitioning.
@ -92,15 +92,15 @@ type StateDB struct {
}
// New creates a new state from a given trie.
func New(root common.Hash, db Database) (*StateDB, error) {
func New(root common.Hash, db Database) (*DB, error) {
tr, err := db.OpenTrie(root)
if err != nil {
return nil, err
}
return &StateDB{
return &DB{
db: db,
trie: tr,
stateObjects: make(map[common.Address]*stateObject),
stateObjects: make(map[common.Address]*Object),
stateObjectsDirty: make(map[common.Address]struct{}),
logs: make(map[common.Hash][]*types.Log),
preimages: make(map[common.Hash][]byte),
@ -109,25 +109,25 @@ func New(root common.Hash, db Database) (*StateDB, error) {
}
// setError remembers the first non-nil error it is called with.
func (stateDB *StateDB) setError(err error) {
func (stateDB *DB) setError(err error) {
if stateDB.dbErr == nil {
stateDB.dbErr = err
}
}
func (stateDB *StateDB) Error() error {
func (stateDB *DB) Error() error {
return stateDB.dbErr
}
// Reset clears out all ephemeral state objects from the state db, but keeps
// the underlying state trie to avoid reloading data for the next operations.
func (stateDB *StateDB) Reset(root common.Hash) error {
func (stateDB *DB) Reset(root common.Hash) error {
tr, err := stateDB.db.OpenTrie(root)
if err != nil {
return err
}
stateDB.trie = tr
stateDB.stateObjects = make(map[common.Address]*stateObject)
stateDB.stateObjects = make(map[common.Address]*Object)
stateDB.stateObjectsDirty = make(map[common.Address]struct{})
stateDB.thash = common.Hash{}
stateDB.bhash = common.Hash{}
@ -140,7 +140,7 @@ func (stateDB *StateDB) Reset(root common.Hash) error {
}
// AddLog adds logs into stateDB
func (stateDB *StateDB) AddLog(log *types.Log) {
func (stateDB *DB) AddLog(log *types.Log) {
stateDB.journal.append(addLogChange{txhash: stateDB.thash})
log.TxHash = stateDB.thash
@ -152,12 +152,12 @@ func (stateDB *StateDB) AddLog(log *types.Log) {
}
// GetLogs gets logs from stateDB given a hash
func (stateDB *StateDB) GetLogs(hash common.Hash) []*types.Log {
func (stateDB *DB) GetLogs(hash common.Hash) []*types.Log {
return stateDB.logs[hash]
}
// Logs returns a list of Log.
func (stateDB *StateDB) Logs() []*types.Log {
func (stateDB *DB) Logs() []*types.Log {
var logs []*types.Log
for _, lgs := range stateDB.logs {
logs = append(logs, lgs...)
@ -166,7 +166,7 @@ func (stateDB *StateDB) Logs() []*types.Log {
}
// AddPreimage records a SHA3 preimage seen by the VM.
func (stateDB *StateDB) AddPreimage(hash common.Hash, preimage []byte) {
func (stateDB *DB) AddPreimage(hash common.Hash, preimage []byte) {
if _, ok := stateDB.preimages[hash]; !ok {
stateDB.journal.append(addPreimageChange{hash: hash})
pi := make([]byte, len(preimage))
@ -176,19 +176,19 @@ func (stateDB *StateDB) AddPreimage(hash common.Hash, preimage []byte) {
}
// Preimages returns a list of SHA3 preimages that have been submitted.
func (stateDB *StateDB) Preimages() map[common.Hash][]byte {
func (stateDB *DB) Preimages() map[common.Hash][]byte {
return stateDB.preimages
}
// AddRefund adds gas to the refund counter
func (stateDB *StateDB) AddRefund(gas uint64) {
func (stateDB *DB) AddRefund(gas uint64) {
stateDB.journal.append(refundChange{prev: stateDB.refund})
stateDB.refund += gas
}
// SubRefund removes gas from the refund counter.
// This method will panic if the refund counter goes below zero
func (stateDB *StateDB) SubRefund(gas uint64) {
func (stateDB *DB) SubRefund(gas uint64) {
stateDB.journal.append(refundChange{prev: stateDB.refund})
if gas > stateDB.refund {
panic("Refund counter below zero")
@ -198,19 +198,19 @@ func (stateDB *StateDB) SubRefund(gas uint64) {
// Exist reports whether the given account address exists in the state.
// Notably this also returns true for suicided accounts.
func (stateDB *StateDB) Exist(addr common.Address) bool {
func (stateDB *DB) Exist(addr common.Address) bool {
return stateDB.getStateObject(addr) != nil
}
// Empty returns whether the state object is either non-existent
// or empty according to the EIP161 specification (balance = nonce = code = 0)
func (stateDB *StateDB) Empty(addr common.Address) bool {
func (stateDB *DB) Empty(addr common.Address) bool {
so := stateDB.getStateObject(addr)
return so == nil || so.empty()
}
// GetBalance retrieves the balance from the given address or 0 if object not found
func (stateDB *StateDB) GetBalance(addr common.Address) *big.Int {
func (stateDB *DB) GetBalance(addr common.Address) *big.Int {
stateObject := stateDB.getStateObject(addr)
if stateObject != nil {
return stateObject.Balance()
@ -219,7 +219,7 @@ func (stateDB *StateDB) GetBalance(addr common.Address) *big.Int {
}
// GetNonce returns the nonce of the given address.
func (stateDB *StateDB) GetNonce(addr common.Address) uint64 {
func (stateDB *DB) GetNonce(addr common.Address) uint64 {
stateObject := stateDB.getStateObject(addr)
if stateObject != nil {
return stateObject.Nonce()
@ -229,7 +229,7 @@ func (stateDB *StateDB) GetNonce(addr common.Address) uint64 {
}
// GetCode returns code of a given address.
func (stateDB *StateDB) GetCode(addr common.Address) []byte {
func (stateDB *DB) GetCode(addr common.Address) []byte {
stateObject := stateDB.getStateObject(addr)
if stateObject != nil {
return stateObject.Code(stateDB.db)
@ -238,7 +238,7 @@ func (stateDB *StateDB) GetCode(addr common.Address) []byte {
}
// GetCodeSize returns code size of a given address in stateDB.
func (stateDB *StateDB) GetCodeSize(addr common.Address) int {
func (stateDB *DB) GetCodeSize(addr common.Address) int {
stateObject := stateDB.getStateObject(addr)
if stateObject == nil {
return 0
@ -254,7 +254,7 @@ func (stateDB *StateDB) GetCodeSize(addr common.Address) int {
}
// GetCodeHash returns code hash of a given address.
func (stateDB *StateDB) GetCodeHash(addr common.Address) common.Hash {
func (stateDB *DB) GetCodeHash(addr common.Address) common.Hash {
stateObject := stateDB.getStateObject(addr)
if stateObject == nil {
return common.Hash{}
@ -263,7 +263,7 @@ func (stateDB *StateDB) GetCodeHash(addr common.Address) common.Hash {
}
// GetState retrieves a value from the given account's storage trie.
func (stateDB *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
func (stateDB *DB) GetState(addr common.Address, hash common.Hash) common.Hash {
stateObject := stateDB.getStateObject(addr)
if stateObject != nil {
return stateObject.GetState(stateDB.db, hash)
@ -272,14 +272,14 @@ func (stateDB *StateDB) GetState(addr common.Address, hash common.Hash) common.H
}
// GetProof returns the MerkleProof for a given Account
func (stateDB *StateDB) GetProof(a common.Address) ([][]byte, error) {
func (stateDB *DB) GetProof(a common.Address) ([][]byte, error) {
var proof proofList
err := stateDB.trie.Prove(crypto.Keccak256(a.Bytes()), 0, &proof)
return [][]byte(proof), err
}
// GetStorageProof returns the StorageProof for given key
func (stateDB *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
func (stateDB *DB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
var proof proofList
trie := stateDB.StorageTrie(a)
if trie == nil {
@ -290,7 +290,7 @@ func (stateDB *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]
}
// GetCommittedState retrieves a value from the given account's committed storage trie.
func (stateDB *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
func (stateDB *DB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
stateObject := stateDB.getStateObject(addr)
if stateObject != nil {
return stateObject.GetCommittedState(stateDB.db, hash)
@ -299,13 +299,13 @@ func (stateDB *StateDB) GetCommittedState(addr common.Address, hash common.Hash)
}
// Database retrieves the low level database supporting the lower level trie ops.
func (stateDB *StateDB) Database() Database {
func (stateDB *DB) Database() Database {
return stateDB.db
}
// StorageTrie returns the storage trie of an account.
// The return value is a copy and is nil for non-existent accounts.
func (stateDB *StateDB) StorageTrie(addr common.Address) Trie {
func (stateDB *DB) StorageTrie(addr common.Address) Trie {
stateObject := stateDB.getStateObject(addr)
if stateObject == nil {
return nil
@ -315,7 +315,7 @@ func (stateDB *StateDB) StorageTrie(addr common.Address) Trie {
}
// HasSuicided checks if the state object of the given addr is suicided.
func (stateDB *StateDB) HasSuicided(addr common.Address) bool {
func (stateDB *DB) HasSuicided(addr common.Address) bool {
stateObject := stateDB.getStateObject(addr)
if stateObject != nil {
return stateObject.suicided
@ -328,7 +328,7 @@ func (stateDB *StateDB) HasSuicided(addr common.Address) bool {
*/
// AddBalance adds amount to the account associated with addr.
func (stateDB *StateDB) AddBalance(addr common.Address, amount *big.Int) {
func (stateDB *DB) AddBalance(addr common.Address, amount *big.Int) {
stateObject := stateDB.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.AddBalance(amount)
@ -336,7 +336,7 @@ func (stateDB *StateDB) AddBalance(addr common.Address, amount *big.Int) {
}
// SubBalance subtracts amount from the account associated with addr.
func (stateDB *StateDB) SubBalance(addr common.Address, amount *big.Int) {
func (stateDB *DB) SubBalance(addr common.Address, amount *big.Int) {
stateObject := stateDB.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SubBalance(amount)
@ -344,7 +344,7 @@ func (stateDB *StateDB) SubBalance(addr common.Address, amount *big.Int) {
}
// SetBalance sets balance of an address.
func (stateDB *StateDB) SetBalance(addr common.Address, amount *big.Int) {
func (stateDB *DB) SetBalance(addr common.Address, amount *big.Int) {
stateObject := stateDB.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetBalance(amount)
@ -352,7 +352,7 @@ func (stateDB *StateDB) SetBalance(addr common.Address, amount *big.Int) {
}
// SetNonce sets nonce of a given address.
func (stateDB *StateDB) SetNonce(addr common.Address, nonce uint64) {
func (stateDB *DB) SetNonce(addr common.Address, nonce uint64) {
stateObject := stateDB.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetNonce(nonce)
@ -360,7 +360,7 @@ func (stateDB *StateDB) SetNonce(addr common.Address, nonce uint64) {
}
// SetCode sets code of a given address.
func (stateDB *StateDB) SetCode(addr common.Address, code []byte) {
func (stateDB *DB) SetCode(addr common.Address, code []byte) {
stateObject := stateDB.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetCode(crypto.Keccak256Hash(code), code)
@ -368,7 +368,7 @@ func (stateDB *StateDB) SetCode(addr common.Address, code []byte) {
}
// SetState sets hash value of a given address.
func (stateDB *StateDB) SetState(addr common.Address, key, value common.Hash) {
func (stateDB *DB) SetState(addr common.Address, key, value common.Hash) {
stateObject := stateDB.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetState(stateDB.db, key, value)
@ -380,7 +380,7 @@ func (stateDB *StateDB) SetState(addr common.Address, key, value common.Hash) {
//
// The account's state object is still available until the state is committed,
// getStateObject will return a non-nil account after Suicide.
func (stateDB *StateDB) Suicide(addr common.Address) bool {
func (stateDB *DB) Suicide(addr common.Address) bool {
stateObject := stateDB.getStateObject(addr)
if stateObject == nil {
return false
@ -401,7 +401,7 @@ func (stateDB *StateDB) Suicide(addr common.Address) bool {
//
// updateStateObject writes the given object to the trie.
func (stateDB *StateDB) updateStateObject(stateObject *stateObject) {
func (stateDB *DB) updateStateObject(stateObject *Object) {
addr := stateObject.Address()
data, err := rlp.EncodeToBytes(stateObject)
if err != nil {
@ -411,14 +411,14 @@ func (stateDB *StateDB) updateStateObject(stateObject *stateObject) {
}
// deleteStateObject removes the given object from the state trie.
func (stateDB *StateDB) deleteStateObject(stateObject *stateObject) {
func (stateDB *DB) deleteStateObject(stateObject *Object) {
stateObject.deleted = true
addr := stateObject.Address()
stateDB.setError(stateDB.trie.TryDelete(addr[:]))
}
// Retrieve a state object given by the address. Returns nil if not found.
func (stateDB *StateDB) getStateObject(addr common.Address) (stateObject *stateObject) {
func (stateDB *DB) getStateObject(addr common.Address) (stateObject *Object) {
// Prefer 'live' objects.
if obj := stateDB.stateObjects[addr]; obj != nil {
if obj.deleted {
@ -444,12 +444,12 @@ func (stateDB *StateDB) getStateObject(addr common.Address) (stateObject *stateO
return obj
}
func (stateDB *StateDB) setStateObject(object *stateObject) {
func (stateDB *DB) setStateObject(object *Object) {
stateDB.stateObjects[object.Address()] = object
}
// GetOrNewStateObject retrieves a state object or create a new state object if nil.
func (stateDB *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
func (stateDB *DB) GetOrNewStateObject(addr common.Address) *Object {
stateObject := stateDB.getStateObject(addr)
if stateObject == nil || stateObject.deleted {
stateObject, _ = stateDB.createObject(addr)
@ -459,7 +459,7 @@ func (stateDB *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
// createObject creates a new state object. If there is an existing account with
// the given address, it is overwritten and returned as the second return value.
func (stateDB *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
func (stateDB *DB) createObject(addr common.Address) (newobj, prev *Object) {
prev = stateDB.getStateObject(addr)
newobj = newObject(stateDB, addr, Account{})
newobj.setNonce(0) // sets the object to dirty
@ -482,7 +482,7 @@ func (stateDB *StateDB) createObject(addr common.Address) (newobj, prev *stateOb
// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
//
// Carrying over the balance ensures that Ether doesn't disappear.
func (stateDB *StateDB) CreateAccount(addr common.Address) {
func (stateDB *DB) CreateAccount(addr common.Address) {
new, prev := stateDB.createObject(addr)
if prev != nil {
new.setBalance(prev.data.Balance)
@ -490,7 +490,7 @@ func (stateDB *StateDB) CreateAccount(addr common.Address) {
}
// ForEachStorage runs a function on every item in state DB.
func (stateDB *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) {
func (stateDB *DB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) {
so := stateDB.getStateObject(addr)
if so == nil {
return
@ -508,15 +508,15 @@ func (stateDB *StateDB) ForEachStorage(addr common.Address, cb func(key, value c
// Copy creates a deep, independent copy of the state.
// Snapshots of the copied state cannot be applied to the copy.
func (stateDB *StateDB) Copy() *StateDB {
func (stateDB *DB) Copy() *DB {
stateDB.lock.Lock()
defer stateDB.lock.Unlock()
// Copy all the basic fields, initialize the memory ones
state := &StateDB{
state := &DB{
db: stateDB.db,
trie: stateDB.db.CopyTrie(stateDB.trie),
stateObjects: make(map[common.Address]*stateObject, len(stateDB.journal.dirties)),
stateObjects: make(map[common.Address]*Object, len(stateDB.journal.dirties)),
stateObjectsDirty: make(map[common.Address]struct{}, len(stateDB.journal.dirties)),
refund: stateDB.refund,
logs: make(map[common.Hash][]*types.Log, len(stateDB.logs)),
@ -559,7 +559,7 @@ func (stateDB *StateDB) Copy() *StateDB {
}
// Snapshot returns an identifier for the current revision of the state.
func (stateDB *StateDB) Snapshot() int {
func (stateDB *DB) Snapshot() int {
id := stateDB.nextRevisionID
stateDB.nextRevisionID++
stateDB.validRevisions = append(stateDB.validRevisions, revision{id, stateDB.journal.length()})
@ -567,7 +567,7 @@ func (stateDB *StateDB) Snapshot() int {
}
// RevertToSnapshot reverts all state changes made since the given revision.
func (stateDB *StateDB) RevertToSnapshot(revid int) {
func (stateDB *DB) RevertToSnapshot(revid int) {
// Find the snapshot in the stack of valid snapshots.
idx := sort.Search(len(stateDB.validRevisions), func(i int) bool {
return stateDB.validRevisions[i].id >= revid
@ -583,13 +583,13 @@ func (stateDB *StateDB) RevertToSnapshot(revid int) {
}
// GetRefund returns the current value of the refund counter.
func (stateDB *StateDB) GetRefund() uint64 {
func (stateDB *DB) GetRefund() uint64 {
return stateDB.refund
}
// Finalise finalises the state by removing the self destructed objects
// and clearing the journal as well as the refunds.
func (stateDB *StateDB) Finalise(deleteEmptyObjects bool) {
func (stateDB *DB) Finalise(deleteEmptyObjects bool) {
for addr := range stateDB.journal.dirties {
stateObject, exist := stateDB.stateObjects[addr]
if !exist {
@ -617,27 +617,27 @@ func (stateDB *StateDB) Finalise(deleteEmptyObjects bool) {
// IntermediateRoot computes the current root hash of the state trie.
// It is called in between transactions to get the root hash that
// goes into transaction receipts.
func (stateDB *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
func (stateDB *DB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
stateDB.Finalise(deleteEmptyObjects)
return stateDB.trie.Hash()
}
// Prepare sets the current transaction hash and index and block hash which is
// used when the EVM emits new state logs.
func (stateDB *StateDB) Prepare(thash, bhash common.Hash, ti int) {
func (stateDB *DB) Prepare(thash, bhash common.Hash, ti int) {
stateDB.thash = thash
stateDB.bhash = bhash
stateDB.txIndex = ti
}
func (stateDB *StateDB) clearJournalAndRefund() {
func (stateDB *DB) clearJournalAndRefund() {
stateDB.journal = newJournal()
stateDB.validRevisions = stateDB.validRevisions[:0]
stateDB.refund = 0
}
// Commit writes the state to the underlying in-memory trie database.
func (stateDB *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) {
func (stateDB *DB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) {
defer stateDB.clearJournalAndRefund()
for addr := range stateDB.journal.dirties {

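A sketch (not in the diff) of the Copy semantics documented above: the copy is deep and independent, so later writes to the original are not visible through it.
// Assumed imports: math/big, go-ethereum common/ethdb, harmony-one/harmony/core/state.
st, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
addr := common.BytesToAddress([]byte("acct"))
st.SetBalance(addr, big.NewInt(1))
cpy := st.Copy()                   // deep, independent copy
st.SetBalance(addr, big.NewInt(2)) // later writes to the original...
_ = cpy.GetBalance(addr)           // ...are not visible here (still 1)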
@ -31,15 +31,15 @@ import (
check "gopkg.in/check.v1"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/db"
)
// Tests that updating a state trie does not leak any database writes prior to
// actually committing the state.
func TestUpdateLeaks(t *testing.T) {
// Create an empty state database
db := db.NewMemDatabase()
db := ethdb.NewMemDatabase()
state, _ := New(common.Hash{}, NewDatabase(db))
// Update it with some accounts
@ -66,12 +66,12 @@ func TestUpdateLeaks(t *testing.T) {
// only the one right before the commit.
func TestIntermediateLeaks(t *testing.T) {
// Create two state databases, one transitioning to the final state, the other final from the beginning
transDb := db.NewMemDatabase()
finalDb := db.NewMemDatabase()
transDb := ethdb.NewMemDatabase()
finalDb := ethdb.NewMemDatabase()
transState, _ := New(common.Hash{}, NewDatabase(transDb))
finalState, _ := New(common.Hash{}, NewDatabase(finalDb))
modify := func(state *StateDB, addr common.Address, i, tweak byte) {
modify := func(state *DB, addr common.Address, i, tweak byte) {
state.SetBalance(addr, big.NewInt(int64(11*i)+int64(tweak)))
state.SetNonce(addr, uint64(42*i+tweak))
if i%2 == 0 {
@ -122,7 +122,7 @@ func TestIntermediateLeaks(t *testing.T) {
// https://github.com/ethereum/go-ethereum/pull/15549.
func TestCopy(t *testing.T) {
// Create a random state test to copy and modify "independently"
orig, _ := New(common.Hash{}, NewDatabase(db.NewMemDatabase()))
orig, _ := New(common.Hash{}, NewDatabase(ethdb.NewMemDatabase()))
for i := byte(0); i < 255; i++ {
obj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
@ -178,7 +178,7 @@ func TestSnapshotRandom(t *testing.T) {
}
}
// A snapshotTest checks that reverting StateDB snapshots properly undoes all changes
// A snapshotTest checks that reverting DB snapshots properly undoes all changes
// captured by the snapshot. Instances of this test with pseudorandom content are created
// by Generate.
//
@ -198,7 +198,7 @@ type snapshotTest struct {
type testAction struct {
name string
fn func(testAction, *StateDB)
fn func(testAction, *DB)
args []int64
noAddr bool
}
@ -208,28 +208,28 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
actions := []testAction{
{
name: "SetBalance",
fn: func(a testAction, s *StateDB) {
fn: func(a testAction, s *DB) {
s.SetBalance(addr, big.NewInt(a.args[0]))
},
args: make([]int64, 1),
},
{
name: "AddBalance",
fn: func(a testAction, s *StateDB) {
fn: func(a testAction, s *DB) {
s.AddBalance(addr, big.NewInt(a.args[0]))
},
args: make([]int64, 1),
},
{
name: "SetNonce",
fn: func(a testAction, s *StateDB) {
fn: func(a testAction, s *DB) {
s.SetNonce(addr, uint64(a.args[0]))
},
args: make([]int64, 1),
},
{
name: "SetState",
fn: func(a testAction, s *StateDB) {
fn: func(a testAction, s *DB) {
var key, val common.Hash
binary.BigEndian.PutUint16(key[:], uint16(a.args[0]))
binary.BigEndian.PutUint16(val[:], uint16(a.args[1]))
@ -239,7 +239,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
},
{
name: "SetCode",
fn: func(a testAction, s *StateDB) {
fn: func(a testAction, s *DB) {
code := make([]byte, 16)
binary.BigEndian.PutUint64(code, uint64(a.args[0]))
binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
@ -249,19 +249,19 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
},
{
name: "CreateAccount",
fn: func(a testAction, s *StateDB) {
fn: func(a testAction, s *DB) {
s.CreateAccount(addr)
},
},
{
name: "Suicide",
fn: func(a testAction, s *StateDB) {
fn: func(a testAction, s *DB) {
s.Suicide(addr)
},
},
{
name: "AddRefund",
fn: func(a testAction, s *StateDB) {
fn: func(a testAction, s *DB) {
s.AddRefund(uint64(a.args[0]))
},
args: make([]int64, 1),
@ -269,7 +269,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
},
{
name: "AddLog",
fn: func(a testAction, s *StateDB) {
fn: func(a testAction, s *DB) {
data := make([]byte, 2)
binary.BigEndian.PutUint16(data, uint16(a.args[0]))
s.AddLog(&types.Log{Address: addr, Data: data})
@ -333,7 +333,7 @@ func (test *snapshotTest) String() string {
func (test *snapshotTest) run() bool {
// Run all actions and create snapshots.
var (
state, _ = New(common.Hash{}, NewDatabase(db.NewMemDatabase()))
state, _ = New(common.Hash{}, NewDatabase(ethdb.NewMemDatabase()))
snapshotRevs = make([]int, len(test.snapshots))
sindex = 0
)
@ -361,7 +361,7 @@ func (test *snapshotTest) run() bool {
}
// checkEqual checks that methods of state and checkstate return the same values.
func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
func (test *snapshotTest) checkEqual(state, checkstate *DB) error {
for _, addr := range test.addrs {
var err error
checkeq := func(op string, a, b interface{}) bool {
@ -424,7 +424,7 @@ func (s *StateSuite) TestTouchDelete(c *check.C) {
// TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy.
// See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512
func TestCopyOfCopy(t *testing.T) {
sdb, _ := New(common.Hash{}, NewDatabase(db.NewMemDatabase()))
sdb, _ := New(common.Hash{}, NewDatabase(ethdb.NewMemDatabase()))
addr := common.HexToAddress("aaaa")
sdb.SetBalance(addr, big.NewInt(42))

@ -52,7 +52,7 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consen
// Process returns the receipts and logs accumulated during the process and
// returns the amount of gas that was used in the process. If any of the
// transactions failed to execute due to insufficient gas it will return an error.
func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) {
func (p *StateProcessor) Process(block *types.Block, statedb *state.DB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) {
var (
receipts types.Receipts
usedGas = new(uint64)
@ -84,7 +84,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// and uses the input parameters for its environment. It returns the receipt
// for the transaction, gas used and an error if the transaction failed,
// indicating the block was invalid.
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error) {
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.DB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error) {
msg, err := tx.AsMessage(types.MakeSigner(config, header.Number))
if err != nil {
return nil, 0, err

@ -117,7 +117,7 @@ const (
type blockChain interface {
CurrentBlock() *types.Block
GetBlock(hash common.Hash, number uint64) *types.Block
StateAt(root common.Hash) (*state.StateDB, error)
StateAt(root common.Hash) (*state.DB, error)
SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}
@ -195,7 +195,7 @@ type TxPool struct {
signer types.Signer
mu sync.RWMutex
currentState *state.StateDB // Current state in the blockchain head
currentState *state.DB // Current state in the blockchain head
pendingState *state.ManagedState // Pending state tracking virtual nonces
currentMaxGas uint64 // Current gas limit for transaction caps

@ -28,11 +28,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/db"
)
// testTxPoolConfig is a transaction pool configuration without stateful disk
@ -45,7 +45,7 @@ func init() {
}
type testBlockChain struct {
statedb *state.StateDB
statedb *state.DB
gasLimit uint64
chainHeadFeed *event.Feed
}
@ -60,7 +60,7 @@ func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block
return bc.CurrentBlock()
}
func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
func (bc *testBlockChain) StateAt(common.Hash) (*state.DB, error) {
return bc.statedb, nil
}
@ -78,7 +78,7 @@ func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ec
}
func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
key, _ := crypto.GenerateKey()
@ -157,13 +157,13 @@ type testChain struct {
// testChain.State() is used multiple times to reset the pending state.
// when simulate is true it will create a state that indicates
// that tx0 and tx1 are included in the chain.
func (c *testChain) State() (*state.StateDB, error) {
func (c *testChain) State() (*state.DB, error) {
// delay "state change" by one. The tx pool fetches the
// state multiple times and by delaying it a bit we simulate
// a state change between those fetches.
stdb := c.statedb
if *c.trigger {
c.statedb, _ = state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
c.statedb, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
// simulate that the new head block included tx0 and tx1
c.statedb.SetNonce(c.address, 2)
c.statedb.SetBalance(c.address, new(big.Int).SetUint64(params.Ether))
@ -181,7 +181,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) {
var (
key, _ = crypto.GenerateKey()
address = crypto.PubkeyToAddress(key.PublicKey)
statedb, _ = state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
trigger = false
)
@ -335,7 +335,7 @@ func TestTransactionChainFork(t *testing.T) {
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
statedb.AddBalance(addr, big.NewInt(100000000000000))
pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)}
@ -364,7 +364,7 @@ func TestTransactionDoubleNonce(t *testing.T) {
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
statedb.AddBalance(addr, big.NewInt(100000000000000))
pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)}
@ -554,7 +554,7 @@ func TestTransactionPostponing(t *testing.T) {
t.Parallel()
// Create the pool to test the postponing with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
@ -712,7 +712,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) {
t.Parallel()
// Create the pool to test the limit enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@ -800,7 +800,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
evictionInterval = time.Second
// Create the pool to test the non-expiration enforcement
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@ -913,7 +913,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
t.Parallel()
// Create the pool to test the limit enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@ -959,7 +959,7 @@ func TestTransactionCapClearsFromAll(t *testing.T) {
t.Parallel()
// Create the pool to test the limit enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@ -993,7 +993,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
t.Parallel()
// Create the pool to test the limit enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@ -1038,7 +1038,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
@ -1112,7 +1112,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
os.Remove(journal)
// Create the original pool to inject transaction into the journal
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
@ -1210,7 +1210,7 @@ func TestTransactionStatusCheck(t *testing.T) {
t.Parallel()
// Create the pool to test the status retrievals with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)

@ -32,7 +32,7 @@ type Validator interface {
// ValidateState validates the given statedb and optionally the receipts and
// gas used.
ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas uint64) error
ValidateState(block, parent *types.Block, state *state.DB, receipts types.Receipts, usedGas uint64) error
}
// Processor is an interface for processing blocks using a given initial state.
@ -42,5 +42,5 @@ type Validator interface {
// of gas used in the process and return an error if any of the internal rules
// failed.
type Processor interface {
Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error)
Process(block *types.Block, statedb *state.DB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error)
}

@ -91,9 +91,12 @@ type Header struct {
Extra []byte `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash" gencodec:"required"`
Nonce BlockNonce `json:"nonce" gencodec:"required"`
ShardID ShardID `json:"shardID" gencodec:"required"`
Bitmap []byte `json:"bitmap" gencodec:"required"` // Contains which validator signed the block.
Signature [66]byte `json:"signature" gencodec:"required"` // Schnorr collective signature.
// Additional Fields
ShardID ShardID `json:"shardID" gencodec:"required"`
PrepareSignature [48]byte `json:"prepareSignature" gencodec:"required"` // Aggregated signature from the prepare phase.
PrepareBitmap []byte `json:"prepareBitmap" gencodec:"required"` // Records which validators signed in the prepare phase.
CommitSignature [48]byte `json:"commitSignature" gencodec:"required"` // Aggregated signature from the commit phase.
CommitBitmap []byte `json:"commitBitmap" gencodec:"required"` // Records which validators signed in the commit phase.
// TODO(RJ): add epoch info
}

@ -43,7 +43,7 @@ func (d *dummyContractRef) SetNonce(uint64) {}
func (d *dummyContractRef) Balance() *big.Int { return new(big.Int) }
type dummyStatedb struct {
state.StateDB
state.DB
}
// GetRefund ...

@ -23,10 +23,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/db"
)
// Config is a basic type specifying certain configuration flags for running
@ -44,7 +44,7 @@ type Config struct {
Debug bool
EVMConfig vm.Config
State *state.StateDB
State *state.DB
GetHashFn func(n uint64) common.Hash
}
@ -92,14 +92,14 @@ func setDefaults(cfg *Config) {
//
// Execute sets up an in-memory, temporary environment for the execution of
// the given code. It makes sure that the state is restored to its original form afterwards.
func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
func Execute(code, input []byte, cfg *Config) ([]byte, *state.DB, error) {
if cfg == nil {
cfg = new(Config)
}
setDefaults(cfg)
if cfg.State == nil {
cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
}
var (
address = common.BytesToAddress([]byte("contract"))
@ -129,7 +129,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
setDefaults(cfg)
if cfg.State == nil {
cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
}
var (
vmenv = NewEnv(cfg)

@ -23,10 +23,10 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/db"
)
func TestDefaults(t *testing.T) {
@ -95,7 +95,7 @@ func TestExecute(t *testing.T) {
}
func TestCall(t *testing.T) {
state, _ := state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
state, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
address := common.HexToAddress("0x0a")
state.SetCode(address, []byte{
byte(vm.PUSH1), 10,
@ -152,7 +152,7 @@ func BenchmarkCall(b *testing.B) {
func benchmarkEVMCreate(bench *testing.B, code string) {
var (
statedb, _ = state.New(common.Hash{}, state.NewDatabase(db.NewMemDatabase()))
statedb, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
)

@ -1,3 +0,0 @@
go test ./... -coverprofile=/tmp/coverage.out;
grep -v "harmony-one/harmony/core" /tmp/coverage.out | grep -v "harmony-one/harmony/internal/trie" | grep -v "harmony-one/harmony/internal/db" | grep -v "harmony-one/harmony/log" > /tmp/coverage1.out
go tool cover -func=/tmp/coverage1.out

@ -0,0 +1,230 @@
package bls
import (
"errors"
"fmt"
"github.com/harmony-one/bls/ffi/go/bls"
)
func init() {
bls.Init(bls.BLS12_381)
}
// AggregateSig aggregates all the BLS signatures into a single multi-signature.
func AggregateSig(sigs []*bls.Sign) *bls.Sign {
var aggregatedSig bls.Sign
for _, sig := range sigs {
aggregatedSig.Add(sig)
}
return &aggregatedSig
}
// Mask represents a cosigning participation bitmask.
type Mask struct {
Bitmap []byte
publics []*bls.PublicKey
AggregatePublic *bls.PublicKey
}
// NewMask returns a new participation bitmask for cosigning where all
// cosigners are disabled by default. If a public key is given it verifies that
// it is present in the list of keys and sets the corresponding index in the
// bitmask to 1 (enabled).
func NewMask(publics []*bls.PublicKey, myKey *bls.PublicKey) (*Mask, error) {
m := &Mask{
publics: publics,
}
m.Bitmap = make([]byte, m.Len())
m.AggregatePublic = &bls.PublicKey{}
if myKey != nil {
found := false
for i, key := range publics {
if key.IsEqual(myKey) {
m.SetBit(i, true)
found = true
break
}
}
if !found {
return nil, errors.New("key not found")
}
}
return m, nil
}
// Mask returns a copy of the participation bitmask.
func (m *Mask) Mask() []byte {
clone := make([]byte, len(m.Bitmap))
copy(clone[:], m.Bitmap)
return clone
}
// Len returns the Bitmap length in bytes.
func (m *Mask) Len() int {
return (len(m.publics) + 7) >> 3
}
// SetMask sets the participation bitmask according to the given byte slice
// interpreted in little-endian order, i.e., bits 0-7 of byte 0 correspond to
// cosigners 0-7, bits 0-7 of byte 1 correspond to cosigners 8-15, etc.
func (m *Mask) SetMask(mask []byte) error {
if m.Len() != len(mask) {
return fmt.Errorf("mismatching Bitmap lengths")
}
for i := range m.publics {
byt := i >> 3
msk := byte(1) << uint(i&7)
if ((m.Bitmap[byt] & msk) == 0) && ((mask[byt] & msk) != 0) {
m.Bitmap[byt] ^= msk // flip bit in Bitmap from 0 to 1
m.AggregatePublic.Add(m.publics[i])
}
if ((m.Bitmap[byt] & msk) != 0) && ((mask[byt] & msk) == 0) {
m.Bitmap[byt] ^= msk // flip bit in Bitmap from 1 to 0
m.AggregatePublic.Sub(m.publics[i])
}
}
return nil
}
// SetBit enables (enable: true) or disables (enable: false) the bit
// in the participation Bitmap of the given cosigner.
func (m *Mask) SetBit(i int, enable bool) error {
if i >= len(m.publics) {
return errors.New("index out of range")
}
byt := i >> 3
msk := byte(1) << uint(i&7)
if ((m.Bitmap[byt] & msk) == 0) && enable {
m.Bitmap[byt] ^= msk // flip bit in Bitmap from 0 to 1
m.AggregatePublic.Add(m.publics[i])
}
if ((m.Bitmap[byt] & msk) != 0) && !enable {
m.Bitmap[byt] ^= msk // flip bit in Bitmap from 1 to 0
m.AggregatePublic.Sub(m.publics[i])
}
return nil
}
// GetPubKeyFromMask returns the public keys whose mask bit is set (flag == true)
// or cleared (flag == false). It is used to identify which signers did or did not sign the cosign message.
func (m *Mask) GetPubKeyFromMask(flag bool) []*bls.PublicKey {
pubKeys := []*bls.PublicKey{}
for i := range m.publics {
byt := i >> 3
msk := byte(1) << uint(i&7)
if flag {
if (m.Bitmap[byt] & msk) != 0 {
pubKeys = append(pubKeys, m.publics[i])
}
} else {
if (m.Bitmap[byt] & msk) == 0 {
pubKeys = append(pubKeys, m.publics[i])
}
}
}
return pubKeys
}
// IndexEnabled checks whether the given index is enabled in the Bitmap or not.
func (m *Mask) IndexEnabled(i int) (bool, error) {
if i >= len(m.publics) {
return false, errors.New("index out of range")
}
byt := i >> 3
msk := byte(1) << uint(i&7)
return ((m.Bitmap[byt] & msk) != 0), nil
}
// KeyEnabled checks whether the index, corresponding to the given key, is
// enabled in the Bitmap or not.
func (m *Mask) KeyEnabled(public *bls.PublicKey) (bool, error) {
for i, key := range m.publics {
if key.IsEqual(public) {
return m.IndexEnabled(i)
}
}
return false, errors.New("key not found")
}
// SetKey sets the bit in the Bitmap for the given cosigner.
func (m *Mask) SetKey(public *bls.PublicKey, enable bool) error {
for i, key := range m.publics {
if key.IsEqual(public) {
return m.SetBit(i, enable)
}
}
return errors.New("key not found")
}
// CountEnabled returns the number of enabled nodes in the CoSi participation
// Bitmap.
func (m *Mask) CountEnabled() int {
// hw is hamming weight
hw := 0
for i := range m.publics {
byt := i >> 3
msk := byte(1) << uint(i&7)
if (m.Bitmap[byt] & msk) != 0 {
hw++
}
}
return hw
}
// CountTotal returns the total number of nodes this CoSi instance knows.
func (m *Mask) CountTotal() int {
return len(m.publics)
}
// AggregateMasks computes the bitwise OR of the two given participation masks.
func AggregateMasks(a, b []byte) ([]byte, error) {
if len(a) != len(b) {
return nil, errors.New("mismatching Bitmap lengths")
}
m := make([]byte, len(a))
for i := range m {
m[i] = a[i] | b[i]
}
return m, nil
}
// Policy represents a fully customizable cosigning policy deciding what
// cosigner sets are and aren't sufficient for a collective signature to be
// considered acceptable to a verifier. The Check method may inspect the set of
// participants that cosigned by invoking the Mask and/or IndexEnabled methods, and may
// use any other relevant contextual information (e.g., how security-critical
// the operation relying on the collective signature is) in determining whether
// the collective signature was produced by an acceptable set of cosigners.
type Policy interface {
Check(m *Mask) bool
}
// CompletePolicy is the default policy requiring that all participants have
// cosigned to make a collective signature valid.
type CompletePolicy struct {
}
// Check verifies that all participants have contributed to a collective
// signature.
func (p CompletePolicy) Check(m *Mask) bool {
return m.CountEnabled() == m.CountTotal()
}
// ThresholdPolicy allows specifying a simple t-of-n policy requiring that at
// least the given threshold number of participants t have cosigned to make a
// collective signature valid.
type ThresholdPolicy struct {
thold int
}
// NewThresholdPolicy returns a new ThresholdPolicy with the given threshold.
func NewThresholdPolicy(thold int) *ThresholdPolicy {
return &ThresholdPolicy{thold: thold}
}
// Check verifies that at least a threshold number of participants have
// contributed to a collective signature.
func (p ThresholdPolicy) Check(m *Mask) bool {
return m.CountEnabled() >= p.thold
}
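As an illustration of how the pieces above fit together, here is a minimal, hypothetical sketch (in the same bls package) of a 2-of-3 multi-signature round using AggregateSig, Mask, and ThresholdPolicy. SetByCSPRNG and SignHash are assumptions about the underlying bls binding's API, and the function name is made up for illustration:
// exampleThresholdMultiSig has two of three cosigners sign a message and
// checks the participation mask against a 2-of-3 ThresholdPolicy.
func exampleThresholdMultiSig() bool {
	var secrets []*bls.SecretKey
	var publics []*bls.PublicKey
	for i := 0; i < 3; i++ {
		var sk bls.SecretKey
		sk.SetByCSPRNG() // assumed binding API: random secret key
		secrets = append(secrets, &sk)
		publics = append(publics, sk.GetPublicKey())
	}
	mask, _ := NewMask(publics, nil)
	msg := []byte("block hash")
	var sigs []*bls.Sign
	for i := 0; i < 2; i++ { // only cosigners 0 and 1 participate
		sigs = append(sigs, secrets[i].SignHash(msg)) // assumed binding API: sign a hash
		mask.SetBit(i, true)
	}
	multiSig := AggregateSig(sigs)
	_ = multiSig // would be verified against mask.AggregatePublic by the caller
	return NewThresholdPolicy(2).Check(mask) // true: 2 enabled cosigners >= threshold 2
}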

@ -0,0 +1,38 @@
package bls
import (
"testing"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/internal/utils"
)
// Test the basic functionality of a BLS multi-sig mask.
func TestNewMask(test *testing.T) {
_, pubKey1 := utils.GenKeyBLS("127.0.0.1", "5555")
_, pubKey2 := utils.GenKeyBLS("127.0.0.1", "6666")
_, pubKey3 := utils.GenKeyBLS("127.0.0.1", "7777")
mask, err := NewMask([]*bls.PublicKey{pubKey1, pubKey2, pubKey3}, pubKey1)
if err != nil {
test.Errorf("Failed to create a new Mask: %s", err)
}
if mask.Len() != 1 {
test.Errorf("Mask created with wrong size: %d", mask.Len())
}
enabled, err := mask.KeyEnabled(pubKey1)
if !enabled || err != nil {
test.Errorf("My key pubKey1 should have been enabled: %s", err)
}
if mask.CountEnabled() != 1 {
test.Error("Only one key should have been enabled")
}
if mask.CountTotal() != 3 {
test.Error("Should have a total of 3 keys")
}
}

@ -316,6 +316,26 @@ func (m *Mask) SetBit(i int, enable bool) error {
return nil
}
// GetPubKeyFromMask returns the public keys whose mask bit is set (flag == true)
// or cleared (flag == false). It is used to identify which signers did or did not sign the cosign message.
func (m *Mask) GetPubKeyFromMask(flag bool) []kyber.Point {
pubKeys := []kyber.Point{}
for i := range m.publics {
byt := i >> 3
msk := byte(1) << uint(i&7)
if flag {
if (m.mask[byt] & msk) != 0 {
pubKeys = append(pubKeys, m.publics[i])
}
} else {
if (m.mask[byt] & msk) == 0 {
pubKeys = append(pubKeys, m.publics[i])
}
}
}
return pubKeys
}
// IndexEnabled checks whether the given index is enabled in the mask or not.
func (m *Mask) IndexEnabled(i int) (bool, error) {
if i >= len(m.publics) {

@ -2,17 +2,20 @@ package pki
import (
"crypto/sha256"
"encoding/binary"
"github.com/dedis/kyber"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/crypto"
"github.com/harmony-one/harmony/log"
)
func init() {
bls.Init(bls.BLS12_381)
}
// GetAddressFromPublicKey returns address given a public key.
func GetAddressFromPublicKey(pubKey kyber.Point) [20]byte {
bytes, err := pubKey.MarshalBinary()
if err != nil {
log.Error("Failed to serialize challenge")
}
func GetAddressFromPublicKey(pubKey *bls.PublicKey) [20]byte {
bytes := pubKey.Serialize()
address := [20]byte{}
hash := sha256.Sum256(bytes)
copy(address[:], hash[12:])
@ -20,18 +23,23 @@ func GetAddressFromPublicKey(pubKey kyber.Point) [20]byte {
}
// GetAddressFromPrivateKey returns address given a private key.
func GetAddressFromPrivateKey(priKey kyber.Scalar) [20]byte {
return GetAddressFromPublicKey(GetPublicKeyFromScalar(priKey))
func GetAddressFromPrivateKey(priKey *bls.SecretKey) [20]byte {
return GetAddressFromPublicKey(priKey.GetPublicKey())
}
// GetAddressFromPrivateKeyBytes returns address from private key in bytes.
func GetAddressFromPrivateKeyBytes(priKey [32]byte) [20]byte {
return GetAddressFromPublicKey(GetPublicKeyFromScalar(crypto.Ed25519Curve.Scalar().SetBytes(priKey[:])))
var privateKey bls.SecretKey
privateKey.SetLittleEndian(priKey[:])
return GetAddressFromPublicKey(privateKey.GetPublicKey())
}
// GetAddressFromInt is the temporary helper function for benchmark use
func GetAddressFromInt(value int) [20]byte {
return GetAddressFromPublicKey(GetPublicKeyFromScalar(GetPrivateKeyScalarFromInt(value)))
priKey := [32]byte{}
binary.LittleEndian.PutUint32(priKey[:], uint32(value))
return GetAddressFromPrivateKeyBytes(priKey)
}
// GetPrivateKeyScalarFromInt return private key scalar.
@ -39,6 +47,15 @@ func GetPrivateKeyScalarFromInt(value int) kyber.Scalar {
return crypto.Ed25519Curve.Scalar().SetInt64(int64(value))
}
// GetBLSPrivateKeyFromInt returns a BLS private key derived from the given integer.
func GetBLSPrivateKeyFromInt(value int) *bls.SecretKey {
priKey := [32]byte{}
binary.LittleEndian.PutUint32(priKey[:], uint32(value))
var privateKey bls.SecretKey
privateKey.SetLittleEndian(priKey[:])
return &privateKey
}
// GetPrivateKeyFromInt returns a private key in bytes given an integer.
func GetPrivateKeyFromInt(value int) [32]byte {
priKey, err := crypto.Ed25519Curve.Scalar().SetInt64(int64(value)).MarshalBinary()

@ -1,19 +1,23 @@
package pki
import (
"github.com/harmony-one/harmony/crypto"
"encoding/binary"
"reflect"
"testing"
"time"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/crypto"
)
func TestGetAddressFromPublicKey(test *testing.T) {
suite := crypto.Ed25519Curve
t := time.Now().UnixNano()
scalar := suite.Scalar().SetInt64(t)
pubKey := GetPublicKeyFromScalar(scalar)
addr1 := GetAddressFromPublicKey(pubKey)
addr2 := GetAddressFromPrivateKey(scalar)
priKey := [32]byte{}
binary.LittleEndian.PutUint32(priKey[:], uint32(t))
var privateKey bls.SecretKey
privateKey.SetLittleEndian(priKey[:])
addr1 := GetAddressFromPublicKey(privateKey.GetPublicKey())
addr2 := GetAddressFromPrivateKey(&privateKey)
if !reflect.DeepEqual(addr1, addr2) {
test.Error("two public address should be equal")
}

@ -0,0 +1 @@
module github.com/harmony-one/harmony

@ -6,7 +6,7 @@ import (
"sync"
"time"
"github.com/harmony-one/harmony/log"
"github.com/harmony-one/harmony/internal/utils"
)
// Constants used for attack model.
@ -34,7 +34,6 @@ type Model struct {
attackType Type
ConsensusIDThreshold uint32
readyByConsensusThreshold bool
log log.Logger // Log utility
}
var attackModel *Model
@ -64,11 +63,6 @@ func (attack *Model) SetAttackEnabled(AttackEnabled bool) {
}
}
// SetLogger sets the logger for doing logging.
func (attack *Model) SetLogger(log log.Logger) {
attack.log = log
}
// Run runs enabled attacks.
func (attack *Model) Run() {
attack.NodeKilledByItSelf()
@ -82,7 +76,7 @@ func (attack *Model) NodeKilledByItSelf() {
}
if rand.Intn(HitRate) == 0 {
attack.log.Debug("******************Killing myself******************", "PID: ", os.Getpid())
utils.GetLogInstance().Debug("******************Killing myself******************", "PID: ", os.Getpid())
os.Exit(1)
}
}
@ -93,7 +87,7 @@ func (attack *Model) DelayResponse() {
return
}
if rand.Intn(HitRate) == 0 {
attack.log.Debug("******************Model: DelayResponse******************", "PID: ", os.Getpid())
utils.GetLogInstance().Debug("******************Model: DelayResponse******************", "PID: ", os.Getpid())
time.Sleep(DelayResponseDuration)
}
}
@ -104,7 +98,7 @@ func (attack *Model) IncorrectResponse() bool {
return false
}
if rand.Intn(HitRate) == 0 {
attack.log.Debug("******************Model: IncorrectResponse******************", "PID: ", os.Getpid())
utils.GetLogInstance().Debug("******************Model: IncorrectResponse******************", "PID: ", os.Getpid())
return true
}
return false

@ -3,7 +3,6 @@ package attack
import (
"testing"
"github.com/harmony-one/harmony/log"
"github.com/stretchr/testify/assert"
)
@ -17,7 +16,6 @@ func TestIncorrectResponse(t *testing.T) {
// Simple test for UpdateConsensusReady
func TestUpdateConsensusReady(t *testing.T) {
model := GetInstance()
model.SetLogger(log.New())
model.NodeKilledByItSelf()
model.UpdateConsensusReady(model.ConsensusIDThreshold - 1)

@ -0,0 +1,3 @@
The beaconchain package is currently a centralized service that assigns every potential new node (which uses the newnode package) to a specific shard.
If N is the number of shards, supplied as a parameter at bootup, then the first N joining nodes are assigned to be the leaders of those N shards. The nodes that join after that are assigned shards based on their order of entry; see the sketch below.
In the future, the generation of randomness will be decentralized. That randomness will be provided to a new node once its PoS has been verified, and the node will then be able to calculate its own shard automatically.
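A minimal sketch of the order-based assignment described above, under the assumption that it mirrors utils.AllocateShard; the helper name and exact semantics here are illustrative only:
// allocateShard assigns the n-th joining node (1-based) to shard (n-1) mod numShards;
// the first numShards joiners become the leaders of those shards.
func allocateShard(nodesAdded, numShards int) (shardNum int, isLeader bool) {
	shardNum = (nodesAdded - 1) % numShards
	isLeader = nodesAdded <= numShards
	return shardNum, isLeader
}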

@ -2,20 +2,22 @@ package beaconchain
import (
"math/rand"
"os"
"strconv"
"sync"
"github.com/dedis/kyber"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/api/proto/bcconn"
proto_identity "github.com/harmony-one/harmony/api/proto/identity"
"github.com/harmony-one/harmony/api/proto/node"
"github.com/harmony-one/harmony/crypto/pki"
"github.com/harmony-one/harmony/internal/beaconchain/rpc"
beaconchain "github.com/harmony-one/harmony/internal/beaconchain/rpc"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/log"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/p2p/host"
"github.com/harmony-one/harmony/p2p/p2pimpl"
p2p_crypto "github.com/libp2p/go-libp2p-crypto"
peer "github.com/libp2p/go-libp2p-peer"
)
//BCState keeps track of the state the beaconchain is in
@ -27,21 +29,31 @@ var identityPerBlock = 100000
// BeaconchainServicePortDiff is the positive port diff from beacon chain's self port
const BeaconchainServicePortDiff = 4444
//BCInfo is the information that needs to be stored on the disk in order to allow for a restart.
type BCInfo struct {
Leaders []*node.Info `json:"leaders"`
ShardLeaderMap map[int]*node.Info `json:"shardLeaderMap"`
NumberOfShards int `json:"numShards"`
NumberOfNodesAdded int `json:"numNodesAdded"`
IP string `json:"ip"`
Port string `json:"port"`
}
// BeaconChain (Blockchain) keeps Identities per epoch, currently centralized!
type BeaconChain struct {
Leaders []*bcconn.NodeInfo
log log.Logger
ShardLeaderMap map[int]*bcconn.NodeInfo
PubKey kyber.Point
NumberOfShards int
NumberOfNodesAdded int
IP string
Port string
host host.Host
state BCState
rpcServer *beaconchain.Server
BCInfo BCInfo
ShardLeaderMap map[int]*node.Info
PubKey *bls.PublicKey
host p2p.Host
state BCState
rpcServer *beaconchain.Server
Peer p2p.Peer
Self p2p.Peer // self Peer
}
// SaveFile is the path to the file in which beaconchain info will be stored.
var SaveFile string
// Following is the set of states that the beaconchain can be in.
const (
NodeInfoReceived BCState = iota
@ -61,64 +73,73 @@ func (bc *BeaconChain) InitRPCServer() {
// StartRPCServer starts Rpc server.
func (bc *BeaconChain) StartRPCServer() {
port, err := strconv.Atoi(bc.Port)
port, err := strconv.Atoi(bc.BCInfo.Port)
if err != nil {
port = 0
}
bc.log.Info("support_client: StartRpcServer on port:", "port", strconv.Itoa(port+BeaconchainServicePortDiff))
bc.rpcServer.Start(bc.IP, strconv.Itoa(port+BeaconchainServicePortDiff))
utils.GetLogInstance().Info("support_client: StartRpcServer on port:", "port", strconv.Itoa(port+BeaconchainServicePortDiff))
bc.rpcServer.Start(bc.BCInfo.IP, strconv.Itoa(port+BeaconchainServicePortDiff))
}
// GetShardLeaderMap returns the map from shard id to leader.
func (bc *BeaconChain) GetShardLeaderMap() map[int]*bcconn.NodeInfo {
result := make(map[int]*bcconn.NodeInfo)
for i, leader := range bc.Leaders {
func (bc *BeaconChain) GetShardLeaderMap() map[int]*node.Info {
result := make(map[int]*node.Info)
for i, leader := range bc.BCInfo.Leaders {
result[i] = leader
}
return result
}
// New initializes a new BeaconChain.
func New(numShards int, ip, port string) *BeaconChain {
func New(numShards int, ip, port string, key p2p_crypto.PrivKey) *BeaconChain {
bc := BeaconChain{}
bc.log = log.New()
bc.NumberOfShards = numShards
bc.PubKey = generateBCKey()
bc.NumberOfNodesAdded = 0
bc.ShardLeaderMap = make(map[int]*bcconn.NodeInfo)
bc.Port = port
bc.IP = ip
bc.host = p2pimpl.NewHost(p2p.Peer{IP: ip, Port: port})
bc.Self = p2p.Peer{IP: ip, Port: port}
bc.host, _ = p2pimpl.NewHost(&bc.Self, key)
bcinfo := &BCInfo{NumberOfShards: numShards, NumberOfNodesAdded: 0,
IP: ip,
Port: port,
ShardLeaderMap: make(map[int]*node.Info)}
bc.BCInfo = *bcinfo
return &bc
}
func generateBCKey() kyber.Point {
func generateBCKey() *bls.PublicKey {
r := rand.Intn(1000)
priKey := pki.GetPrivateKeyFromInt(r)
pubkey := pki.GetPublicKeyFromPrivateKey(priKey)
priKey := pki.GetBLSPrivateKeyFromInt(r)
pubkey := priKey.GetPublicKey()
return pubkey
}
// AcceptNodeInfo deserializes node information received via the beaconchain handler.
func (bc *BeaconChain) AcceptNodeInfo(b []byte) *bcconn.NodeInfo {
func (bc *BeaconChain) AcceptNodeInfo(b []byte) *node.Info {
Node := bcconn.DeserializeNodeInfo(b)
bc.log.Info("New Node Connection", "IP", Node.Self.IP, "Port", Node.Self.Port)
bc.NumberOfNodesAdded = bc.NumberOfNodesAdded + 1
_, isLeader := utils.AllocateShard(bc.NumberOfNodesAdded, bc.NumberOfShards)
utils.GetLogInstance().Info("New Node Connection", "IP", Node.IP, "Port", Node.Port, "PeerID", Node.PeerID)
bc.Peer = p2p.Peer{IP: Node.IP, Port: Node.Port, PeerID: Node.PeerID}
bc.host.AddPeer(&bc.Peer)
bc.BCInfo.NumberOfNodesAdded = bc.BCInfo.NumberOfNodesAdded + 1
shardNum, isLeader := utils.AllocateShard(bc.BCInfo.NumberOfNodesAdded, bc.BCInfo.NumberOfShards)
if isLeader {
bc.Leaders = append(bc.Leaders, Node)
bc.BCInfo.Leaders = append(bc.BCInfo.Leaders, Node)
bc.BCInfo.ShardLeaderMap[shardNum] = Node
}
go SaveBeaconChainInfo(SaveFile, bc)
bc.state = NodeInfoReceived
return Node
}
// RespondRandomness sends a randomness beacon to the node so that it can determine which shard it will be in.
func (bc *BeaconChain) RespondRandomness(Node *bcconn.NodeInfo) {
response := bcconn.ResponseRandomNumber{NumberOfShards: bc.NumberOfShards, NumberOfNodesAdded: bc.NumberOfNodesAdded, Leaders: bc.Leaders}
func (bc *BeaconChain) RespondRandomness(Node *node.Info) {
bci := bc.BCInfo
response := bcconn.ResponseRandomNumber{NumberOfShards: bci.NumberOfShards, NumberOfNodesAdded: bci.NumberOfNodesAdded, Leaders: bci.Leaders}
msg := bcconn.SerializeRandomInfo(response)
msgToSend := proto_identity.ConstructIdentityMessage(proto_identity.Acknowledge, msg)
bc.log.Info("Sent Out Msg", "# Nodes", response.NumberOfNodesAdded)
host.SendMessage(bc.host, Node.Self, msgToSend, nil)
utils.GetLogInstance().Info("Sent Out Msg", "# Nodes", response.NumberOfNodesAdded)
for i, n := range response.Leaders {
utils.GetLogInstance().Info("Sent Out Msg", "leader", i, "nodeInfo", n.PeerID)
}
host.SendMessage(bc.host, bc.Peer, msgToSend, nil)
bc.state = RandomInfoSent
}
@ -132,3 +153,48 @@ func (bc *BeaconChain) AcceptConnections(b []byte) {
func (bc *BeaconChain) StartServer() {
bc.host.BindHandlerAndServe(bc.BeaconChainHandler)
}
// SaveBeaconChainInfo saves the beaconchain info to disk.
func SaveBeaconChainInfo(filePath string, bc *BeaconChain) error {
bci := BCtoBCI(bc)
err := utils.Save(filePath, bci)
return err
}
// LoadBeaconChainInfo loads the beaconchain info from disk.
func LoadBeaconChainInfo(path string) (*BeaconChain, error) {
bci := &BCInfo{}
var err error
if _, err := os.Stat(path); err != nil {
return nil, err
}
err = utils.Load(path, bci)
var bc *BeaconChain
if err != nil {
return nil, err
}
bc = BCItoBC(bci)
return bc, err
}
// BCtoBCI converts beaconchain into beaconchaininfo
func BCtoBCI(bc *BeaconChain) *BCInfo {
bci := &BCInfo{Leaders: bc.BCInfo.Leaders, ShardLeaderMap: bc.BCInfo.ShardLeaderMap, NumberOfShards: bc.BCInfo.NumberOfShards, NumberOfNodesAdded: bc.BCInfo.NumberOfNodesAdded, IP: bc.BCInfo.IP, Port: bc.BCInfo.Port}
return bci
}
// BCItoBC converts beaconchaininfo to beaconchain.
func BCItoBC(bci *BCInfo) *BeaconChain {
bc := &BeaconChain{BCInfo: *bci}
return bc
}
//SetSaveFile sets the filepath where beaconchain will be saved
func SetSaveFile(path string) {
SaveFile = path
}
// GetID returns the peer ID of the beaconchain host.
func (bc *BeaconChain) GetID() peer.ID {
return bc.host.GetID()
}

@ -3,6 +3,7 @@ package beaconchain
import (
"github.com/harmony-one/harmony/api/proto"
proto_identity "github.com/harmony-one/harmony/api/proto/identity"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
)
@ -10,27 +11,27 @@ import (
func (bc *BeaconChain) BeaconChainHandler(s p2p.Stream) {
content, err := p2p.ReadMessageContent(s)
if err != nil {
bc.log.Error("Read p2p data failed")
utils.GetLogInstance().Error("Read p2p data failed")
return
}
msgCategory, err := proto.GetMessageCategory(content)
if err != nil {
bc.log.Error("Read message category failed", "err", err)
utils.GetLogInstance().Error("Read message category failed", "err", err)
return
}
msgType, err := proto.GetMessageType(content)
if err != nil {
bc.log.Error("Read action type failed")
utils.GetLogInstance().Error("Read action type failed")
return
}
msgPayload, err := proto.GetMessagePayload(content)
if err != nil {
bc.log.Error("Read message payload failed")
utils.GetLogInstance().Error("Read message payload failed")
return
}
identityMsgPayload, err := proto_identity.GetIdentityMessagePayload(msgPayload)
if err != nil {
bc.log.Error("Read message payload failed")
utils.GetLogInstance().Error("Read message payload failed")
return
}
switch msgCategory {
@ -38,20 +39,20 @@ func (bc *BeaconChain) BeaconChainHandler(s p2p.Stream) {
actionType := proto_identity.IDMessageType(msgType)
switch actionType {
case proto_identity.Identity:
bc.log.Info("Message category is of the type identity protocol, which is correct!")
utils.GetLogInstance().Info("Message category is of the type identity protocol, which is correct!")
idMsgType, err := proto_identity.GetIdentityMessageType(msgPayload)
if err != nil {
bc.log.Error("Error finding the identity message type")
utils.GetLogInstance().Error("Error finding the identity message type")
}
switch idMsgType {
case proto_identity.Register:
bc.log.Info("Identity Message Type is of the type Register")
utils.GetLogInstance().Info("Identity Message Type is of the type Register")
bc.AcceptConnections(identityMsgPayload)
default:
bc.log.Error("Unrecognized identity message type", "type", idMsgType)
utils.GetLogInstance().Error("Unrecognized identity message type", "type", idMsgType)
}
default:
bc.log.Error("Unrecognized message category", "actionType", actionType)
utils.GetLogInstance().Error("Unrecognized message category", "actionType", actionType)
}
}

@ -1,21 +1,24 @@
package beaconchain
import (
"log"
"os"
"reflect"
"strconv"
"testing"
"github.com/harmony-one/harmony/api/proto/bcconn"
"github.com/harmony-one/harmony/api/proto/node"
beaconchain "github.com/harmony-one/harmony/internal/beaconchain/rpc"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/internal/utils"
"github.com/stretchr/testify/assert"
)
var (
leader1 = &bcconn.NodeInfo{Self: p2p.Peer{IP: "127.0.0.1", Port: "1"}}
leader2 = &bcconn.NodeInfo{Self: p2p.Peer{IP: "127.0.0.1", Port: "2"}}
leaders = []*bcconn.NodeInfo{leader1, leader2}
shardLeaderMap = map[int]*bcconn.NodeInfo{
leader1 = &node.Info{IP: "127.0.0.1", Port: "9981"}
leader2 = &node.Info{IP: "127.0.0.1", Port: "9982"}
leaders = []*node.Info{leader1, leader2}
shardLeaderMap = map[int]*node.Info{
0: leader1,
1: leader2,
}
@ -24,19 +27,20 @@ var (
func TestNewNode(t *testing.T) {
var ip, port string
ip = "127.0.0.1"
port = "8080"
port = "7523"
numshards := 2
bc := New(numshards, ip, port)
priKey, _, _ := utils.GenKeyP2P(ip, port)
bc := New(numshards, ip, port, priKey)
if bc.PubKey == nil {
t.Error("beacon chain public key not initialized")
}
if bc.NumberOfNodesAdded != 0 {
if bc.BCInfo.NumberOfNodesAdded != 0 {
t.Error("beacon chain number of nodes starting with is not zero! (should be zero)")
}
if bc.NumberOfShards != numshards {
if bc.BCInfo.NumberOfShards != numshards {
t.Error("beacon chain number of shards not initialized to given number of desired shards")
}
}
@ -44,10 +48,11 @@ func TestNewNode(t *testing.T) {
func TestShardLeaderMap(t *testing.T) {
var ip string
ip = "127.0.0.1"
beaconport := "8080"
beaconport := "7523"
numshards := 1
bc := New(numshards, ip, beaconport)
bc.Leaders = leaders
priKey, _, _ := utils.GenKeyP2P(ip, beaconport)
bc := New(numshards, ip, beaconport, priKey)
bc.BCInfo.Leaders = leaders
if !reflect.DeepEqual(bc.GetShardLeaderMap(), shardLeaderMap) {
t.Error("The function GetShardLeaderMap doesn't work well")
}
@ -57,17 +62,18 @@ func TestShardLeaderMap(t *testing.T) {
func TestFetchLeaders(t *testing.T) {
var ip string
ip = "127.0.0.1"
beaconport := "8080"
beaconport := "7523"
numshards := 1
bc := New(numshards, ip, beaconport)
bc.Leaders = leaders
priKey, _, _ := utils.GenKeyP2P(ip, beaconport)
bc := New(numshards, ip, beaconport, priKey)
bc.BCInfo.Leaders = leaders
bc.rpcServer = beaconchain.NewServer(bc.GetShardLeaderMap)
bc.StartRPCServer()
port, _ := strconv.Atoi(beaconport)
bcClient := beaconchain.NewClient("127.0.0.1", strconv.Itoa(port+BeaconchainServicePortDiff))
response := bcClient.GetLeaders()
retleaders := response.GetLeaders()
if !(retleaders[0].GetIp() == leaders[0].Self.IP || retleaders[0].GetPort() == leaders[0].Self.Port || retleaders[1].GetPort() == leaders[1].Self.Port) {
if !(retleaders[0].GetIp() == leaders[0].IP || retleaders[0].GetPort() == leaders[0].Port || retleaders[1].GetPort() == leaders[1].Port) {
t.Error("Fetch leaders response is not as expected")
}
@ -76,15 +82,16 @@ func TestFetchLeaders(t *testing.T) {
func TestAcceptNodeInfo(t *testing.T) {
var ip string
ip = "127.0.0.1"
beaconport := "8080"
beaconport := "7523"
numshards := 1
bc := New(numshards, ip, beaconport)
priKey, _, _ := utils.GenKeyP2P(ip, beaconport)
bc := New(numshards, ip, beaconport, priKey)
b := bcconn.SerializeNodeInfo(leader1)
node := bc.AcceptNodeInfo(b)
if !reflect.DeepEqual(node, leader1) {
t.Error("Beaconchain is unable to deserialize incoming node info")
}
if len(bc.Leaders) != 1 {
if len(bc.BCInfo.Leaders) != 1 {
t.Error("Beaconchain was unable to update the leader array")
}
@ -93,9 +100,10 @@ func TestAcceptNodeInfo(t *testing.T) {
func TestRespondRandomness(t *testing.T) {
var ip string
ip = "127.0.0.1"
beaconport := "8080"
beaconport := "7523"
numshards := 1
bc := New(numshards, ip, beaconport)
priKey, _, _ := utils.GenKeyP2P(ip, beaconport)
bc := New(numshards, ip, beaconport, priKey)
bc.RespondRandomness(leader1)
assert.Equal(t, RandomInfoSent, bc.state)
}
@ -103,10 +111,40 @@ func TestRespondRandomness(t *testing.T) {
func TestAcceptConnections(t *testing.T) {
var ip string
ip = "127.0.0.1"
beaconport := "8080"
beaconport := "7523"
numshards := 1
bc := New(numshards, ip, beaconport)
priKey, _, _ := utils.GenKeyP2P(ip, beaconport)
bc := New(numshards, ip, beaconport, priKey)
b := bcconn.SerializeNodeInfo(leader1)
bc.AcceptConnections(b)
assert.Equal(t, RandomInfoSent, bc.state)
}
func TestSaveBC(t *testing.T) {
var ip, port string
ip = "127.0.0.1"
port = "7523"
numshards := 2
bci := &BCInfo{IP: ip, Port: port, NumberOfShards: numshards}
bc := &BeaconChain{BCInfo: *bci}
err := SaveBeaconChainInfo("test.json", bc)
if err != nil {
log.Fatalln(err)
}
bc2, err2 := LoadBeaconChainInfo("test.json")
if err2 != nil {
log.Fatalln(err2)
}
if !reflect.DeepEqual(bc, bc2) {
t.Error("beacon chain info objects are not same")
}
os.Remove("test.json")
}
func TestSaveFile(t *testing.T) {
filepath := "test"
SetSaveFile(filepath)
if !reflect.DeepEqual(filepath, SaveFile) {
t.Error("Could not set savefile")
}
}

@ -5,7 +5,7 @@ import (
"log"
"net"
"github.com/harmony-one/harmony/api/proto/bcconn"
"github.com/harmony-one/harmony/api/proto/node"
"google.golang.org/grpc"
@ -14,7 +14,7 @@ import (
// Server is the Server struct for beacon chain package.
type Server struct {
shardLeaderMap func() map[int]*bcconn.NodeInfo
shardLeaderMap func() map[int]*node.Info
}
// FetchLeaders implements the FetchLeaders interface to return current leaders.
@ -23,7 +23,7 @@ func (s *Server) FetchLeaders(ctx context.Context, request *proto.FetchLeadersRe
leaders := []*proto.FetchLeadersResponse_Leader{}
for shardID, leader := range s.shardLeaderMap() {
leaders = append(leaders, &proto.FetchLeadersResponse_Leader{Ip: leader.Self.IP, Port: leader.Self.Port, ShardId: uint32(shardID)})
leaders = append(leaders, &proto.FetchLeadersResponse_Leader{Ip: leader.IP, Port: leader.Port, ShardId: uint32(shardID), PeerID: leader.PeerID.Pretty()})
}
log.Println(leaders)
return &proto.FetchLeadersResponse{Leaders: leaders}, nil
@ -45,7 +45,7 @@ func (s *Server) Start(ip, port string) (*grpc.Server, error) {
}
// NewServer creates new Server which implements BeaconChainServiceServer interface.
func NewServer(shardLeaderMap func() map[int]*bcconn.NodeInfo) *Server {
func NewServer(shardLeaderMap func() map[int]*node.Info) *Server {
s := &Server{shardLeaderMap}
return s
}

@ -1,242 +0,0 @@
package db
import (
"sync"
"time"
"github.com/harmony-one/harmony/log"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
// Constants for db which can be used to customize later.
const (
writePauseWarningThrottler = 1 * time.Minute
)
// LDBDatabase is database based on leveldb.
type LDBDatabase struct {
fn string // filename for reporting
db *leveldb.DB // LevelDB instance
quitLock sync.Mutex // Mutex protecting the quit channel access
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
log log.Logger // Contextual logger tracking the database path
}
// NewLDBDatabase returns a LevelDB wrapped object.
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
logger := log.New("database", file)
// Ensure we have some minimal caching and file guarantees
if cache < 16 {
cache = 16
}
if handles < 16 {
handles = 16
}
logger.Info("Allocated cache and file handles", "cache", cache, "handles", handles)
// Open the db and recover any potential corruptions
db, err := leveldb.OpenFile(file, &opt.Options{
OpenFilesCacheCapacity: handles,
BlockCacheCapacity: cache / 2 * opt.MiB,
WriteBuffer: cache / 4 * opt.MiB, // Two of these are used internally
Filter: filter.NewBloomFilter(10),
})
if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
db, err = leveldb.RecoverFile(file, nil)
}
// (Re)check for errors and abort if opening of the db failed
if err != nil {
return nil, err
}
return &LDBDatabase{
fn: file,
db: db,
log: logger,
}, nil
}
// Path returns the path to the database directory.
func (db *LDBDatabase) Path() string {
return db.fn
}
// Put puts the given key / value to the queue
func (db *LDBDatabase) Put(key []byte, value []byte) error {
return db.db.Put(key, value, nil)
}
// Has is used to check if the given key is included into the database.
func (db *LDBDatabase) Has(key []byte) (bool, error) {
return db.db.Has(key, nil)
}
// Get returns the given key if it's present.
func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
dat, err := db.db.Get(key, nil)
if err != nil {
return nil, err
}
return dat, nil
}
// Delete deletes the key from the queue and database
func (db *LDBDatabase) Delete(key []byte) error {
return db.db.Delete(key, nil)
}
// NewIterator returns the current iterator of the db.
func (db *LDBDatabase) NewIterator() iterator.Iterator {
return db.db.NewIterator(nil, nil)
}
// NewIteratorWithPrefix returns a iterator to iterate over subset of database content with a particular prefix.
func (db *LDBDatabase) NewIteratorWithPrefix(prefix []byte) iterator.Iterator {
return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}
// Close closes the database.
func (db *LDBDatabase) Close() {
// Stop the metrics collection to avoid internal database races
db.quitLock.Lock()
defer db.quitLock.Unlock()
if db.quitChan != nil {
errc := make(chan error)
db.quitChan <- errc
if err := <-errc; err != nil {
db.log.Error("Metrics collection failed", "err", err)
}
db.quitChan = nil
}
err := db.db.Close()
if err == nil {
db.log.Info("Database closed")
} else {
db.log.Error("Failed to close database", "err", err)
}
}
// LDB returns the pointer to leveldb on which the LDBDatabase is built.
func (db *LDBDatabase) LDB() *leveldb.DB {
return db.db
}
/* TODO(minhdoan): Might add meter func from ethereum-go repo
*/
// NewBatch returns Batch interface for a series of leveldb transactions.
func (db *LDBDatabase) NewBatch() Batch {
return &ldbBatch{db: db.db, b: new(leveldb.Batch)}
}
type ldbBatch struct {
db *leveldb.DB
b *leveldb.Batch
size int
}
// Put is used to put key, value into the batch of transactions.
func (b *ldbBatch) Put(key, value []byte) error {
b.b.Put(key, value)
b.size += len(value)
return nil
}
// Delete is used to delete the item associated with the given key as a part of the batch.
func (b *ldbBatch) Delete(key []byte) error {
b.b.Delete(key)
b.size++
return nil
}
// Write writes the patch of transactions.
func (b *ldbBatch) Write() error {
return b.db.Write(b.b, nil)
}
// ValueSize returns the size of the patch.
func (b *ldbBatch) ValueSize() int {
return b.size
}
// Reset resets the batch.
func (b *ldbBatch) Reset() {
b.b.Reset()
b.size = 0
}
type table struct {
db Database
prefix string
}
// NewTable returns a Database object that prefixes all keys with a given
// string.
func NewTable(db Database, prefix string) Database {
return &table{
db: db,
prefix: prefix,
}
}
func (dt *table) Put(key []byte, value []byte) error {
return dt.db.Put(append([]byte(dt.prefix), key...), value)
}
func (dt *table) Has(key []byte) (bool, error) {
return dt.db.Has(append([]byte(dt.prefix), key...))
}
func (dt *table) Get(key []byte) ([]byte, error) {
return dt.db.Get(append([]byte(dt.prefix), key...))
}
func (dt *table) Delete(key []byte) error {
return dt.db.Delete(append([]byte(dt.prefix), key...))
}
func (dt *table) Close() {
// Do nothing; don't close the underlying DB.
}
type tableBatch struct {
batch Batch
prefix string
}
// NewTableBatch returns a Batch object which prefixes all keys with a given string.
func NewTableBatch(db Database, prefix string) Batch {
return &tableBatch{db.NewBatch(), prefix}
}
func (dt *table) NewBatch() Batch {
return &tableBatch{dt.db.NewBatch(), dt.prefix}
}
func (tb *tableBatch) Put(key, value []byte) error {
return tb.batch.Put(append([]byte(tb.prefix), key...), value)
}
func (tb *tableBatch) Delete(key []byte) error {
return tb.batch.Delete(append([]byte(tb.prefix), key...))
}
func (tb *tableBatch) Write() error {
return tb.batch.Write()
}
func (tb *tableBatch) ValueSize() int {
return tb.batch.ValueSize()
}
func (tb *tableBatch) Reset() {
tb.batch.Reset()
}

@ -1,194 +0,0 @@
package db
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"strconv"
"sync"
"testing"
)
func newTestLDB() (*LDBDatabase, func()) {
dirname, err := ioutil.TempDir(os.TempDir(), "db_test_")
if err != nil {
panic("failed to create test file: " + err.Error())
}
db, err := NewLDBDatabase(dirname, 0, 0)
if err != nil {
panic("failed to create test database: " + err.Error())
}
return db, func() {
db.Close()
os.RemoveAll(dirname)
}
}
var testValues = []string{"", "a", "1251", "\x00123\x00"}
func TestLDB_PutGet(t *testing.T) {
db, remove := newTestLDB()
defer remove()
testPutGet(db, t)
}
func TestMemoryDB_PutGet(t *testing.T) {
testPutGet(NewMemDatabase(), t)
}
func testPutGet(db Database, t *testing.T) {
t.Parallel()
for _, k := range testValues {
err := db.Put([]byte(k), nil)
if err != nil {
t.Fatalf("put failed: %v", err)
}
}
for _, k := range testValues {
data, err := db.Get([]byte(k))
if err != nil {
t.Fatalf("get failed: %v", err)
}
if len(data) != 0 {
t.Fatalf("get returned wrong result, got %q expected nil", string(data))
}
}
_, err := db.Get([]byte("non-exist-key"))
if err == nil {
t.Fatalf("expect to return a not found error")
}
for _, v := range testValues {
err := db.Put([]byte(v), []byte(v))
if err != nil {
t.Fatalf("put failed: %v", err)
}
}
for _, v := range testValues {
data, err := db.Get([]byte(v))
if err != nil {
t.Fatalf("get failed: %v", err)
}
if !bytes.Equal(data, []byte(v)) {
t.Fatalf("get returned wrong result, got %q expected %q", string(data), v)
}
}
for _, v := range testValues {
err := db.Put([]byte(v), []byte("?"))
if err != nil {
t.Fatalf("put override failed: %v", err)
}
}
for _, v := range testValues {
data, err := db.Get([]byte(v))
if err != nil {
t.Fatalf("get failed: %v", err)
}
if !bytes.Equal(data, []byte("?")) {
t.Fatalf("get returned wrong result, got %q expected ?", string(data))
}
}
for _, v := range testValues {
orig, err := db.Get([]byte(v))
if err != nil {
t.Fatalf("get failed: %v", err)
}
orig[0] = byte(0xff)
data, err := db.Get([]byte(v))
if err != nil {
t.Fatalf("get failed: %v", err)
}
if !bytes.Equal(data, []byte("?")) {
t.Fatalf("get returned wrong result, got %q expected ?", string(data))
}
}
for _, v := range testValues {
err := db.Delete([]byte(v))
if err != nil {
t.Fatalf("delete %q failed: %v", v, err)
}
}
for _, v := range testValues {
_, err := db.Get([]byte(v))
if err == nil {
t.Fatalf("got deleted value %q", v)
}
}
}
func TestLDB_ParallelPutGet(t *testing.T) {
db, remove := newTestLDB()
defer remove()
testParallelPutGet(db, t)
}
func TestMemoryDB_ParallelPutGet(t *testing.T) {
testParallelPutGet(NewMemDatabase(), t)
}
func testParallelPutGet(db Database, t *testing.T) {
const n = 8
var pending sync.WaitGroup
pending.Add(n)
for i := 0; i < n; i++ {
go func(key string) {
defer pending.Done()
err := db.Put([]byte(key), []byte("v"+key))
if err != nil {
panic("put failed: " + err.Error())
}
}(strconv.Itoa(i))
}
pending.Wait()
pending.Add(n)
for i := 0; i < n; i++ {
go func(key string) {
defer pending.Done()
data, err := db.Get([]byte(key))
if err != nil {
panic("get failed: " + err.Error())
}
if !bytes.Equal(data, []byte("v"+key)) {
panic(fmt.Sprintf("get failed, got %q expected %q", []byte(data), []byte("v"+key)))
}
}(strconv.Itoa(i))
}
pending.Wait()
pending.Add(n)
for i := 0; i < n; i++ {
go func(key string) {
defer pending.Done()
err := db.Delete([]byte(key))
if err != nil {
panic("delete failed: " + err.Error())
}
}(strconv.Itoa(i))
}
pending.Wait()
pending.Add(n)
for i := 0; i < n; i++ {
go func(key string) {
defer pending.Done()
_, err := db.Get([]byte(key))
if err == nil {
panic("get succeeded")
}
}(strconv.Itoa(i))
}
pending.Wait()
}

@ -1,36 +0,0 @@
package db
// IdealBatchSize is the max size of batch transactions.
// The value was determined empirically.
const IdealBatchSize = 100 * 1024
// Putter wraps the database write operation supported by both batches and regular databases.
type Putter interface {
Put(key []byte, value []byte) error
}
// Deleter wraps the database delete operation supported by both batches and regular databases.
type Deleter interface {
Delete(key []byte) error
}
// Database wraps all database operations. All methods are safe for concurrent use.
type Database interface {
Putter
Deleter
Get(key []byte) ([]byte, error)
Has(key []byte) (bool, error)
Close()
NewBatch() Batch
}
// Batch is a write-only database that commits changes to its host database
// when Write is called. Batch cannot be used concurrently.
type Batch interface {
Putter
Deleter
ValueSize() int // amount of data in the batch
Write() error
// Reset resets the batch for reuse
Reset()
}

@ -1,135 +0,0 @@
package db
import (
"errors"
"sync"
"github.com/harmony-one/harmony/internal/utils"
)
// MemDatabase is the test memory database. It won't be used for any production.
type MemDatabase struct {
db map[string][]byte
lock sync.RWMutex
}
// NewMemDatabase returns a pointer of the new creation of MemDatabase.
func NewMemDatabase() *MemDatabase {
return &MemDatabase{
db: make(map[string][]byte),
}
}
// NewMemDatabaseWithCap returns a pointer of the new creation of MemDatabase with the given size.
func NewMemDatabaseWithCap(size int) *MemDatabase {
return &MemDatabase{
db: make(map[string][]byte, size),
}
}
// Put puts (key, value) item into MemDatabase.
func (db *MemDatabase) Put(key []byte, value []byte) error {
db.lock.Lock()
defer db.lock.Unlock()
db.db[string(key)] = utils.CopyBytes(value)
return nil
}
// Has checks if the key is included into MemDatabase.
func (db *MemDatabase) Has(key []byte) (bool, error) {
db.lock.RLock()
defer db.lock.RUnlock()
_, ok := db.db[string(key)]
return ok, nil
}
// Get gets value of the given key.
func (db *MemDatabase) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()
if entry, ok := db.db[string(key)]; ok {
return utils.CopyBytes(entry), nil
}
return nil, errors.New("not found")
}
// Keys returns all keys of the given MemDatabase.
func (db *MemDatabase) Keys() [][]byte {
db.lock.RLock()
defer db.lock.RUnlock()
keys := [][]byte{}
for key := range db.db {
keys = append(keys, []byte(key))
}
return keys
}
// Delete deletes the given key.
func (db *MemDatabase) Delete(key []byte) error {
db.lock.Lock()
defer db.lock.Unlock()
delete(db.db, string(key))
return nil
}
// Close closes the given db.
func (db *MemDatabase) Close() {}
// NewBatch returns a batch of MemDatabase transactions.
func (db *MemDatabase) NewBatch() Batch {
return &memBatch{db: db}
}
// Len returns the length of the given db.
func (db *MemDatabase) Len() int { return len(db.db) }
type kv struct {
k, v []byte
del bool
}
type memBatch struct {
db *MemDatabase
writes []kv
size int
}
func (b *memBatch) Put(key, value []byte) error {
b.writes = append(b.writes, kv{utils.CopyBytes(key), utils.CopyBytes(value), false})
b.size += len(value)
return nil
}
func (b *memBatch) Delete(key []byte) error {
b.writes = append(b.writes, kv{utils.CopyBytes(key), nil, true})
b.size++
return nil
}
func (b *memBatch) Write() error {
b.db.lock.Lock()
defer b.db.lock.Unlock()
for _, kv := range b.writes {
if kv.del {
delete(b.db.db, string(kv.k))
continue
}
b.db.db[string(kv.k)] = kv.v
}
return nil
}
func (b *memBatch) ValueSize() int {
return b.size
}
func (b *memBatch) Reset() {
b.writes = b.writes[:0]
b.size = 0
}

@ -0,0 +1 @@
The newnode package handles the interactions of a new candidate node that wants to join the network. At the moment, this interaction consists of contacting the beaconchain, getting assigned a shard, and finding out the shard leader (see the sketch below). In the future, this package will be merged into the node package.
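A rough sketch of that interaction, reusing the beaconchain RPC client API that appears elsewhere in this change (NewClient, GetLeaders); the function below is hypothetical and the real newnode flow may differ:
package newnode

import (
	"fmt"

	beaconchain "github.com/harmony-one/harmony/internal/beaconchain/rpc"
)

// fetchShardLeaders asks the (currently centralized) beaconchain service for
// the current shard leaders and prints each shard's leader address.
func fetchShardLeaders(beaconIP, beaconPort string) {
	client := beaconchain.NewClient(beaconIP, beaconPort)
	response := client.GetLeaders()
	for _, leader := range response.GetLeaders() {
		fmt.Println("shard", leader.GetShardId(), "leader", leader.GetIp(), leader.GetPort())
	}
}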
