commit
71c9794f45
@ -0,0 +1,2 @@ |
||||
repo_token: cr4Aim5IFC8A7IvStlMHQbVMRvBhRq0YH |
||||
|
@ -0,0 +1,30 @@ |
||||
--- |
||||
name: Bug report |
||||
about: Create a report to help us improve |
||||
title: '' |
||||
labels: '' |
||||
assignees: '' |
||||
|
||||
--- |
||||
|
||||
**Describe the bug** |
||||
A clear and concise description of what the bug is. |
||||
|
||||
**To Reproduce** |
||||
Steps to reproduce the behavior: |
||||
1. Check out the code at the "sha1" commit
2. Build
3. Run local tests using the "...." commands
||||
|
||||
**Expected behavior** |
||||
A clear and concise description of what you expected to happen. |
||||
|
||||
**Screenshots** |
||||
If applicable, add screenshots to help explain your problem. |
||||
|
||||
**Environment (please complete the following information):** |
||||
- OS: [Linux, MacOS] |
||||
- Go environment [output of ```go env``` ]
||||
|
||||
**Additional context** |
||||
Add any other context about the problem here. |
@ -0,0 +1,31 @@ |
||||
--- |
||||
name: Design Issue |
||||
about: Code has an architecture or design issue |
||||
title: '' |
||||
labels: design |
||||
assignees: '' |
||||
|
||||
--- |
||||
## Summary |
||||
|
||||
<!-- Describe the issue in a few sentences. --> |
||||
|
||||
## Current Design |
||||
|
||||
<!-- Describe how the current version of relevant code works. |
||||
Refer to specific files/lines or specific packages/methods where |
||||
applicable. --> |
||||
|
||||
## Problems |
||||
|
||||
<!-- Discuss in depth why the current design is problematic. |
||||
If the design fails to capture or embody certain concepts, |
||||
elaborate on them so that others can also see the need for it. --> |
||||
|
||||
## Proposal |
||||
|
||||
<!-- Propose how to evolve the design and code structure. |
||||
If the problem has been solved in other (maybe outside) projects, |
||||
or in different parts of the code, explain how they work. |
||||
If there are multiple ways, |
||||
briefly discuss pros and cons of each approach. --> |
@ -0,0 +1,10 @@ |
||||
[submodule "vendor/github.com/ethereum/go-ethereum"] |
||||
path = vendor/github.com/ethereum/go-ethereum |
||||
url = https://github.com/harmony-one/go-ethereum |
||||
branch = master |
||||
[submodule "vendor/github.com/golang/protobuf"] |
||||
path = vendor/github.com/golang/protobuf |
||||
url = https://github.com/golang/protobuf |
||||
[submodule "vendor/github.com/dedis/kyber"] |
||||
path = vendor/github.com/dedis/kyber |
||||
url = https://github.com/dedis/kyber |
@ -1,14 +0,0 @@ |
||||
#!/bin/bash |
||||
|
||||
if [ $(golint ./... | wc | awk '{print $1}') -gt 2 ]; then |
||||
echo "Go code is not formatted:" |
||||
gofmt -d . |
||||
exit 1 |
||||
fi |
||||
if [ -n "$(gofmt -l .)" ]; then |
||||
echo "Go code is not formatted:" |
||||
gofmt -d . |
||||
exit 1 |
||||
else |
||||
echo "Go code is well formatted ;)" |
||||
fi |
@ -1,18 +1,37 @@ |
||||
language: go |
||||
go: |
||||
- master |
||||
- stable |
||||
go_import_path: github.com/harmony-one/harmony |
||||
install: |
||||
- export GOPATH=$HOME/gopath |
||||
- export CGO_CPPFLAGS="-I$GOPATH/src/github.com/harmony-one/bls/include -I$GOPATH/src/github.com/harmony-one/mcl/include" |
||||
- export CGO_LDFLAGS="-L$GOPATH/src/github.com/harmony-one/bls/lib -L$GOPATH/src/github.com/harmony-one/mcl/lib" |
||||
- export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GOPATH/src/github.com/harmony-one/bls/lib:$GOPATH/src/github.com/harmony-one/mcl/lib |
||||
- cd $HOME/gopath/src |
||||
- cd github.com/harmony-one/harmony |
||||
- cd github.com/harmony-one |
||||
- git clone https://github.com/harmony-one/mcl.git |
||||
- cd mcl |
||||
- make |
||||
- cd .. |
||||
- git clone https://github.com/harmony-one/bls.git |
||||
- cd bls |
||||
- make |
||||
- cd ../harmony |
||||
- go get -t -v ./... |
||||
- go get -u golang.org/x/lint/golint |
||||
- go get -u golang.org/x/tools/cmd/goimports |
||||
- go get gopkg.in/check.v1 |
||||
- ./.travis.gofmt.sh |
||||
- ./scripts/travis_checker.sh |
||||
- go build -v ./... |
||||
script: |
||||
- ./.travis.gofmt.sh |
||||
- ./scripts/travis_checker.sh |
||||
notifications: |
||||
slack: |
||||
harmonyone:gggCd1QQopsQAW8JYgBWiH7M |
||||
# secure: RPB3ThYIGuDUidvaWfOA7Hc9x1bDfd5+Y10r7xwY+NGCN3zW86s/GNLpLutI0MWTV9e2CJupHvz5clp8Ktle/tVjLhs6jHQnNV7U8PTWKkL5By6IFVAHN12unMQn/m0RPwqMfdubajXoV51XhbFA/iow/0fqwsd61VdPIuBrlQjy9z7kyVnRLNoGvYjDqKEkJfYVb3qFNFLzD0F7Y2AgxnezIRjsTLgHzR4owLJYqVMhvTYIV9/vSf1w4UUPzhHyZRESl6bri+a1+g7GxE32OtNwq68xxVeeJcrO/MbjAHHW9V6BW1MjJfYzD5T+7JHIfZOjV2WgzJ7uCkVYztfq+02yOCSWsLNxFVojIDhVFEhhJ6Vd2Zf1otolS7j0svK/qNmShID9q9NAasaI105GsQgtaSPAUGd88J/vyX2ndG1nDOvxmgOo10tZFOnPHW7JnWMybk3PLza8o1ujA7X3JFdvDA8BPP9h6MVP4N7doCQ/n4Crts53HvEWlvcv5sBNu61WYlSTBzf1qNwBKMyN2E0rNubsxKmW8B6jLdWYdlx57nyTRPraNKGE1fnUW5nWRZGax3F1tQRwEfpQMk22qgeUK0RYWsPgHFaPciKCA3dJX7t1k/ib9pyR4nc9SZnYw54KMhkAXPIVQ0iy0EpTAH1DNYV6v8zXCwjl+BdkhlY= |
||||
slack: harmonyone:gggCd1QQopsQAW8JYgBWiH7M |
||||
after_success: |
||||
- wget https://raw.githubusercontent.com/DiscordHooks/travis-ci-discord-webhook/master/send.sh |
||||
- chmod +x send.sh |
||||
- ./send.sh success $WEBHOOK_URL |
||||
after_failure: |
||||
- wget https://raw.githubusercontent.com/DiscordHooks/travis-ci-discord-webhook/master/send.sh |
||||
- chmod +x send.sh |
||||
- ./send.sh failure $WEBHOOK_URL |
||||
|
@ -0,0 +1,50 @@ |
||||
# Contributing To Harmony |
||||
|
||||
## Coding Guidelines |
||||
|
||||
* In general, we follow [effective_go](https://golang.org/doc/effective_go.html) |
||||
* Code must adhere to the official [Go formatting guidelines](https://golang.org/doc/effective_go.html#formatting) (i.e. uses [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports)). |
||||
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines. |
||||
|
||||
## Pull Request (PR) |
||||
|
||||
This [github document](https://help.github.com/articles/creating-a-pull-request/) provides some guidance on how to create a pull request in github. |
||||
|
||||
## PR requirement |
||||
To pursue engineering excellence, we insist on the highest standard of quality for each PR.
||||
|
||||
* For each PR, please run [golint](https://github.com/golang/lint), [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports), to fix the basic issues/warnings. |
||||
* Make sure you understand [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/). |
||||
* Add a [Test] section in every PR detailing on your test process and results. If the test log is too long, please include a link to [gist](https://gist.github.com/) and add the link to the PR. |
||||
|
||||
## Typical workflow example |
||||
The best practice is to reorder and squash your local commits before submitting the PR, so that the PR is atomic and self-contained.
This [book chapter](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History) provides a detailed explanation and guidance on how to rewrite your local git history.

For example, a typical workflow looks like the following.
||||
```bash |
||||
# assuming you are working on a fix of bug1, and use a local branch called "fixes_of_bug1". |
||||
|
||||
git clone https://github.com/harmony-one/harmony |
||||
cd harmony |
||||
|
||||
# create a local branch to keep track of the origin/master |
||||
git branch fixes_of_bug1 origin/master |
||||
git checkout fixes_of_bug1
||||
|
||||
# make changes, build, test locally, commit changes locally |
||||
# don't forget to squash or rearrange your commits using "git rebase -i" |
||||
git rebase -i origin/master |
||||
|
||||
# rebase your change on the top of the tree |
||||
git pull --rebase |
||||
|
||||
# push your branch and create a PR |
||||
git push origin fixes_of_bug1:pr_fixes_of_bug1
||||
``` |
||||
|
||||
## Licensing |
||||
|
||||
Please see [our Fiduciary License Agreement](FLA.md). By your submission of |
||||
your contribution to us, you and we mutually agree to the terms and conditions |
||||
of the agreement. |
@ -0,0 +1,226 @@ |
||||
# Fiduciary License Agreement 2.0 |
||||
|
||||
Thank you for your interest in contributing to Simple Rules Company's Harmony |
||||
("We" or "Us"). |
||||
|
||||
The purpose of this contributor agreement ("Agreement") is to clarify and |
||||
document the rights granted by contributors to Us. By Your Submission of your |
||||
Contribution to Us, You and We mutually agree to the terms and conditions of |
||||
this Agreement. |
||||
|
||||
## 0. Preamble |
||||
|
||||
Software is deeply embedded in all aspects of our lives and it is important |
||||
that it empower, rather than restrict us. Free Software gives everybody the |
||||
rights to use, understand, adapt and share software. These rights help support |
||||
other fundamental freedoms like freedom of speech, press and privacy. |
||||
|
||||
Development of Free Software can follow many patterns. In some cases whole |
||||
development is handled by a sole programmer or a small group of people. But |
||||
usually, the creation and maintenance of software is a complex process that |
||||
requires the contribution of many individuals. This also affects who owns the |
||||
rights to the software. In the latter case, rights in software are owned |
||||
jointly by a great number of individuals. |
||||
|
||||
To tackle this issue some projects require a full copyright assignment to be |
||||
signed by all contributors. The problem with such assignments is that they |
||||
often lack checks and balances that would protect the contributors from |
||||
potential abuse of power from the new copyright holder. |
||||
|
||||
FSFE’s Fiduciary License Agreement (FLA) was created by the Free Software |
||||
Foundation Europe e.V. with just that in mind – to concentrate all deciding |
||||
power within one entity and prevent fragmentation of rights on one hand, while |
||||
on the other preventing that single entity from abusing its power. The main |
||||
aim is to ensure that the software covered under the FLA will forever remain |
||||
Free Software. |
||||
|
||||
This process only serves for the transfer of economic rights. So-called moral |
||||
rights (e.g. authors right to be identified as author) remain with the original |
||||
author(s) and are inalienable. |
||||
|
||||
## How to use this FLA |
||||
|
||||
If You are an employee and have created the Contribution as part of your |
||||
employment, You need to have Your employer approve this Agreement or sign the |
||||
Entity version of this document. If You do not own the Copyright in the entire |
||||
work of authorship, any other author of the Contribution should also sign this |
||||
– in any event, please contact Us at licensing@harmony.one |
||||
|
||||
## 1. Definitions |
||||
|
||||
**"You"** means the individual Copyright owner who Submits a Contribution to Us. |
||||
|
||||
**"Contribution"** means any original work of authorship, including any |
||||
original modifications or additions to an existing work of authorship, |
||||
Submitted by You to Us, in which You own the Copyright. |
||||
|
||||
**"Copyright"** means all rights protecting works of authorship, including |
||||
copyright, moral and neighboring rights, as appropriate, for the full term of |
||||
their existence. |
||||
|
||||
**"Material"** means the software or documentation made available by Us to |
||||
third parties. When this Agreement covers more than one software project, the |
||||
Material means the software or documentation to which the Contribution was |
||||
Submitted. After You Submit the Contribution, it may be included in the |
||||
Material. |
||||
|
||||
**"Submit"** means any act by which a Contribution is transferred to Us by You |
||||
by means of tangible or intangible media, including but not limited to |
||||
electronic mailing lists, source code control systems, and issue tracking |
||||
systems that are managed by, or on behalf of, Us, but excluding any transfer |
||||
that is conspicuously marked or otherwise designated in writing by You as "Not |
||||
a Contribution." |
||||
|
||||
**"Documentation"** means any non-software portion of a Contribution. |
||||
|
||||
## 2. License grant |
||||
|
||||
### 2.1 Copyright license to Us |
||||
|
||||
Subject to the terms and conditions of this Agreement, You hereby grant to Us a |
||||
worldwide, royalty-free, exclusive, perpetual and irrevocable (except as stated |
||||
in Section 8.2) license, with the right to transfer an unlimited number of |
||||
non-exclusive licenses or to grant sublicenses to third parties, under the |
||||
Copyright covering the Contribution to use the Contribution by all means, |
||||
including, but not limited to: |
||||
|
||||
* publish the Contribution, |
||||
* modify the Contribution, |
||||
* prepare derivative works based upon or containing the Contribution and/or to |
||||
combine the Contribution with other Materials, |
||||
* reproduce the Contribution in original or modified form, |
||||
* distribute, to make the Contribution available to the public, display and |
||||
publicly perform the Contribution in original or modified form. |
||||
|
||||
### 2.2 Moral rights |
||||
|
||||
Moral Rights remain unaffected to the extent they are recognized and not |
||||
waivable by applicable law. Notwithstanding, You may add your name to the |
||||
attribution mechanism customary used in the Materials you Contribute to, such |
||||
as the header of the source code files of Your Contribution, and We will |
||||
respect this attribution when using Your Contribution. |
||||
|
||||
### 2.3 Copyright license back to You |
||||
|
||||
Upon such grant of rights to Us, We immediately grant to You a worldwide, |
||||
royalty-free, non-exclusive, perpetual and irrevocable license, with the right |
||||
to transfer an unlimited number of non-exclusive licenses or to grant |
||||
sublicenses to third parties, under the Copyright covering the Contribution to |
||||
use the Contribution by all means, including, but not limited to: |
||||
|
||||
* publish the Contribution, |
||||
* modify the Contribution, |
||||
* prepare derivative works based upon or containing the Contribution and/or to |
||||
combine the Contribution with other Materials, |
||||
* reproduce the Contribution in original or modified form, |
||||
* distribute, to make the Contribution available to the public, display and |
||||
publicly perform the Contribution in original or modified form. |
||||
|
||||
This license back is limited to the Contribution and does not provide any |
||||
rights to the Material. |
||||
|
||||
## 3. Patents |
||||
|
||||
### 3.1 Patent license |
||||
|
||||
Subject to the terms and conditions of this Agreement You hereby grant to Us |
||||
and to recipients of Materials distributed by Us a worldwide, royalty-free, |
||||
non-exclusive, perpetual and irrevocable (except as stated in Section 3.2) |
||||
patent license, with the right to transfer an unlimited number of non-exclusive |
||||
licenses or to grant sublicenses to third parties, to make, have made, use, |
||||
sell, offer for sale, import and otherwise transfer the Contribution and the |
||||
Contribution in combination with any Material (and portions of such |
||||
combination). This license applies to all patents owned or controlled by You, |
||||
whether already acquired or hereafter acquired, that would be infringed by |
||||
making, having made, using, selling, offering for sale, importing or otherwise |
||||
transferring of Your Contribution(s) alone or by combination of Your |
||||
Contribution(s) with any Material. |
||||
|
||||
### 3.2 Revocation of patent license |
||||
|
||||
You reserve the right to revoke the patent license stated in section 3.1 if We |
||||
make any infringement claim that is targeted at your Contribution and not |
||||
asserted for a Defensive Purpose. An assertion of claims of the Patents shall |
||||
be considered for a "Defensive Purpose" if the claims are asserted against an |
||||
entity that has filed, maintained, threatened, or voluntarily participated in a |
||||
patent infringement lawsuit against Us or any of Our licensees. |
||||
|
||||
## 4. License obligations by Us |
||||
|
||||
We agree to (sub)license the Contribution or any Materials containing, based on |
||||
or derived from your Contribution under the terms of any licenses the Free |
||||
Software Foundation classifies as Free Software License and which are approved |
||||
by the Open Source Initiative as Open Source licenses. |
||||
|
||||
We agree to license patents owned or controlled by You only to the extent |
||||
necessary to (sub)license Your Contribution(s) and the combination of Your |
||||
Contribution(s) with the Material under the terms of any licenses the Free |
||||
Software Foundation classifies as Free Software licenses and which are approved |
||||
by the Open Source Initiative as Open Source licenses.
||||
|
||||
## 5. Disclaimer |
||||
|
||||
THE CONTRIBUTION IS PROVIDED "AS IS". MORE PARTICULARLY, ALL EXPRESS OR |
||||
IMPLIED WARRANTIES INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTY OF |
||||
SATISFACTORY QUALITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE |
||||
EXPRESSLY DISCLAIMED BY YOU TO US AND BY US TO YOU. TO THE EXTENT THAT ANY |
||||
SUCH WARRANTIES CANNOT BE DISCLAIMED, SUCH WARRANTY IS LIMITED IN DURATION AND |
||||
EXTENT TO THE MINIMUM PERIOD AND EXTENT PERMITTED BY LAW. |
||||
|
||||
## 6. Consequential damage waiver |
||||
|
||||
TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU OR WE |
||||
BE LIABLE FOR ANY LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF DATA, |
||||
INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL AND EXEMPLARY DAMAGES ARISING OUT |
||||
OF THIS AGREEMENT REGARDLESS OF THE LEGAL OR EQUITABLE THEORY (CONTRACT, TORT |
||||
OR OTHERWISE) UPON WHICH THE CLAIM IS BASED. |
||||
|
||||
## 7. Approximation of disclaimer and damage waiver |
||||
|
||||
IF THE DISCLAIMER AND DAMAGE WAIVER MENTIONED IN SECTION 5. AND SECTION 6. |
||||
CANNOT BE GIVEN LEGAL EFFECT UNDER APPLICABLE LOCAL LAW, REVIEWING COURTS SHALL |
||||
APPLY LOCAL LAW THAT MOST CLOSELY APPROXIMATES AN ABSOLUTE WAIVER OF ALL CIVIL |
||||
OR CONTRACTUAL LIABILITY IN CONNECTION WITH THE CONTRIBUTION. |
||||
|
||||
## 8. Term |
||||
|
||||
8.1 This Agreement shall come into effect upon Your acceptance of the terms and |
||||
conditions. |
||||
|
||||
8.2 This Agreement shall apply for the term of the copyright and patents |
||||
licensed here. However, You shall have the right to terminate the Agreement if |
||||
We do not fulfill the obligations as set forth in Section 4. Such termination |
||||
must be made in writing. |
||||
|
||||
8.3 In the event of a termination of this Agreement Sections 5., 6., 7., 8., |
||||
and 9. shall survive such termination and shall remain in full force |
||||
thereafter. For the avoidance of doubt, Free and Open Source Software |
||||
(sub)licenses that have already been granted for Contributions at the date of |
||||
the termination shall remain in full force after the termination of this |
||||
Agreement. |
||||
|
||||
## 9. Miscellaneous |
||||
|
||||
9.1 This Agreement and all disputes, claims, actions, suits or other |
||||
proceedings arising out of this agreement or relating in any way to it shall be |
||||
governed by the laws of the State of California, in the United States of |
||||
America excluding its private international law provisions. |
||||
|
||||
9.2 This Agreement sets out the entire agreement between You and Us for Your |
||||
Contributions to Us and overrides all other agreements or understandings. |
||||
|
||||
9.3 In case of Your death, this agreement shall continue with Your heirs. In |
||||
case of more than one heir, all heirs must exercise their rights through a |
||||
commonly authorized person. |
||||
|
||||
9.4 If any provision of this Agreement is found void and unenforceable, such |
||||
provision will be replaced to the extent possible with a provision that comes |
||||
closest to the meaning of the original provision and that is enforceable. The |
||||
terms and conditions set forth in this Agreement shall apply notwithstanding |
||||
any failure of essential purpose of this Agreement or any limited remedy to the |
||||
maximum extent possible under law. |
||||
|
||||
9.5 You agree to notify Us of any facts or circumstances of which you become |
||||
aware that would make this Agreement inaccurate in any respect. |
||||
|
||||
|
@ -0,0 +1,19 @@ |
||||
## Issue |
||||
|
||||
<!-- link to the issue number or description of the issue --> |
||||
|
||||
## Test |
||||
|
||||
#### Test Coverage Data |
||||
|
||||
<!-- run 'go test -cover' in the directory where you made changes -->
||||
|
||||
* Before |
||||
* After |
||||
|
||||
#### Test/Run Logs |
||||
|
||||
<!-- links to the test/run log, or copy&paste part of the log if it is too long --> |
||||
<!-- or you may just create a [gist](https://gist.github.com/) and link the gist here --> |
||||
|
||||
## TODO |
@ -1,51 +1,131 @@ |
||||
# Harmony Benchmark |
||||
# Harmony |
||||
[![Build Status](https://travis-ci.com/harmony-one/harmony.svg?token=DnoYvYiTAk7pqTo9XsTi&branch=master)](https://travis-ci.com/harmony-one/harmony) |
||||
<a href='https://github.com/jpoles1/gopherbadger' target='_blank'>![gopherbadger-tag-do-not-edit](https://img.shields.io/badge/Go%20Coverage-39%25-brightgreen.svg?longCache=true&style=flat)</a> |
||||
|
||||
|
||||
## Coding Guidelines |
||||
|
||||
* In general, we follow [effective_go](https://golang.org/doc/effective_go.html) |
||||
* Code must adhere to the official [Go formatting guidelines](https://golang.org/doc/effective_go.html#formatting) (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). |
||||
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines. |
||||
|
||||
<a href='https://github.com/jpoles1/gopherbadger' target='_blank'>![gopherbadger-tag-do-not-edit](https://img.shields.io/badge/Go%20Coverage-45%25-brightgreen.svg?longCache=true&style=flat)</a> |
||||
<a href="https://discord.gg/kdf8a6T">![Discord](https://img.shields.io/discord/532383335348043777.svg)</a> |
||||
[![Coverage Status](https://coveralls.io/repos/github/harmony-one/harmony/badge.svg?branch=master)](https://coveralls.io/github/harmony-one/harmony?branch=master) |
||||
|
||||
## Installation Requirements |
||||
GMP and OpenSSL are required. On macOS, you can install them with Homebrew:
||||
```bash |
||||
brew install gmp |
||||
brew install openssl |
||||
``` |
||||
|
||||
## Dev Environment Setup |
||||
|
||||
``` |
||||
```bash |
||||
export GOPATH=$HOME/<path_of_your_choice> |
||||
export CGO_CFLAGS="-I$GOPATH/src/github.com/harmony-one/bls/include -I$GOPATH/src/github.com/harmony-one/mcl/include -I/usr/local/opt/openssl/include" |
||||
export CGO_LDFLAGS="-L$GOPATH/src/github.com/harmony-one/bls/lib -L/usr/local/opt/openssl/lib" |
||||
export LD_LIBRARY_PATH=$GOPATH/src/github.com/harmony-one/bls/lib:$GOPATH/src/github.com/harmony-one/mcl/lib:/usr/local/opt/openssl/lib |
||||
export LIBRARY_PATH=$LD_LIBRARY_PATH |
||||
export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH |
||||
|
||||
mkdir -p $HOME/<path_of_your_choice>/src/github.com/harmony-one |
||||
|
||||
cd $HOME/<path_of_your_choice>/src/github.com/harmony-one |
||||
|
||||
git clone git@github.com:harmony-one/mcl.git |
||||
|
||||
cd mcl && make -j4 && cd .. |
||||
|
||||
git clone git@github.com:harmony-one/bls.git |
||||
|
||||
cd bls && make -j4 && cd .. |
||||
|
||||
git clone git@github.com:harmony-one/harmony.git |
||||
|
||||
cd harmony |
||||
|
||||
go get ./... |
||||
|
||||
git submodule update --init --recursive |
||||
|
||||
``` |
||||
|
||||
## Usage |
||||
## Build |
||||
|
||||
### Running local test |
||||
Harmony server / main node: |
||||
``` |
||||
./test/deploy.sh ./test/configs/local_config1.txt |
||||
go build -o bin/harmony cmd/harmony.go |
||||
``` |
||||
|
||||
## Testing |
||||
Beacon node: |
||||
``` |
||||
go build -o bin/beacon cmd/beaconchain/main.go |
||||
``` |
||||
|
||||
Make sure you the following command and make sure everything passed before submitting your code. |
||||
Wallet: |
||||
``` |
||||
go build -o bin/wallet cmd/client/wallet/main.go |
||||
``` |
||||
|
||||
Tx Generator: |
||||
``` |
||||
./test_before_submit.sh |
||||
go build -o bin/txgen cmd/client/txgen/main.go |
||||
``` |
||||
|
||||
## Linting |
||||
You can also run the script `./scripts/go_executable_build.sh` to build all the executables. |
||||
|
||||
Make sure you the following command and make sure everything passes golint. |
||||
Some of our scripts require bash 4.x support; please [install bash 4.x](http://tldrdevnotes.com/bash-upgrade-3-4-macos) on macOS.
||||
|
||||
## Usage |
||||
You may build src/harmony.go locally and run a local test.
||||
|
||||
### Running local test |
||||
The deploy.sh script creates a local Harmony blockchain devnet based on the configuration file.
The configuration file specifies the number of nodes and their IP/port.
The script starts one local beacon chain node, the blockchain nodes, and a transaction generator program which generates and sends simulated transactions to the local blockchain.
||||
|
||||
```bash |
||||
./test/deploy.sh ./test/configs/local_config1.txt |
||||
``` |
||||
./lint_before_submit.sh |
||||
|
||||
## Testing |
||||
|
||||
Run the following command and make sure everything passes before submitting your code.
||||
|
||||
```bash |
||||
./test/test_before_submit.sh |
||||
``` |
||||
|
||||
## License |
||||
|
||||
Harmony is licensed under the MIT License. See [`LICENSE`](LICENSE) file for |
||||
the terms and conditions. |
||||
|
||||
Also please see [our Fiduciary License Agreement](FLA.md) if you are |
||||
contributing to the project. By your submission of your contribution to us, you |
||||
and we mutually agree to the terms and conditions of the agreement. |
||||
|
||||
|
||||
## Contributing To Harmony |
||||
|
||||
See [`CONTRIBUTING`](CONTRIBUTING.md) for details. |
||||
|
||||
## Development Status |
||||
|
||||
### Features Done |
||||
|
||||
* Basic consensus protocol with O(n) complexity |
||||
* Basic validator server |
||||
* P2p network connection and unicast |
||||
* Account model and support for Solidity |
||||
* Simple wallet program |
||||
* Mock beacon chain with static sharding |
||||
* Information dispersal algorithm using erasure encoding (to be integrated)
* Blockchain explorer with performance report and transaction lookup
* Transaction generator for load testing
||||
|
||||
### Features To Be Implemented |
||||
|
||||
* Full beacon chain with multiple validators |
||||
* Resharding |
||||
* Staking on beacon chain |
||||
* Fast state synchronization |
||||
* Distributed randomness generation with VRF and VDF |
||||
* Kademlia routing |
||||
* P2P network and gossiping |
||||
* Full protocol of consensus with BLS multi-sig and view-change protocol |
||||
* Integration with WASM |
||||
* Cross-shard transaction |
||||
|
@ -0,0 +1 @@ |
||||
protoc -I ./ message.proto --go_out=./ |
@ -0,0 +1,241 @@ |
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: message.proto
|
||||
|
||||
package message |
||||
|
||||
import ( |
||||
fmt "fmt" |
||||
proto "github.com/golang/protobuf/proto" |
||||
math "math" |
||||
) |
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal |
||||
var _ = fmt.Errorf |
||||
var _ = math.Inf |
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type MessageType int32 |
||||
|
||||
const ( |
||||
MessageType_UNKNOWN MessageType = 0 |
||||
MessageType_NEWNODE_BOOTNODE MessageType = 1 |
||||
MessageType_BOOTNODE_NEWNODE MessageType = 2 |
||||
MessageType_NEWNODE_BEACON MessageType = 3 |
||||
MessageType_BEACON_NEWNODE MessageType = 4 |
||||
) |
||||
|
||||
var MessageType_name = map[int32]string{ |
||||
0: "UNKNOWN", |
||||
1: "NEWNODE_BOOTNODE", |
||||
2: "BOOTNODE_NEWNODE", |
||||
3: "NEWNODE_BEACON", |
||||
4: "BEACON_NEWNODE", |
||||
} |
||||
|
||||
var MessageType_value = map[string]int32{ |
||||
"UNKNOWN": 0, |
||||
"NEWNODE_BOOTNODE": 1, |
||||
"BOOTNODE_NEWNODE": 2, |
||||
"NEWNODE_BEACON": 3, |
||||
"BEACON_NEWNODE": 4, |
||||
} |
||||
|
||||
func (x MessageType) String() string { |
||||
return proto.EnumName(MessageType_name, int32(x)) |
||||
} |
||||
|
||||
func (MessageType) EnumDescriptor() ([]byte, []int) { |
||||
return fileDescriptor_33c57e4bae7b9afd, []int{0} |
||||
} |
||||
|
||||
// This is universal message for all communication protocols.
|
||||
// There are different Requests for different message types.
|
||||
// As we introduce a new type of message just add a new MessageType and new type of request in Message.
|
||||
//
|
||||
// The request field will be either one of the structure corresponding to the MessageType type.
|
||||
type Message struct { |
||||
Type MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=message.MessageType" json:"type,omitempty"` |
||||
// Types that are valid to be assigned to Request:
|
||||
// *Message_NewnodeBootnodeRequest
|
||||
// *Message_BootnodeNewnodeRequest
|
||||
Request isMessage_Request `protobuf_oneof:"request"` |
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"` |
||||
XXX_unrecognized []byte `json:"-"` |
||||
XXX_sizecache int32 `json:"-"` |
||||
} |
||||
|
||||
func (m *Message) Reset() { *m = Message{} } |
||||
func (m *Message) String() string { return proto.CompactTextString(m) } |
||||
func (*Message) ProtoMessage() {} |
||||
func (*Message) Descriptor() ([]byte, []int) { |
||||
return fileDescriptor_33c57e4bae7b9afd, []int{0} |
||||
} |
||||
|
||||
func (m *Message) XXX_Unmarshal(b []byte) error { |
||||
return xxx_messageInfo_Message.Unmarshal(m, b) |
||||
} |
||||
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
||||
return xxx_messageInfo_Message.Marshal(b, m, deterministic) |
||||
} |
||||
func (m *Message) XXX_Merge(src proto.Message) { |
||||
xxx_messageInfo_Message.Merge(m, src) |
||||
} |
||||
func (m *Message) XXX_Size() int { |
||||
return xxx_messageInfo_Message.Size(m) |
||||
} |
||||
func (m *Message) XXX_DiscardUnknown() { |
||||
xxx_messageInfo_Message.DiscardUnknown(m) |
||||
} |
||||
|
||||
var xxx_messageInfo_Message proto.InternalMessageInfo |
||||
|
||||
func (m *Message) GetType() MessageType { |
||||
if m != nil { |
||||
return m.Type |
||||
} |
||||
return MessageType_UNKNOWN |
||||
} |
||||
|
||||
type isMessage_Request interface { |
||||
isMessage_Request() |
||||
} |
||||
|
||||
type Message_NewnodeBootnodeRequest struct { |
||||
NewnodeBootnodeRequest *NewNodeBootNodeRequest `protobuf:"bytes,2,opt,name=newnode_bootnode_request,json=newnodeBootnodeRequest,proto3,oneof"` |
||||
} |
||||
|
||||
type Message_BootnodeNewnodeRequest struct { |
||||
BootnodeNewnodeRequest *BootNodeNewNodeRequest `protobuf:"bytes,3,opt,name=bootnode_newnode_request,json=bootnodeNewnodeRequest,proto3,oneof"` |
||||
} |
||||
|
||||
func (*Message_NewnodeBootnodeRequest) isMessage_Request() {} |
||||
|
||||
func (*Message_BootnodeNewnodeRequest) isMessage_Request() {} |
||||
|
||||
func (m *Message) GetRequest() isMessage_Request { |
||||
if m != nil { |
||||
return m.Request |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *Message) GetNewnodeBootnodeRequest() *NewNodeBootNodeRequest { |
||||
if x, ok := m.GetRequest().(*Message_NewnodeBootnodeRequest); ok { |
||||
return x.NewnodeBootnodeRequest |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *Message) GetBootnodeNewnodeRequest() *BootNodeNewNodeRequest { |
||||
if x, ok := m.GetRequest().(*Message_BootnodeNewnodeRequest); ok { |
||||
return x.BootnodeNewnodeRequest |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*Message) XXX_OneofWrappers() []interface{} { |
||||
return []interface{}{ |
||||
(*Message_NewnodeBootnodeRequest)(nil), |
||||
(*Message_BootnodeNewnodeRequest)(nil), |
||||
} |
||||
} |
||||
|
||||
// Message of NewNode talking to BootNode.
|
||||
type NewNodeBootNodeRequest struct { |
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"` |
||||
XXX_unrecognized []byte `json:"-"` |
||||
XXX_sizecache int32 `json:"-"` |
||||
} |
||||
|
||||
func (m *NewNodeBootNodeRequest) Reset() { *m = NewNodeBootNodeRequest{} } |
||||
func (m *NewNodeBootNodeRequest) String() string { return proto.CompactTextString(m) } |
||||
func (*NewNodeBootNodeRequest) ProtoMessage() {} |
||||
func (*NewNodeBootNodeRequest) Descriptor() ([]byte, []int) { |
||||
return fileDescriptor_33c57e4bae7b9afd, []int{1} |
||||
} |
||||
|
||||
func (m *NewNodeBootNodeRequest) XXX_Unmarshal(b []byte) error { |
||||
return xxx_messageInfo_NewNodeBootNodeRequest.Unmarshal(m, b) |
||||
} |
||||
func (m *NewNodeBootNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
||||
return xxx_messageInfo_NewNodeBootNodeRequest.Marshal(b, m, deterministic) |
||||
} |
||||
func (m *NewNodeBootNodeRequest) XXX_Merge(src proto.Message) { |
||||
xxx_messageInfo_NewNodeBootNodeRequest.Merge(m, src) |
||||
} |
||||
func (m *NewNodeBootNodeRequest) XXX_Size() int { |
||||
return xxx_messageInfo_NewNodeBootNodeRequest.Size(m) |
||||
} |
||||
func (m *NewNodeBootNodeRequest) XXX_DiscardUnknown() { |
||||
xxx_messageInfo_NewNodeBootNodeRequest.DiscardUnknown(m) |
||||
} |
||||
|
||||
var xxx_messageInfo_NewNodeBootNodeRequest proto.InternalMessageInfo |
||||
|
||||
// Message of BootNode talking to NewNode.
|
||||
type BootNodeNewNodeRequest struct { |
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"` |
||||
XXX_unrecognized []byte `json:"-"` |
||||
XXX_sizecache int32 `json:"-"` |
||||
} |
||||
|
||||
func (m *BootNodeNewNodeRequest) Reset() { *m = BootNodeNewNodeRequest{} } |
||||
func (m *BootNodeNewNodeRequest) String() string { return proto.CompactTextString(m) } |
||||
func (*BootNodeNewNodeRequest) ProtoMessage() {} |
||||
func (*BootNodeNewNodeRequest) Descriptor() ([]byte, []int) { |
||||
return fileDescriptor_33c57e4bae7b9afd, []int{2} |
||||
} |
||||
|
||||
func (m *BootNodeNewNodeRequest) XXX_Unmarshal(b []byte) error { |
||||
return xxx_messageInfo_BootNodeNewNodeRequest.Unmarshal(m, b) |
||||
} |
||||
func (m *BootNodeNewNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
||||
return xxx_messageInfo_BootNodeNewNodeRequest.Marshal(b, m, deterministic) |
||||
} |
||||
func (m *BootNodeNewNodeRequest) XXX_Merge(src proto.Message) { |
||||
xxx_messageInfo_BootNodeNewNodeRequest.Merge(m, src) |
||||
} |
||||
func (m *BootNodeNewNodeRequest) XXX_Size() int { |
||||
return xxx_messageInfo_BootNodeNewNodeRequest.Size(m) |
||||
} |
||||
func (m *BootNodeNewNodeRequest) XXX_DiscardUnknown() { |
||||
xxx_messageInfo_BootNodeNewNodeRequest.DiscardUnknown(m) |
||||
} |
||||
|
||||
var xxx_messageInfo_BootNodeNewNodeRequest proto.InternalMessageInfo |
||||
|
||||
func init() { |
||||
proto.RegisterEnum("message.MessageType", MessageType_name, MessageType_value) |
||||
proto.RegisterType((*Message)(nil), "message.Message") |
||||
proto.RegisterType((*NewNodeBootNodeRequest)(nil), "message.NewNodeBootNodeRequest") |
||||
proto.RegisterType((*BootNodeNewNodeRequest)(nil), "message.BootNodeNewNodeRequest") |
||||
} |
||||
|
||||
func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } |
||||
|
||||
var fileDescriptor_33c57e4bae7b9afd = []byte{ |
||||
// 253 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x41, 0x4b, 0xc3, 0x40, |
||||
0x10, 0x85, 0xbb, 0x6d, 0x31, 0x38, 0xc1, 0xb2, 0x2c, 0xa5, 0xe4, 0x66, 0xe9, 0x29, 0x78, 0xe8, |
||||
0xa1, 0xfe, 0x02, 0xa3, 0x01, 0x41, 0x9c, 0x85, 0x50, 0xe9, 0xc1, 0x43, 0xb0, 0x74, 0xf0, 0x64, |
||||
0x26, 0x36, 0x2b, 0xa5, 0xff, 0xdc, 0xa3, 0x6c, 0x32, 0x1b, 0x8a, 0xe4, 0xb4, 0x6f, 0xe6, 0xbd, |
||||
0xf9, 0x66, 0xd9, 0x85, 0x9b, 0x2f, 0x6a, 0x9a, 0x8f, 0x4f, 0x5a, 0xd7, 0x47, 0x76, 0x6c, 0x22, |
||||
0x29, 0x57, 0xbf, 0x0a, 0xa2, 0xd7, 0x4e, 0x9b, 0x14, 0xa6, 0xee, 0x5c, 0x53, 0xa2, 0x96, 0x2a, |
||||
0x9d, 0x6d, 0xe6, 0xeb, 0x30, 0x22, 0xfe, 0xf6, 0x5c, 0x53, 0xd1, 0x26, 0xcc, 0x3b, 0x24, 0x15, |
||||
0x9d, 0x2a, 0x3e, 0x50, 0xb9, 0x67, 0x76, 0xad, 0x38, 0xd2, 0xf7, 0x0f, 0x35, 0x2e, 0x19, 0x2f, |
||||
0x55, 0x1a, 0x6f, 0x6e, 0xfb, 0x69, 0xa4, 0x13, 0xf2, 0x81, 0x32, 0x66, 0xe7, 0xcf, 0xa2, 0x8b, |
||||
0x3d, 0x8f, 0x8a, 0x85, 0x20, 0x32, 0x21, 0x88, 0xe3, 0xe1, 0x3d, 0x34, 0x6c, 0x09, 0xf0, 0xc9, |
||||
0x3f, 0x78, 0xa0, 0xca, 0x92, 0x0b, 0x78, 0x40, 0x60, 0x47, 0x10, 0x27, 0xbb, 0x86, 0x48, 0x58, |
||||
0xab, 0x04, 0x16, 0xc3, 0x77, 0xf3, 0xce, 0x30, 0xf8, 0xae, 0x82, 0xf8, 0xe2, 0x35, 0x4c, 0x0c, |
||||
0xd1, 0x1b, 0xbe, 0xa0, 0xdd, 0xa1, 0x1e, 0x99, 0x39, 0x68, 0xcc, 0x77, 0x68, 0x9f, 0xf2, 0x32, |
||||
0xb3, 0x76, 0xeb, 0x85, 0x56, 0xbe, 0x1b, 0xaa, 0x52, 0x6c, 0x3d, 0x36, 0x06, 0x66, 0x7d, 0x36, |
||||
0x7f, 0x78, 0xb4, 0xa8, 0x27, 0xbe, 0xd7, 0xe9, 0x3e, 0x37, 0xdd, 0x5f, 0xb5, 0xdf, 0x75, 0xff, |
||||
0x17, 0x00, 0x00, 0xff, 0xff, 0x37, 0x30, 0xd0, 0xfb, 0xbf, 0x01, 0x00, 0x00, |
||||
} |
@ -0,0 +1,33 @@ |
||||
syntax = "proto3"; |
||||
package message; |
||||
|
||||
enum MessageType { |
||||
UNKNOWN = 0; |
||||
NEWNODE_BOOTNODE = 1; |
||||
BOOTNODE_NEWNODE = 2; |
||||
NEWNODE_BEACON = 3; |
||||
BEACON_NEWNODE = 4; |
||||
} |
||||
|
||||
// This is universal message for all communication protocols. |
||||
// There are different Requests for different message types. |
||||
// As we introduce a new type of message just add a new MessageType and new type of request in Message. |
||||
// |
||||
// The request field will be either one of the structure corresponding to the MessageType type. |
||||
message Message { |
||||
MessageType type = 1; |
||||
oneof request { |
||||
NewNodeBootNodeRequest newnode_bootnode_request = 2; |
||||
BootNodeNewNodeRequest bootnode_newnode_request = 3; |
||||
} |
||||
} |
||||
|
||||
// Message of NewNode talking to BootNode. |
||||
message NewNodeBootNodeRequest { |
||||
} |
||||
|
||||
// Message of BootNode talking to NewNode. |
||||
message BootNodeNewNodeRequest { |
||||
} |
||||
|
||||
// TODO(minhdoan): refactor and introduce consensus message as one of possible Message.request. |
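For illustration, here is a minimal, hedged Go sketch of how a Message carrying the oneof request could be built and round-tripped with the generated bindings shown above; the import path is an assumption, not necessarily the actual package location.

```go
package main

import (
	"fmt"

	proto "github.com/golang/protobuf/proto"

	// Hypothetical import path for the generated "message" package above.
	message "github.com/harmony-one/harmony/api/proto/message"
)

func main() {
	// Wrap a NewNodeBootNodeRequest in the oneof "request" field.
	msg := &message.Message{
		Type: message.MessageType_NEWNODE_BOOTNODE,
		Request: &message.Message_NewnodeBootnodeRequest{
			NewnodeBootnodeRequest: &message.NewNodeBootNodeRequest{},
		},
	}

	// Marshal to the wire format and unmarshal it back.
	data, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}
	var decoded message.Message
	if err := proto.Unmarshal(data, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetType(), decoded.GetNewnodeBootnodeRequest() != nil)
}
```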
@ -0,0 +1,15 @@ |
||||
### Full state syncing |
||||
|
||||
A node downloads all the missing blocks until it catches up with the block that is in the process of consensus. |
||||
|
||||
### Node states |
||||
|
||||
A node can be in one of the following states:
||||
|
||||
NodeInit, NodeWaitToJoin, NodeNotInSync, NodeOffline, NodeReadyForConsensus, NodeDoingConsensus |
||||
|
||||
When a node joins the network, it joins a shard and tries to participate in the consensus process. It assumes its state is NodeReadyForConsensus until it finds it is unable to verify a new block, at which point it moves to NodeNotInSync. After finishing the syncing process, its state becomes NodeReadyForConsensus again. Simply put, most of the time its state alternates between these two.
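A minimal Go sketch of that state bookkeeping, assuming the state names listed above; the type and method names here are illustrative, not the actual node implementation:

```go
package node

// State is the node status described above.
type State int

const (
	NodeInit State = iota
	NodeWaitToJoin
	NodeNotInSync
	NodeOffline
	NodeReadyForConsensus
	NodeDoingConsensus
)

// OnFailedBlockVerify drops a ready node to NodeNotInSync when it cannot
// verify a new block.
func (s State) OnFailedBlockVerify() State {
	if s == NodeReadyForConsensus {
		return NodeNotInSync
	}
	return s
}

// OnSyncDone returns a node to NodeReadyForConsensus once syncing finishes.
func (s State) OnSyncDone() State {
	if s == NodeNotInSync {
		return NodeReadyForConsensus
	}
	return s
}
```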
||||
|
||||
### Doing syncing |
||||
|
||||
The syncing process consists of three parts: downloading the old blocks whose timestamps are before the state-syncing start time; registering with a few peers (full nodes) and accepting new blocks whose timestamps are after the state-syncing start time; and catching the last-mile blocks from the consensus process once the node's latest block is only 1~2 blocks behind the current consensus block.
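A hedged outline of those three phases in Go; all names below are illustrative placeholders rather than the real syncing package:

```go
package syncing

import "time"

// Peer is a placeholder for a full-node peer the syncing node registers with.
type Peer struct{}

// SyncUpToConsensus sketches the three phases described above.
func SyncUpToConsensus(start time.Time, peers []Peer) {
	// 1. Download the old blocks whose timestamps are before the syncing start time.
	downloadOldBlocks(start)

	// 2. Register with a few full-node peers and accept new blocks whose
	//    timestamps are after the syncing start time.
	registerForNewBlocks(peers, start)

	// 3. Catch the last-mile blocks from the consensus process once the local
	//    chain is only 1~2 blocks behind the current consensus block.
	catchLastMileBlocks()
}

func downloadOldBlocks(before time.Time)                 { /* fetch historical blocks */ }
func registerForNewBlocks(peers []Peer, after time.Time) { /* subscribe to new blocks */ }
func catchLastMileBlocks()                               { /* pull blocks from consensus */ }
```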
@ -0,0 +1,90 @@ |
||||
// bootnode provides a peer discovery service that new nodes use to connect to the p2p network.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"flag" |
||||
"fmt" |
||||
"os" |
||||
"path" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
"github.com/harmony-one/harmony/p2p" |
||||
"github.com/harmony-one/harmony/p2p/p2pimpl" |
||||
|
||||
ds "github.com/ipfs/go-datastore" |
||||
dsync "github.com/ipfs/go-datastore/sync" |
||||
|
||||
kaddht "github.com/libp2p/go-libp2p-kad-dht" |
||||
) |
||||
|
||||
var ( |
||||
version string |
||||
builtBy string |
||||
builtAt string |
||||
commit string |
||||
) |
||||
|
||||
func printVersion(me string) { |
||||
fmt.Fprintf(os.Stderr, "Harmony (C) 2019. %v, version %v-%v (%v %v)\n", path.Base(me), version, commit, builtBy, builtAt) |
||||
os.Exit(0) |
||||
} |
||||
|
||||
func loggingInit(logFolder, ip, port string) { |
||||
// Setup a logger to stdout and log file.
|
||||
if err := os.MkdirAll(logFolder, 0755); err != nil { |
||||
panic(err) |
||||
} |
||||
logFileName := fmt.Sprintf("./%v/bootnode-%v-%v.log", logFolder, ip, port) |
||||
h := log.MultiHandler( |
||||
log.StreamHandler(os.Stdout, log.TerminalFormat(false)), |
||||
log.Must.FileHandler(logFileName, log.JSONFormat()), // Log to file
|
||||
) |
||||
log.Root().SetHandler(h) |
||||
} |
||||
|
||||
func main() { |
||||
ip := flag.String("ip", "127.0.0.1", "IP of the node") |
||||
port := flag.String("port", "9876", "port of the node.") |
||||
logFolder := flag.String("log_folder", "latest", "the folder collecting the logs of this execution") |
||||
keyFile := flag.String("key", "./.bnkey", "the private key file of the bootnode") |
||||
versionFlag := flag.Bool("version", false, "Output version info") |
||||
|
||||
flag.Parse() |
||||
|
||||
if *versionFlag { |
||||
printVersion(os.Args[0]) |
||||
} |
||||
|
||||
// Logging setup
|
||||
utils.SetPortAndIP(*port, *ip) |
||||
|
||||
// Init logging.
|
||||
loggingInit(*logFolder, *ip, *port) |
||||
|
||||
privKey, err := utils.LoadKeyFromFile(*keyFile) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
var selfPeer = p2p.Peer{IP: *ip, Port: *port} |
||||
|
||||
host, err := p2pimpl.NewHost(&selfPeer, privKey) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
log.Info("bootnode", "BN_MA", fmt.Sprintf("/ip4/%s/tcp/%s/p2p/%s", *ip, *port, host.GetID().Pretty())) |
||||
|
||||
dataStore := dsync.MutexWrap(ds.NewMapDatastore()) |
||||
dht := kaddht.NewDHT(context.Background(), host.GetP2PHost(), dataStore) |
||||
|
||||
if err := dht.Bootstrap(context.Background()); err != nil { |
||||
log.Error("failed to bootstrap DHT") |
||||
panic(err) |
||||
} |
||||
|
||||
select {} |
||||
} |
@ -1,319 +1,241 @@ |
||||
package consensus |
||||
|
||||
import ( |
||||
"bytes" |
||||
"github.com/harmony-one/bls/ffi/go/bls" |
||||
bls_cosi "github.com/harmony-one/harmony/crypto/bls" |
||||
|
||||
"github.com/dedis/kyber/sign/schnorr" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
protobuf "github.com/golang/protobuf/proto" |
||||
consensus_proto "github.com/harmony-one/harmony/api/consensus" |
||||
"github.com/harmony-one/harmony/core/types" |
||||
"github.com/harmony-one/harmony/crypto" |
||||
"github.com/harmony-one/harmony/internal/attack" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
"github.com/harmony-one/harmony/log" |
||||
) |
||||
|
||||
// ProcessMessageValidator dispatches validator's consensus message.
|
||||
func (consensus *Consensus) ProcessMessageValidator(payload []byte) { |
||||
message := consensus_proto.Message{} |
||||
err := message.XXX_Unmarshal(payload) |
||||
err := protobuf.Unmarshal(payload, &message) |
||||
|
||||
if err != nil { |
||||
consensus.Log.Error("Failed to unmarshal message payload.", "err", err, "consensus", consensus) |
||||
utils.GetLogInstance().Error("Failed to unmarshal message payload.", "err", err, "consensus", consensus) |
||||
} |
||||
|
||||
switch message.Type { |
||||
case consensus_proto.MessageType_ANNOUNCE: |
||||
consensus.processAnnounceMessage(message) |
||||
case consensus_proto.MessageType_CHALLENGE: |
||||
consensus.processChallengeMessage(message, ResponseDone) |
||||
case consensus_proto.MessageType_FINAL_CHALLENGE: |
||||
consensus.processChallengeMessage(message, FinalResponseDone) |
||||
case consensus_proto.MessageType_COLLECTIVE_SIG: |
||||
consensus.processCollectiveSigMessage(message) |
||||
case consensus_proto.MessageType_PREPARED: |
||||
consensus.processPreparedMessage(message) |
||||
case consensus_proto.MessageType_COMMITTED: |
||||
consensus.processCommittedMessage(message) |
||||
default: |
||||
consensus.Log.Error("Unexpected message type", "msgType", message.Type, "consensus", consensus) |
||||
utils.GetLogInstance().Error("Unexpected message type", "msgType", message.Type, "consensus", consensus) |
||||
} |
||||
} |
||||
|
||||
// Processes the announce message sent from the leader
|
||||
func (consensus *Consensus) processAnnounceMessage(message consensus_proto.Message) { |
||||
consensus.Log.Info("Received Announce Message", "nodeID", consensus.nodeID) |
||||
utils.GetLogInstance().Info("Received Announce Message", "nodeID", consensus.nodeID) |
||||
|
||||
consensusID := message.ConsensusId |
||||
blockHash := message.BlockHash |
||||
leaderID := message.SenderId |
||||
block := message.Payload |
||||
signature := message.Signature |
||||
|
||||
copy(consensus.blockHash[:], blockHash[:]) |
||||
consensus.block = block |
||||
|
||||
// Verify block data
|
||||
// check leader Id
|
||||
myLeaderID := utils.GetUniqueIDFromPeer(consensus.leader) |
||||
if leaderID != myLeaderID { |
||||
consensus.Log.Warn("Received message from wrong leader", "myLeaderID", myLeaderID, "receivedLeaderId", leaderID, "consensus", consensus) |
||||
return |
||||
} |
||||
|
||||
// Verify signature
|
||||
message.Signature = nil |
||||
messageBytes, err := message.XXX_Marshal([]byte{}, true) |
||||
if err != nil { |
||||
consensus.Log.Warn("Failed to marshal the announce message", "error", err) |
||||
} |
||||
if schnorr.Verify(crypto.Ed25519Curve, consensus.leader.PubKey, messageBytes, signature) != nil { |
||||
consensus.Log.Warn("Received message with invalid signature", "leaderKey", consensus.leader.PubKey, "consensus", consensus) |
||||
if !consensus.checkConsensusMessage(message, consensus.leader.PubKey) { |
||||
utils.GetLogInstance().Debug("Failed to check the leader message") |
||||
return |
||||
} |
||||
|
||||
// check block header is valid
|
||||
var blockObj types.Block |
||||
err = rlp.DecodeBytes(block, &blockObj) |
||||
err := rlp.DecodeBytes(block, &blockObj) |
||||
if err != nil { |
||||
consensus.Log.Warn("Unparseable block header data", "error", err) |
||||
utils.GetLogInstance().Warn("Unparseable block header data", "error", err) |
||||
return |
||||
} |
||||
consensus.block = block |
||||
|
||||
// Add block to received block cache
|
||||
consensus.mutex.Lock() |
||||
consensus.blocksReceived[consensusID] = &BlockConsensusStatus{block, consensus.state} |
||||
consensus.mutex.Unlock() |
||||
|
||||
// Add attack model of IncorrectResponse.
|
||||
// Add attack model of IncorrectResponse
|
||||
if attack.GetInstance().IncorrectResponse() { |
||||
consensus.Log.Warn("IncorrectResponse attacked") |
||||
return |
||||
} |
||||
|
||||
// check block hash
|
||||
hash := blockObj.Hash() |
||||
if !bytes.Equal(blockHash[:], hash[:]) { |
||||
consensus.Log.Warn("Block hash doesn't match", "consensus", consensus) |
||||
utils.GetLogInstance().Warn("IncorrectResponse attacked") |
||||
return |
||||
} |
||||
|
||||
// check block data (transactions
|
||||
// check block data transactions
|
||||
if !consensus.BlockVerifier(&blockObj) { |
||||
consensus.Log.Warn("Block content is not verified successfully", "consensus", consensus) |
||||
utils.GetLogInstance().Warn("Block content is not verified successfully", "consensus", consensus) |
||||
return |
||||
} |
||||
|
||||
// Commit and store the commit
|
||||
secret, msgToSend := consensus.constructCommitMessage(consensus_proto.MessageType_COMMIT) |
||||
consensus.secret[consensusID] = secret |
||||
|
||||
// Construct and send prepare message
|
||||
msgToSend := consensus.constructPrepareMessage() |
||||
consensus.SendMessage(consensus.leader, msgToSend) |
||||
// consensus.Log.Warn("Sending Commit to leader", "state", targetState)
|
||||
|
||||
// Set state to CommitDone
|
||||
consensus.state = CommitDone |
||||
consensus.state = PrepareDone |
||||
} |
||||
|
||||
// Processes the challenge message sent from the leader
|
||||
func (consensus *Consensus) processChallengeMessage(message consensus_proto.Message, targetState State) { |
||||
consensus.Log.Info("Received Challenge Message", "nodeID", consensus.nodeID) |
||||
// Processes the prepared message sent from the leader
|
||||
func (consensus *Consensus) processPreparedMessage(message consensus_proto.Message) { |
||||
utils.GetLogInstance().Info("Received Prepared Message", "nodeID", consensus.nodeID) |
||||
|
||||
consensusID := message.ConsensusId |
||||
blockHash := message.BlockHash |
||||
leaderID := message.SenderId |
||||
messagePayload := message.Payload |
||||
signature := message.Signature |
||||
|
||||
//#### Read payload data
|
||||
// TODO: use BLS-based multi-sig
|
||||
offset := 0 |
||||
// 33 byte of aggregated commit
|
||||
aggreCommit := messagePayload[offset : offset+33] |
||||
offset += 33 |
||||
// 48 byte of multi-sig
|
||||
multiSig := messagePayload[offset : offset+48] |
||||
offset += 48 |
||||
|
||||
// 33 byte of aggregated key
|
||||
aggreKey := messagePayload[offset : offset+33] |
||||
offset += 33 |
||||
|
||||
// 32 byte of challenge
|
||||
challenge := messagePayload[offset : offset+32] |
||||
offset += 32 |
||||
// bitmap
|
||||
bitmap := messagePayload[offset:] |
||||
//#### END Read payload data
|
||||
|
||||
// Update readyByConsensus for attack.
|
||||
attack.GetInstance().UpdateConsensusReady(consensusID) |
||||
|
||||
// Verify block data and the aggregated signatures
|
||||
// check leader Id
|
||||
myLeaderID := utils.GetUniqueIDFromPeer(consensus.leader) |
||||
if uint32(leaderID) != myLeaderID { |
||||
consensus.Log.Warn("Received message from wrong leader", "myLeaderID", myLeaderID, "receivedLeaderId", leaderID, "consensus", consensus) |
||||
return |
||||
} |
||||
|
||||
// Verify signature
|
||||
message.Signature = nil |
||||
messageBytes, err := message.XXX_Marshal([]byte{}, true) |
||||
if err != nil { |
||||
consensus.Log.Warn("Failed to marshal the announce message", "error", err) |
||||
} |
||||
if schnorr.Verify(crypto.Ed25519Curve, consensus.leader.PubKey, messageBytes, signature) != nil { |
||||
consensus.Log.Warn("Received message with invalid signature", "leaderKey", consensus.leader.PubKey, "consensus", consensus) |
||||
if !consensus.checkConsensusMessage(message, consensus.leader.PubKey) { |
||||
utils.GetLogInstance().Debug("Failed to check the leader message") |
||||
return |
||||
} |
||||
|
||||
// Add attack model of IncorrectResponse.
|
||||
if attack.GetInstance().IncorrectResponse() { |
||||
consensus.Log.Warn("IncorrectResponse attacked") |
||||
utils.GetLogInstance().Warn("IncorrectResponse attacked") |
||||
return |
||||
} |
||||
|
||||
consensus.mutex.Lock() |
||||
defer consensus.mutex.Unlock() |
||||
|
||||
// check block hash
|
||||
if !bytes.Equal(blockHash[:], consensus.blockHash[:]) { |
||||
consensus.Log.Warn("Block hash doesn't match", "consensus", consensus) |
||||
return |
||||
} |
||||
|
||||
aggCommitment := crypto.Ed25519Curve.Point() |
||||
aggCommitment.UnmarshalBinary(aggreCommit[:32]) |
||||
aggKey := crypto.Ed25519Curve.Point() |
||||
aggKey.UnmarshalBinary(aggreKey[:32]) |
||||
|
||||
reconstructedChallenge, err := crypto.Challenge(crypto.Ed25519Curve, aggCommitment, aggKey, blockHash) |
||||
|
||||
// Verify the multi-sig for prepare phase
|
||||
deserializedMultiSig := bls.Sign{} |
||||
err := deserializedMultiSig.Deserialize(multiSig) |
||||
if err != nil { |
||||
log.Error("Failed to reconstruct the challenge from commits and keys") |
||||
utils.GetLogInstance().Warn("Failed to deserialize the multi signature for prepare phase", "Error", err, "leader ID", leaderID) |
||||
return |
||||
} |
||||
|
||||
// For now, simply return the private key of this node.
|
||||
receivedChallenge := crypto.Ed25519Curve.Scalar() |
||||
err = receivedChallenge.UnmarshalBinary(challenge) |
||||
if err != nil { |
||||
log.Error("Failed to deserialize challenge", "err", err) |
||||
return |
||||
} |
||||
|
||||
if !reconstructedChallenge.Equal(receivedChallenge) { |
||||
log.Error("The challenge doesn't match the commitments and keys") |
||||
return |
||||
} |
||||
|
||||
response, err := crypto.Response(crypto.Ed25519Curve, consensus.priKey, consensus.secret[consensusID], receivedChallenge) |
||||
if err != nil { |
||||
log.Warn("validator failed to generate response", "err", err, "priKey", consensus.priKey, "nodeID", consensus.nodeID, "secret", consensus.secret[consensusID]) |
||||
mask, err := bls_cosi.NewMask(consensus.PublicKeys, nil) |
||||
mask.SetMask(bitmap) |
||||
if !deserializedMultiSig.VerifyHash(mask.AggregatePublic, blockHash) || err != nil { |
||||
utils.GetLogInstance().Warn("Failed to verify the multi signature for prepare phase", "Error", err, "leader ID", leaderID) |
||||
return |
||||
} |
||||
consensus.aggregatedPrepareSig = &deserializedMultiSig |
||||
consensus.prepareBitmap = mask |
||||
|
||||
msgTypeToSend := consensus_proto.MessageType_RESPONSE |
||||
if targetState == FinalResponseDone { |
||||
msgTypeToSend = consensus_proto.MessageType_FINAL_RESPONSE |
||||
} |
||||
msgToSend := consensus.constructResponseMessage(msgTypeToSend, response) |
||||
|
||||
// Construct and send the commit message
|
||||
multiSigAndBitmap := append(multiSig, bitmap...) |
||||
msgToSend := consensus.constructCommitMessage(multiSigAndBitmap) |
||||
consensus.SendMessage(consensus.leader, msgToSend) |
||||
// consensus.Log.Warn("Sending Response to leader", "state", targetState)
|
||||
// Set state to target state (ResponseDone, FinalResponseDone)
|
||||
consensus.state = targetState |
||||
|
||||
if consensus.state == FinalResponseDone { |
||||
// TODO: the block catch up logic is a temporary workaround for full failure node catchup. Implement the full node catchup logic
|
||||
// The logic is to roll up to the latest blocks one by one to try catching up with the leader.
|
||||
for { |
||||
val, ok := consensus.blocksReceived[consensus.consensusID] |
||||
if ok { |
||||
delete(consensus.blocksReceived, consensus.consensusID) |
||||
|
||||
consensus.blockHash = [32]byte{} |
||||
delete(consensus.secret, consensusID) |
||||
consensus.consensusID = consensusID + 1 // roll up one by one, until the next block is not received yet.
|
||||
|
||||
var blockObj types.Block |
||||
err := rlp.DecodeBytes(val.block, &blockObj) |
||||
if err != nil { |
||||
consensus.Log.Warn("Unparseable block header data", "error", err) |
||||
return |
||||
} |
||||
if err != nil { |
||||
consensus.Log.Debug("failed to construct the new block after consensus") |
||||
} |
||||
// check block data (transactions
|
||||
if !consensus.BlockVerifier(&blockObj) { |
||||
consensus.Log.Debug("[WARNING] Block content is not verified successfully", "consensusID", consensus.consensusID) |
||||
return |
||||
} |
||||
consensus.Log.Info("Finished Response. Adding block to chain", "numTx", len(blockObj.Transactions())) |
||||
consensus.OnConsensusDone(&blockObj) |
||||
} else { |
||||
break |
||||
} |
||||
|
||||
} |
||||
} |
||||
consensus.state = CommitDone |
||||
} |
||||
|
||||
// Processes the collective signature message sent from the leader
|
||||
func (consensus *Consensus) processCollectiveSigMessage(message consensus_proto.Message) { |
||||
// Processes the committed message sent from the leader
|
||||
func (consensus *Consensus) processCommittedMessage(message consensus_proto.Message) { |
||||
utils.GetLogInstance().Warn("Received Committed Message", "nodeID", consensus.nodeID) |
||||
|
||||
consensusID := message.ConsensusId |
||||
blockHash := message.BlockHash |
||||
leaderID := message.SenderId |
||||
messagePayload := message.Payload |
||||
signature := message.Signature |
||||
|
||||
//#### Read payload data
|
||||
collectiveSig := messagePayload[0:64] |
||||
bitmap := messagePayload[64:] |
||||
//#### END: Read payload data
|
||||
|
||||
// Verify block data
|
||||
// check leader Id
|
||||
myLeaderID := utils.GetUniqueIDFromPeer(consensus.leader) |
||||
if uint32(leaderID) != myLeaderID { |
||||
consensus.Log.Warn("Received message from wrong leader", "myLeaderID", myLeaderID, "receivedLeaderId", leaderID, "consensus", consensus) |
||||
return |
||||
} |
||||
offset := 0 |
||||
// 48 byte of multi-sig
|
||||
multiSig := messagePayload[offset : offset+48] |
||||
offset += 48 |
||||
|
||||
// Verify signature
|
||||
message.Signature = nil |
||||
messageBytes, err := message.XXX_Marshal([]byte{}, true) |
||||
if err != nil { |
||||
consensus.Log.Warn("Failed to marshal the announce message", "error", err) |
||||
} |
||||
if schnorr.Verify(crypto.Ed25519Curve, consensus.leader.PubKey, messageBytes, signature) != nil { |
||||
consensus.Log.Warn("Received message with invalid signature", "leaderKey", consensus.leader.PubKey, "consensus", consensus) |
||||
return |
||||
} |
||||
// bitmap
|
||||
bitmap := messagePayload[offset:] |
||||
//#### END Read payload data
|
||||
|
||||
// Verify collective signature
|
||||
err = crypto.Verify(crypto.Ed25519Curve, consensus.PublicKeys, blockHash, append(collectiveSig, bitmap...), crypto.NewThresholdPolicy((2*len(consensus.PublicKeys)/3)+1)) |
||||
if err != nil { |
||||
consensus.Log.Warn("Failed to verify the collective sig message", "consensusID", consensusID, "err", err, "bitmap", bitmap, "NodeID", consensus.nodeID, "#PK", len(consensus.PublicKeys)) |
||||
// Update readyByConsensus for attack.
|
||||
attack.GetInstance().UpdateConsensusReady(consensusID) |
||||
|
||||
if !consensus.checkConsensusMessage(message, consensus.leader.PubKey) { |
||||
utils.GetLogInstance().Debug("Failed to check the leader message") |
||||
return |
||||
} |
||||
|
||||
// Add attack model of IncorrectResponse.
|
||||
if attack.GetInstance().IncorrectResponse() { |
||||
consensus.Log.Warn("IncorrectResponse attacked") |
||||
utils.GetLogInstance().Warn("IncorrectResponse attacked") |
||||
return |
||||
} |
||||
|
||||
// check consensus Id
|
||||
if consensusID != consensus.consensusID { |
||||
consensus.Log.Warn("Received message with wrong consensus Id", "myConsensusId", consensus.consensusID, "theirConsensusId", consensusID, "consensus", consensus) |
||||
consensus.mutex.Lock() |
||||
defer consensus.mutex.Unlock() |
||||
|
||||
// Verify the multi-sig for commit phase
|
||||
deserializedMultiSig := bls.Sign{} |
||||
err := deserializedMultiSig.Deserialize(multiSig) |
||||
if err != nil { |
||||
utils.GetLogInstance().Warn("Failed to deserialize the multi signature for commit phase", "Error", err, "leader ID", leaderID) |
||||
return |
||||
} |
||||
|
||||
// check block hash
|
||||
if !bytes.Equal(blockHash[:], consensus.blockHash[:]) { |
||||
consensus.Log.Warn("Block hash doesn't match", "consensus", consensus) |
||||
mask, err := bls_cosi.NewMask(consensus.PublicKeys, nil) |
||||
mask.SetMask(bitmap) |
||||
prepareMultiSigAndBitmap := append(consensus.aggregatedPrepareSig.Serialize(), consensus.prepareBitmap.Bitmap...) |
||||
if !deserializedMultiSig.VerifyHash(mask.AggregatePublic, prepareMultiSigAndBitmap) || err != nil { |
||||
utils.GetLogInstance().Warn("Failed to verify the multi signature for commit phase", "Error", err, "leader ID", leaderID) |
||||
return |
||||
} |
||||
consensus.aggregatedCommitSig = &deserializedMultiSig |
||||
consensus.commitBitmap = mask |
||||
|
||||
secret, msgToSend := consensus.constructCommitMessage(consensus_proto.MessageType_FINAL_COMMIT) |
||||
// Store the commitment secret
|
||||
consensus.secret[consensusID] = secret |
||||
consensus.state = CommittedDone |
||||
// TODO: the block catch up logic is a temporary workaround for full failure node catchup. Implement the full node catchup logic
|
||||
// The logic is to roll up to the latest blocks one by one to try catching up with the leader.
|
||||
for { |
||||
val, ok := consensus.blocksReceived[consensus.consensusID] |
||||
if ok { |
||||
delete(consensus.blocksReceived, consensus.consensusID) |
||||
|
||||
consensus.SendMessage(consensus.leader, msgToSend) |
||||
consensus.blockHash = [32]byte{} |
||||
consensus.consensusID = consensusID + 1 // roll up one by one, until the next block is not received yet.
|
||||
|
||||
// Set state to CommitDone
|
||||
consensus.state = FinalCommitDone |
||||
var blockObj types.Block |
||||
err := rlp.DecodeBytes(val.block, &blockObj) |
||||
if err != nil { |
||||
utils.GetLogInstance().Warn("Unparseable block header data", "error", err) |
||||
return |
||||
} |
||||
if err != nil { |
||||
utils.GetLogInstance().Debug("failed to construct the new block after consensus") |
||||
} |
||||
// check block data (transactions)
|
||||
if !consensus.BlockVerifier(&blockObj) { |
||||
utils.GetLogInstance().Debug("[WARNING] Block content is not verified successfully", "consensusID", consensus.consensusID) |
||||
return |
||||
} |
||||
|
||||
// Put the signatures into the block
|
||||
copy(blockObj.Header().PrepareSignature[:], consensus.aggregatedPrepareSig.Serialize()[:]) |
||||
copy(blockObj.Header().PrepareBitmap[:], consensus.prepareBitmap.Bitmap) |
||||
copy(blockObj.Header().CommitSignature[:], consensus.aggregatedCommitSig.Serialize()[:]) |
||||
copy(blockObj.Header().CommitBitmap[:], consensus.commitBitmap.Bitmap) |
||||
utils.GetLogInstance().Info("Adding block to chain", "numTx", len(blockObj.Transactions())) |
||||
consensus.OnConsensusDone(&blockObj) |
||||
consensus.ResetState() |
||||
|
||||
select { |
||||
case consensus.VerifiedNewBlock <- &blockObj: |
||||
default: |
||||
utils.GetLogInstance().Info("[SYNC] consensus verified block send to chan failed", "blockHash", blockObj.Hash()) |
||||
continue |
||||
} |
||||
} else { |
||||
break |
||||
} |
||||
|
||||
} |
||||
} |
||||
|
@ -1,81 +1,47 @@ |
||||
package consensus |
||||
|
||||
import ( |
||||
"github.com/dedis/kyber" |
||||
consensus_proto "github.com/harmony-one/harmony/api/consensus" |
||||
"github.com/harmony-one/harmony/api/proto" |
||||
"github.com/harmony-one/harmony/crypto" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
// Construct the commit message to send to leader (assumption the consensus data is already verified)
|
||||
func (consensus *Consensus) constructCommitMessage(msgType consensus_proto.MessageType) (secret kyber.Scalar, commitMsg []byte) { |
||||
// Construct the prepare message to send to leader (assumption the consensus data is already verified)
|
||||
func (consensus *Consensus) constructPrepareMessage() []byte { |
||||
message := consensus_proto.Message{} |
||||
message.Type = msgType |
||||
message.Type = consensus_proto.MessageType_PREPARE |
||||
|
||||
// 4 byte consensus id
|
||||
message.ConsensusId = consensus.consensusID |
||||
consensus.populateMessageFields(&message) |
||||
|
||||
// 32 byte block hash
|
||||
message.BlockHash = consensus.blockHash[:] |
||||
|
||||
// 4 byte sender id
|
||||
message.SenderId = uint32(consensus.nodeID) |
||||
|
||||
// 32 byte of commit
|
||||
secret, commitment := crypto.Commit(crypto.Ed25519Curve) |
||||
bytes, err := commitment.MarshalBinary() |
||||
if err != nil { |
||||
consensus.Log.Debug("Failed to marshal commit", "error", err) |
||||
// 48 byte of bls signature
|
||||
sign := consensus.priKey.SignHash(message.BlockHash) |
||||
if sign != nil { |
||||
message.Payload = sign.Serialize() |
||||
} |
||||
message.Payload = bytes |
||||
|
||||
marshaledMessage, err := message.XXX_Marshal([]byte{}, true) |
||||
marshaledMessage, err := consensus.signAndMarshalConsensusMessage(&message) |
||||
if err != nil { |
||||
consensus.Log.Debug("Failed to marshal Announce message", "error", err) |
||||
utils.GetLogInstance().Error("Failed to sign and marshal the Prepare message", "error", err) |
||||
} |
||||
// 64 byte of signature on previous data
|
||||
signature := consensus.signMessage(marshaledMessage) |
||||
message.Signature = signature |
||||
|
||||
marshaledMessage, err = message.XXX_Marshal([]byte{}, true) |
||||
if err != nil { |
||||
consensus.Log.Debug("Failed to marshal Announce message", "error", err) |
||||
} |
||||
|
||||
return secret, proto.ConstructConsensusMessage(marshaledMessage) |
||||
return proto.ConstructConsensusMessage(marshaledMessage) |
||||
} |
||||
|
||||
// Construct the response message to send to leader (assumption the consensus data is already verified)
|
||||
func (consensus *Consensus) constructResponseMessage(msgType consensus_proto.MessageType, response kyber.Scalar) []byte { |
||||
// Construct the commit message, which contains the signature on the multi-sig of the prepare phase.
|
||||
func (consensus *Consensus) constructCommitMessage(multiSigAndBitmap []byte) []byte { |
||||
message := consensus_proto.Message{} |
||||
message.Type = msgType |
||||
|
||||
// 4 byte consensus id
|
||||
message.ConsensusId = consensus.consensusID |
||||
message.Type = consensus_proto.MessageType_COMMIT |
||||
|
||||
// 32 byte block hash
|
||||
message.BlockHash = consensus.blockHash[:] |
||||
consensus.populateMessageFields(&message) |
||||
|
||||
// 4 byte sender id
|
||||
message.SenderId = uint32(consensus.nodeID) |
||||
|
||||
bytes, err := response.MarshalBinary() |
||||
if err != nil { |
||||
consensus.Log.Debug("Failed to marshal response", "error", err) |
||||
} |
||||
message.Payload = bytes |
||||
|
||||
marshaledMessage, err := message.XXX_Marshal([]byte{}, true) |
||||
if err != nil { |
||||
consensus.Log.Debug("Failed to marshal Announce message", "error", err) |
||||
// 48 byte of bls signature
|
||||
sign := consensus.priKey.SignHash(multiSigAndBitmap) |
||||
if sign != nil { |
||||
message.Payload = sign.Serialize() |
||||
} |
||||
// 64 byte of signature on previous data
|
||||
signature := consensus.signMessage(marshaledMessage) |
||||
message.Signature = signature |
||||
|
||||
marshaledMessage, err = message.XXX_Marshal([]byte{}, true) |
||||
marshaledMessage, err := consensus.signAndMarshalConsensusMessage(&message) |
||||
if err != nil { |
||||
consensus.Log.Debug("Failed to marshal Announce message", "error", err) |
||||
utils.GetLogInstance().Error("Failed to sign and marshal the Commit message", "error", err) |
||||
} |
||||
return proto.ConstructConsensusMessage(marshaledMessage) |
||||
} |
||||
|
@ -1,3 +0,0 @@ |
||||
go test ./... -coverprofile=/tmp/coverage.out; |
||||
grep -v "harmony-one/harmony/core" /tmp/coverage.out | grep -v "harmony-one/harmony/internal/trie" | grep -v "harmony-one/harmony/internal/db" | grep -v "harmony-one/harmony/log" > /tmp/coverage1.out |
||||
go tool cover -func=/tmp/coverage1.out |
@ -0,0 +1,230 @@ |
||||
package bls |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
|
||||
"github.com/harmony-one/bls/ffi/go/bls" |
||||
) |
||||
|
||||
func init() { |
||||
bls.Init(bls.BLS12_381) |
||||
} |
||||
|
||||
// AggregateSig aggregates all the BLS signature into a single multi-signature.
|
||||
func AggregateSig(sigs []*bls.Sign) *bls.Sign { |
||||
var aggregatedSig bls.Sign |
||||
for _, sig := range sigs { |
||||
aggregatedSig.Add(sig) |
||||
} |
||||
return &aggregatedSig |
||||
} |
||||
|
||||
// Mask represents a cosigning participation bitmask.
|
||||
type Mask struct { |
||||
Bitmap []byte |
||||
publics []*bls.PublicKey |
||||
AggregatePublic *bls.PublicKey |
||||
} |
||||
|
||||
// NewMask returns a new participation bitmask for cosigning where all
|
||||
// cosigners are disabled by default. If a public key is given it verifies that
|
||||
// it is present in the list of keys and sets the corresponding index in the
|
||||
// bitmask to 1 (enabled).
|
||||
func NewMask(publics []*bls.PublicKey, myKey *bls.PublicKey) (*Mask, error) { |
||||
m := &Mask{ |
||||
publics: publics, |
||||
} |
||||
m.Bitmap = make([]byte, m.Len()) |
||||
m.AggregatePublic = &bls.PublicKey{} |
||||
if myKey != nil { |
||||
found := false |
||||
for i, key := range publics { |
||||
if key.IsEqual(myKey) { |
||||
m.SetBit(i, true) |
||||
found = true |
||||
break |
||||
} |
||||
} |
||||
if !found { |
||||
return nil, errors.New("key not found") |
||||
} |
||||
} |
||||
return m, nil |
||||
} |
||||
|
||||
// Mask returns a copy of the participation bitmask.
|
||||
func (m *Mask) Mask() []byte { |
||||
clone := make([]byte, len(m.Bitmap)) |
||||
copy(clone[:], m.Bitmap) |
||||
return clone |
||||
} |
||||
|
||||
// Len returns the Bitmap length in bytes.
|
||||
func (m *Mask) Len() int { |
||||
return (len(m.publics) + 7) >> 3 |
||||
} |
||||
|
||||
// SetMask sets the participation bitmask according to the given byte slice
|
||||
// interpreted in little-endian order, i.e., bits 0-7 of byte 0 correspond to
|
||||
// cosigners 0-7, bits 0-7 of byte 1 correspond to cosigners 8-15, etc.
|
||||
func (m *Mask) SetMask(mask []byte) error { |
||||
if m.Len() != len(mask) { |
||||
return fmt.Errorf("mismatching Bitmap lengths") |
||||
} |
||||
for i := range m.publics { |
||||
byt := i >> 3 |
||||
msk := byte(1) << uint(i&7) |
||||
if ((m.Bitmap[byt] & msk) == 0) && ((mask[byt] & msk) != 0) { |
||||
m.Bitmap[byt] ^= msk // flip bit in Bitmap from 0 to 1
|
||||
m.AggregatePublic.Add(m.publics[i]) |
||||
} |
||||
if ((m.Bitmap[byt] & msk) != 0) && ((mask[byt] & msk) == 0) { |
||||
m.Bitmap[byt] ^= msk // flip bit in Bitmap from 1 to 0
|
||||
m.AggregatePublic.Sub(m.publics[i]) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// SetBit enables (enable: true) or disables (enable: false) the bit
|
||||
// in the participation Bitmap of the given cosigner.
|
||||
func (m *Mask) SetBit(i int, enable bool) error { |
||||
if i >= len(m.publics) { |
||||
return errors.New("index out of range") |
||||
} |
||||
byt := i >> 3 |
||||
msk := byte(1) << uint(i&7) |
||||
if ((m.Bitmap[byt] & msk) == 0) && enable { |
||||
m.Bitmap[byt] ^= msk // flip bit in Bitmap from 0 to 1
|
||||
m.AggregatePublic.Add(m.publics[i]) |
||||
} |
||||
if ((m.Bitmap[byt] & msk) != 0) && !enable { |
||||
m.Bitmap[byt] ^= msk // flip bit in Bitmap from 1 to 0
|
||||
m.AggregatePublic.Sub(m.publics[i]) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// GetPubKeyFromMask will return pubkeys which masked either zero or one depending on the flag
|
||||
// it is used to show which signers are signed or not in the cosign message
|
||||
func (m *Mask) GetPubKeyFromMask(flag bool) []*bls.PublicKey { |
||||
pubKeys := []*bls.PublicKey{} |
||||
for i := range m.publics { |
||||
byt := i >> 3 |
||||
msk := byte(1) << uint(i&7) |
||||
if flag == true { |
||||
if (m.Bitmap[byt] & msk) != 0 { |
||||
pubKeys = append(pubKeys, m.publics[i]) |
||||
} |
||||
} else { |
||||
if (m.Bitmap[byt] & msk) == 0 { |
||||
pubKeys = append(pubKeys, m.publics[i]) |
||||
} |
||||
} |
||||
} |
||||
return pubKeys |
||||
} |
||||
|
||||
// IndexEnabled checks whether the given index is enabled in the Bitmap or not.
|
||||
func (m *Mask) IndexEnabled(i int) (bool, error) { |
||||
if i >= len(m.publics) { |
||||
return false, errors.New("index out of range") |
||||
} |
||||
byt := i >> 3 |
||||
msk := byte(1) << uint(i&7) |
||||
return ((m.Bitmap[byt] & msk) != 0), nil |
||||
} |
||||
|
||||
// KeyEnabled checks whether the index, corresponding to the given key, is
|
||||
// enabled in the Bitmap or not.
|
||||
func (m *Mask) KeyEnabled(public *bls.PublicKey) (bool, error) { |
||||
for i, key := range m.publics { |
||||
if key.IsEqual(public) { |
||||
return m.IndexEnabled(i) |
||||
} |
||||
} |
||||
return false, errors.New("key not found") |
||||
} |
||||
|
||||
// SetKey set the bit in the Bitmap for the given cosigner
|
||||
func (m *Mask) SetKey(public *bls.PublicKey, enable bool) error { |
||||
for i, key := range m.publics { |
||||
if key.IsEqual(public) { |
||||
return m.SetBit(i, enable) |
||||
} |
||||
} |
||||
return errors.New("key not found") |
||||
} |
||||
|
||||
// CountEnabled returns the number of enabled nodes in the CoSi participation
|
||||
// Bitmap.
|
||||
func (m *Mask) CountEnabled() int { |
||||
// hw is hamming weight
|
||||
hw := 0 |
||||
for i := range m.publics { |
||||
byt := i >> 3 |
||||
msk := byte(1) << uint(i&7) |
||||
if (m.Bitmap[byt] & msk) != 0 { |
||||
hw++ |
||||
} |
||||
} |
||||
return hw |
||||
} |
||||
|
||||
// CountTotal returns the total number of nodes this CoSi instance knows.
|
||||
func (m *Mask) CountTotal() int { |
||||
return len(m.publics) |
||||
} |
||||
|
||||
// AggregateMasks computes the bitwise OR of the two given participation masks.
|
||||
func AggregateMasks(a, b []byte) ([]byte, error) { |
||||
if len(a) != len(b) { |
||||
return nil, errors.New("mismatching Bitmap lengths") |
||||
} |
||||
m := make([]byte, len(a)) |
||||
for i := range m { |
||||
m[i] = a[i] | b[i] |
||||
} |
||||
return m, nil |
||||
} |
||||
|
||||
// Policy represents a fully customizable cosigning policy deciding what
|
||||
// cosigner sets are and aren't sufficient for a collective signature to be
|
||||
// considered acceptable to a verifier. The Check method may inspect the set of
|
||||
// participants that cosigned by invoking cosi.Mask and/or cosi.MaskBit, and may
|
||||
// use any other relevant contextual information (e.g., how security-critical
|
||||
// the operation relying on the collective signature is) in determining whether
|
||||
// the collective signature was produced by an acceptable set of cosigners.
|
||||
type Policy interface { |
||||
Check(m *Mask) bool |
||||
} |
||||
|
||||
// CompletePolicy is the default policy requiring that all participants have
|
||||
// cosigned to make a collective signature valid.
|
||||
type CompletePolicy struct { |
||||
} |
||||
|
||||
// Check verifies that all participants have contributed to a collective
|
||||
// signature.
|
||||
func (p CompletePolicy) Check(m *Mask) bool { |
||||
return m.CountEnabled() == m.CountTotal() |
||||
} |
||||
|
||||
// ThresholdPolicy allows specifying a simple t-of-n policy requiring that at
|
||||
// least the given threshold number of participants t have cosigned to make a
|
||||
// collective signature valid.
|
||||
type ThresholdPolicy struct { |
||||
thold int |
||||
} |
||||
|
||||
// NewThresholdPolicy returns a new ThresholdPolicy with the given threshold.
|
||||
func NewThresholdPolicy(thold int) *ThresholdPolicy { |
||||
return &ThresholdPolicy{thold: thold} |
||||
} |
||||
|
||||
// Check verifies that at least a threshold number of participants have
|
||||
// contributed to a collective signature.
|
||||
func (p ThresholdPolicy) Check(m *Mask) bool { |
||||
return m.CountEnabled() >= p.thold |
||||
} |
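
Taken together, the mask, aggregation, and policy pieces above support the usual cosigning flow: each participant signs the same hash, the collector aggregates the signatures and flips the matching mask bits, and a verifier checks the aggregate against `mask.AggregatePublic` plus a threshold policy. The sketch below shows that flow end to end; the `bls_cosi` import path is an assumption made for illustration and may not match the package's real location in the repository.

```go
package main

import (
	"fmt"

	"github.com/harmony-one/bls/ffi/go/bls"
	// Assumed import path for the cosigning package above; adjust to its real
	// location in the repository.
	bls_cosi "github.com/harmony-one/harmony/crypto/bls"
)

func main() {
	// Three hypothetical cosigners with fresh BLS key pairs. The curve is
	// already initialized by the package's init(), so no explicit Init call
	// is needed here.
	secs := make([]bls.SecretKey, 3)
	pubs := make([]*bls.PublicKey, 3)
	for i := range secs {
		secs[i].SetByCSPRNG()
		pubs[i] = secs[i].GetPublicKey()
	}

	blockHash := []byte("example-block-hash-to-cosign")

	// Cosigners 0 and 1 sign the hash; the collector aggregates their
	// signatures and records their participation in the mask.
	mask, err := bls_cosi.NewMask(pubs, nil)
	if err != nil {
		panic(err)
	}
	mask.SetKey(pubs[0], true)
	mask.SetKey(pubs[1], true)
	aggSig := bls_cosi.AggregateSig([]*bls.Sign{
		secs[0].SignHash(blockHash),
		secs[1].SignHash(blockHash),
	})

	// A verifier accepts the collective signature if it verifies against the
	// aggregate public key of the enabled cosigners and the participation set
	// satisfies a 2-of-3 threshold policy.
	ok := aggSig.VerifyHash(mask.AggregatePublic, blockHash) &&
		bls_cosi.NewThresholdPolicy(2).Check(mask)
	fmt.Println("collective signature accepted:", ok)
}
```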
@ -0,0 +1,38 @@ |
||||
package bls |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/harmony-one/bls/ffi/go/bls" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
// Test the basic functionality of a BLS multi-sig mask.
|
||||
func TestNewMask(test *testing.T) { |
||||
_, pubKey1 := utils.GenKeyBLS("127.0.0.1", "5555") |
||||
_, pubKey2 := utils.GenKeyBLS("127.0.0.1", "6666") |
||||
_, pubKey3 := utils.GenKeyBLS("127.0.0.1", "7777") |
||||
|
||||
mask, err := NewMask([]*bls.PublicKey{pubKey1, pubKey2, pubKey3}, pubKey1) |
||||
|
||||
if err != nil { |
||||
test.Errorf("Failed to create a new Mask: %s", err) |
||||
} |
||||
|
||||
if mask.Len() != 1 { |
||||
test.Errorf("Mask created with wrong size: %d", mask.Len()) |
||||
} |
||||
|
||||
enabled, err := mask.KeyEnabled(pubKey1) |
||||
if !enabled || err != nil { |
||||
test.Errorf("My key pubKey1 should have been enabled: %s", err) |
||||
} |
||||
|
||||
if mask.CountEnabled() != 1 { |
||||
test.Error("Only one key should have been enabled") |
||||
} |
||||
|
||||
if mask.CountTotal() != 3 { |
||||
test.Error("Should have a total of 3 keys") |
||||
} |
||||
} |
@ -0,0 +1,3 @@ |
||||
The beaconchain package is currently a centralized service that assigns each prospective new node (which uses the newnode package) to a specific shard. |
||||
If N is the number of shards, supplied as a parameter at boot-up, the first N joining nodes are assigned as the leaders of those N shards. Nodes that join afterwards are assigned to shards based on their order of entry. |
||||
In the future, randomness generation will be decentralized. The randomness will be provided to a new node once its PoS has been verified, and the node will then be able to compute its own shard assignment automatically. |
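
The assignment rule above is simple enough to sketch. The snippet below is a minimal illustration, assuming that "order of entry" maps to round-robin placement by arrival index; the function and names are hypothetical and are not the actual beaconchain API.

```go
package main

import "fmt"

// assignShard places the k-th joining node (0-based) under the rule described
// above: with numShards shards, the first numShards nodes become the leaders
// of shards 0..numShards-1, and later nodes are placed by their order of
// entry. Round-robin placement is an assumption made for this sketch, not the
// actual beaconchain implementation.
func assignShard(k, numShards int) (shard int, isLeader bool) {
	return k % numShards, k < numShards
}

func main() {
	const numShards = 3
	for k := 0; k < 7; k++ {
		shard, leader := assignShard(k, numShards)
		fmt.Printf("node %d -> shard %d (leader: %v)\n", k, shard, leader)
	}
}
```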
@ -1,242 +0,0 @@ |
||||
package db |
||||
|
||||
import ( |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/harmony-one/harmony/log" |
||||
"github.com/syndtr/goleveldb/leveldb" |
||||
"github.com/syndtr/goleveldb/leveldb/errors" |
||||
"github.com/syndtr/goleveldb/leveldb/filter" |
||||
"github.com/syndtr/goleveldb/leveldb/iterator" |
||||
"github.com/syndtr/goleveldb/leveldb/opt" |
||||
"github.com/syndtr/goleveldb/leveldb/util" |
||||
) |
||||
|
||||
// Constants for db which can be used to customize later.
|
||||
const ( |
||||
writePauseWarningThrottler = 1 * time.Minute |
||||
) |
||||
|
||||
// LDBDatabase is database based on leveldb.
|
||||
type LDBDatabase struct { |
||||
fn string // filename for reporting
|
||||
db *leveldb.DB // LevelDB instance
|
||||
|
||||
quitLock sync.Mutex // Mutex protecting the quit channel access
|
||||
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
|
||||
|
||||
log log.Logger // Contextual logger tracking the database path
|
||||
} |
||||
|
||||
// NewLDBDatabase returns a LevelDB wrapped object.
|
||||
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) { |
||||
logger := log.New("database", file) |
||||
|
||||
// Ensure we have some minimal caching and file guarantees
|
||||
if cache < 16 { |
||||
cache = 16 |
||||
} |
||||
if handles < 16 { |
||||
handles = 16 |
||||
} |
||||
logger.Info("Allocated cache and file handles", "cache", cache, "handles", handles) |
||||
|
||||
// Open the db and recover any potential corruptions
|
||||
db, err := leveldb.OpenFile(file, &opt.Options{ |
||||
OpenFilesCacheCapacity: handles, |
||||
BlockCacheCapacity: cache / 2 * opt.MiB, |
||||
WriteBuffer: cache / 4 * opt.MiB, // Two of these are used internally
|
||||
Filter: filter.NewBloomFilter(10), |
||||
}) |
||||
if _, corrupted := err.(*errors.ErrCorrupted); corrupted { |
||||
db, err = leveldb.RecoverFile(file, nil) |
||||
} |
||||
// (Re)check for errors and abort if opening of the db failed
|
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &LDBDatabase{ |
||||
fn: file, |
||||
db: db, |
||||
log: logger, |
||||
}, nil |
||||
} |
||||
|
||||
// Path returns the path to the database directory.
|
||||
func (db *LDBDatabase) Path() string { |
||||
return db.fn |
||||
} |
||||
|
||||
// Put puts the given key / value to the queue
|
||||
func (db *LDBDatabase) Put(key []byte, value []byte) error { |
||||
return db.db.Put(key, value, nil) |
||||
} |
||||
|
||||
// Has is used to check if the given key is included into the database.
|
||||
func (db *LDBDatabase) Has(key []byte) (bool, error) { |
||||
return db.db.Has(key, nil) |
||||
} |
||||
|
||||
// Get returns the given key if it's present.
|
||||
func (db *LDBDatabase) Get(key []byte) ([]byte, error) { |
||||
dat, err := db.db.Get(key, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return dat, nil |
||||
} |
||||
|
||||
// Delete deletes the key from the queue and database
|
||||
func (db *LDBDatabase) Delete(key []byte) error { |
||||
return db.db.Delete(key, nil) |
||||
} |
||||
|
||||
// NewIterator returns the current iterator of the db.
|
||||
func (db *LDBDatabase) NewIterator() iterator.Iterator { |
||||
return db.db.NewIterator(nil, nil) |
||||
} |
||||
|
||||
// NewIteratorWithPrefix returns a iterator to iterate over subset of database content with a particular prefix.
|
||||
func (db *LDBDatabase) NewIteratorWithPrefix(prefix []byte) iterator.Iterator { |
||||
return db.db.NewIterator(util.BytesPrefix(prefix), nil) |
||||
} |
||||
|
||||
// Close closes the database.
|
||||
func (db *LDBDatabase) Close() { |
||||
// Stop the metrics collection to avoid internal database races
|
||||
db.quitLock.Lock() |
||||
defer db.quitLock.Unlock() |
||||
|
||||
if db.quitChan != nil { |
||||
errc := make(chan error) |
||||
db.quitChan <- errc |
||||
if err := <-errc; err != nil { |
||||
db.log.Error("Metrics collection failed", "err", err) |
||||
} |
||||
db.quitChan = nil |
||||
} |
||||
err := db.db.Close() |
||||
if err == nil { |
||||
db.log.Info("Database closed") |
||||
} else { |
||||
db.log.Error("Failed to close database", "err", err) |
||||
} |
||||
} |
||||
|
||||
// LDB returns the pointer to leveldb on which the LDBDatabase is built.
|
||||
func (db *LDBDatabase) LDB() *leveldb.DB { |
||||
return db.db |
||||
} |
||||
|
||||
/* TODO(minhdoan): Might add meter func from ethereum-go repo |
||||
*/ |
||||
|
||||
// NewBatch returns Batch interface for a series of leveldb transactions.
|
||||
func (db *LDBDatabase) NewBatch() Batch { |
||||
return &ldbBatch{db: db.db, b: new(leveldb.Batch)} |
||||
} |
||||
|
||||
type ldbBatch struct { |
||||
db *leveldb.DB |
||||
b *leveldb.Batch |
||||
size int |
||||
} |
||||
|
||||
// Put is used to put key, value into the batch of transactions.
|
||||
func (b *ldbBatch) Put(key, value []byte) error { |
||||
b.b.Put(key, value) |
||||
b.size += len(value) |
||||
return nil |
||||
} |
||||
|
||||
// Delete is used to delete the item associated with the given key as a part of the batch.
|
||||
func (b *ldbBatch) Delete(key []byte) error { |
||||
b.b.Delete(key) |
||||
b.size++ |
||||
return nil |
||||
} |
||||
|
||||
// Write writes the patch of transactions.
|
||||
func (b *ldbBatch) Write() error { |
||||
return b.db.Write(b.b, nil) |
||||
} |
||||
|
||||
// ValueSize returns the size of the patch.
|
||||
func (b *ldbBatch) ValueSize() int { |
||||
return b.size |
||||
} |
||||
|
||||
// Reset resets the batch.
|
||||
func (b *ldbBatch) Reset() { |
||||
b.b.Reset() |
||||
b.size = 0 |
||||
} |
||||
|
||||
type table struct { |
||||
db Database |
||||
prefix string |
||||
} |
||||
|
||||
// NewTable returns a Database object that prefixes all keys with a given
|
||||
// string.
|
||||
func NewTable(db Database, prefix string) Database { |
||||
return &table{ |
||||
db: db, |
||||
prefix: prefix, |
||||
} |
||||
} |
||||
|
||||
func (dt *table) Put(key []byte, value []byte) error { |
||||
return dt.db.Put(append([]byte(dt.prefix), key...), value) |
||||
} |
||||
|
||||
func (dt *table) Has(key []byte) (bool, error) { |
||||
return dt.db.Has(append([]byte(dt.prefix), key...)) |
||||
} |
||||
|
||||
func (dt *table) Get(key []byte) ([]byte, error) { |
||||
return dt.db.Get(append([]byte(dt.prefix), key...)) |
||||
} |
||||
|
||||
func (dt *table) Delete(key []byte) error { |
||||
return dt.db.Delete(append([]byte(dt.prefix), key...)) |
||||
} |
||||
|
||||
func (dt *table) Close() { |
||||
// Do nothing; don't close the underlying DB.
|
||||
} |
||||
|
||||
type tableBatch struct { |
||||
batch Batch |
||||
prefix string |
||||
} |
||||
|
||||
// NewTableBatch returns a Batch object which prefixes all keys with a given string.
|
||||
func NewTableBatch(db Database, prefix string) Batch { |
||||
return &tableBatch{db.NewBatch(), prefix} |
||||
} |
||||
|
||||
func (dt *table) NewBatch() Batch { |
||||
return &tableBatch{dt.db.NewBatch(), dt.prefix} |
||||
} |
||||
|
||||
func (tb *tableBatch) Put(key, value []byte) error { |
||||
return tb.batch.Put(append([]byte(tb.prefix), key...), value) |
||||
} |
||||
|
||||
func (tb *tableBatch) Delete(key []byte) error { |
||||
return tb.batch.Delete(append([]byte(tb.prefix), key...)) |
||||
} |
||||
|
||||
func (tb *tableBatch) Write() error { |
||||
return tb.batch.Write() |
||||
} |
||||
|
||||
func (tb *tableBatch) ValueSize() int { |
||||
return tb.batch.ValueSize() |
||||
} |
||||
|
||||
func (tb *tableBatch) Reset() { |
||||
tb.batch.Reset() |
||||
} |
@ -1,194 +0,0 @@ |
||||
package db |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"os" |
||||
"strconv" |
||||
"sync" |
||||
"testing" |
||||
) |
||||
|
||||
func newTestLDB() (*LDBDatabase, func()) { |
||||
dirname, err := ioutil.TempDir(os.TempDir(), "db_test_") |
||||
if err != nil { |
||||
panic("failed to create test file: " + err.Error()) |
||||
} |
||||
db, err := NewLDBDatabase(dirname, 0, 0) |
||||
if err != nil { |
||||
panic("failed to create test database: " + err.Error()) |
||||
} |
||||
|
||||
return db, func() { |
||||
db.Close() |
||||
os.RemoveAll(dirname) |
||||
} |
||||
} |
||||
|
||||
var testValues = []string{"", "a", "1251", "\x00123\x00"} |
||||
|
||||
func TestLDB_PutGet(t *testing.T) { |
||||
db, remove := newTestLDB() |
||||
defer remove() |
||||
testPutGet(db, t) |
||||
} |
||||
|
||||
func TestMemoryDB_PutGet(t *testing.T) { |
||||
testPutGet(NewMemDatabase(), t) |
||||
} |
||||
|
||||
func testPutGet(db Database, t *testing.T) { |
||||
t.Parallel() |
||||
|
||||
for _, k := range testValues { |
||||
err := db.Put([]byte(k), nil) |
||||
if err != nil { |
||||
t.Fatalf("put failed: %v", err) |
||||
} |
||||
} |
||||
|
||||
for _, k := range testValues { |
||||
data, err := db.Get([]byte(k)) |
||||
if err != nil { |
||||
t.Fatalf("get failed: %v", err) |
||||
} |
||||
if len(data) != 0 { |
||||
t.Fatalf("get returned wrong result, got %q expected nil", string(data)) |
||||
} |
||||
} |
||||
|
||||
_, err := db.Get([]byte("non-exist-key")) |
||||
if err == nil { |
||||
t.Fatalf("expect to return a not found error") |
||||
} |
||||
|
||||
for _, v := range testValues { |
||||
err := db.Put([]byte(v), []byte(v)) |
||||
if err != nil { |
||||
t.Fatalf("put failed: %v", err) |
||||
} |
||||
} |
||||
|
||||
for _, v := range testValues { |
||||
data, err := db.Get([]byte(v)) |
||||
if err != nil { |
||||
t.Fatalf("get failed: %v", err) |
||||
} |
||||
if !bytes.Equal(data, []byte(v)) { |
||||
t.Fatalf("get returned wrong result, got %q expected %q", string(data), v) |
||||
} |
||||
} |
||||
|
||||
for _, v := range testValues { |
||||
err := db.Put([]byte(v), []byte("?")) |
||||
if err != nil { |
||||
t.Fatalf("put override failed: %v", err) |
||||
} |
||||
} |
||||
|
||||
for _, v := range testValues { |
||||
data, err := db.Get([]byte(v)) |
||||
if err != nil { |
||||
t.Fatalf("get failed: %v", err) |
||||
} |
||||
if !bytes.Equal(data, []byte("?")) { |
||||
t.Fatalf("get returned wrong result, got %q expected ?", string(data)) |
||||
} |
||||
} |
||||
|
||||
for _, v := range testValues { |
||||
orig, err := db.Get([]byte(v)) |
||||
if err != nil { |
||||
t.Fatalf("get failed: %v", err) |
||||
} |
||||
orig[0] = byte(0xff) |
||||
data, err := db.Get([]byte(v)) |
||||
if err != nil { |
||||
t.Fatalf("get failed: %v", err) |
||||
} |
||||
if !bytes.Equal(data, []byte("?")) { |
||||
t.Fatalf("get returned wrong result, got %q expected ?", string(data)) |
||||
} |
||||
} |
||||
|
||||
for _, v := range testValues { |
||||
err := db.Delete([]byte(v)) |
||||
if err != nil { |
||||
t.Fatalf("delete %q failed: %v", v, err) |
||||
} |
||||
} |
||||
|
||||
for _, v := range testValues { |
||||
_, err := db.Get([]byte(v)) |
||||
if err == nil { |
||||
t.Fatalf("got deleted value %q", v) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestLDB_ParallelPutGet(t *testing.T) { |
||||
db, remove := newTestLDB() |
||||
defer remove() |
||||
testParallelPutGet(db, t) |
||||
} |
||||
|
||||
func TestMemoryDB_ParallelPutGet(t *testing.T) { |
||||
testParallelPutGet(NewMemDatabase(), t) |
||||
} |
||||
|
||||
func testParallelPutGet(db Database, t *testing.T) { |
||||
const n = 8 |
||||
var pending sync.WaitGroup |
||||
|
||||
pending.Add(n) |
||||
for i := 0; i < n; i++ { |
||||
go func(key string) { |
||||
defer pending.Done() |
||||
err := db.Put([]byte(key), []byte("v"+key)) |
||||
if err != nil { |
||||
panic("put failed: " + err.Error()) |
||||
} |
||||
}(strconv.Itoa(i)) |
||||
} |
||||
pending.Wait() |
||||
|
||||
pending.Add(n) |
||||
for i := 0; i < n; i++ { |
||||
go func(key string) { |
||||
defer pending.Done() |
||||
data, err := db.Get([]byte(key)) |
||||
if err != nil { |
||||
panic("get failed: " + err.Error()) |
||||
} |
||||
if !bytes.Equal(data, []byte("v"+key)) { |
||||
panic(fmt.Sprintf("get failed, got %q expected %q", []byte(data), []byte("v"+key))) |
||||
} |
||||
}(strconv.Itoa(i)) |
||||
} |
||||
pending.Wait() |
||||
|
||||
pending.Add(n) |
||||
for i := 0; i < n; i++ { |
||||
go func(key string) { |
||||
defer pending.Done() |
||||
err := db.Delete([]byte(key)) |
||||
if err != nil { |
||||
panic("delete failed: " + err.Error()) |
||||
} |
||||
}(strconv.Itoa(i)) |
||||
} |
||||
pending.Wait() |
||||
|
||||
pending.Add(n) |
||||
for i := 0; i < n; i++ { |
||||
go func(key string) { |
||||
defer pending.Done() |
||||
_, err := db.Get([]byte(key)) |
||||
if err == nil { |
||||
panic("get succeeded") |
||||
} |
||||
}(strconv.Itoa(i)) |
||||
} |
||||
pending.Wait() |
||||
} |
@ -1,36 +0,0 @@ |
||||
package db |
||||
|
||||
// IdealBatchSize is the max size of batch transactions.
|
||||
// The value was determined empirically.
|
||||
const IdealBatchSize = 100 * 1024 |
||||
|
||||
// Putter wraps the database write operation supported by both batches and regular databases.
|
||||
type Putter interface { |
||||
Put(key []byte, value []byte) error |
||||
} |
||||
|
||||
// Deleter wraps the database delete operation supported by both batches and regular databases.
|
||||
type Deleter interface { |
||||
Delete(key []byte) error |
||||
} |
||||
|
||||
// Database wraps all database operations. All methods are safe for concurrent use.
|
||||
type Database interface { |
||||
Putter |
||||
Deleter |
||||
Get(key []byte) ([]byte, error) |
||||
Has(key []byte) (bool, error) |
||||
Close() |
||||
NewBatch() Batch |
||||
} |
||||
|
||||
// Batch is a write-only database that commits changes to its host database
|
||||
// when Write is called. Batch cannot be used concurrently.
|
||||
type Batch interface { |
||||
Putter |
||||
Deleter |
||||
ValueSize() int // amount of data in the batch
|
||||
Write() error |
||||
// Reset resets the batch for reuse
|
||||
Reset() |
||||
} |
@ -1,135 +0,0 @@ |
||||
package db |
||||
|
||||
import ( |
||||
"errors" |
||||
"sync" |
||||
|
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
) |
||||
|
||||
// MemDatabase is the test memory database. It won't be used for any production.
|
||||
type MemDatabase struct { |
||||
db map[string][]byte |
||||
lock sync.RWMutex |
||||
} |
||||
|
||||
// NewMemDatabase returns a pointer of the new creation of MemDatabase.
|
||||
func NewMemDatabase() *MemDatabase { |
||||
return &MemDatabase{ |
||||
db: make(map[string][]byte), |
||||
} |
||||
} |
||||
|
||||
// NewMemDatabaseWithCap returns a pointer of the new creation of MemDatabase with the given size.
|
||||
func NewMemDatabaseWithCap(size int) *MemDatabase { |
||||
return &MemDatabase{ |
||||
db: make(map[string][]byte, size), |
||||
} |
||||
} |
||||
|
||||
// Put puts (key, value) item into MemDatabase.
|
||||
func (db *MemDatabase) Put(key []byte, value []byte) error { |
||||
db.lock.Lock() |
||||
defer db.lock.Unlock() |
||||
|
||||
db.db[string(key)] = utils.CopyBytes(value) |
||||
return nil |
||||
} |
||||
|
||||
// Has checks if the key is included into MemDatabase.
|
||||
func (db *MemDatabase) Has(key []byte) (bool, error) { |
||||
db.lock.RLock() |
||||
defer db.lock.RUnlock() |
||||
|
||||
_, ok := db.db[string(key)] |
||||
return ok, nil |
||||
} |
||||
|
||||
// Get gets value of the given key.
|
||||
func (db *MemDatabase) Get(key []byte) ([]byte, error) { |
||||
db.lock.RLock() |
||||
defer db.lock.RUnlock() |
||||
|
||||
if entry, ok := db.db[string(key)]; ok { |
||||
return utils.CopyBytes(entry), nil |
||||
} |
||||
return nil, errors.New("not found") |
||||
} |
||||
|
||||
// Keys returns all keys of the given MemDatabase.
|
||||
func (db *MemDatabase) Keys() [][]byte { |
||||
db.lock.RLock() |
||||
defer db.lock.RUnlock() |
||||
|
||||
keys := [][]byte{} |
||||
for key := range db.db { |
||||
keys = append(keys, []byte(key)) |
||||
} |
||||
return keys |
||||
} |
||||
|
||||
// Delete deletes the given key.
|
||||
func (db *MemDatabase) Delete(key []byte) error { |
||||
db.lock.Lock() |
||||
defer db.lock.Unlock() |
||||
|
||||
delete(db.db, string(key)) |
||||
return nil |
||||
} |
||||
|
||||
// Close closes the given db.
|
||||
func (db *MemDatabase) Close() {} |
||||
|
||||
// NewBatch returns a batch of MemDatabase transactions.
|
||||
func (db *MemDatabase) NewBatch() Batch { |
||||
return &memBatch{db: db} |
||||
} |
||||
|
||||
// Len returns the length of the given db.
|
||||
func (db *MemDatabase) Len() int { return len(db.db) } |
||||
|
||||
type kv struct { |
||||
k, v []byte |
||||
del bool |
||||
} |
||||
|
||||
type memBatch struct { |
||||
db *MemDatabase |
||||
writes []kv |
||||
size int |
||||
} |
||||
|
||||
func (b *memBatch) Put(key, value []byte) error { |
||||
b.writes = append(b.writes, kv{utils.CopyBytes(key), utils.CopyBytes(value), false}) |
||||
b.size += len(value) |
||||
return nil |
||||
} |
||||
|
||||
func (b *memBatch) Delete(key []byte) error { |
||||
b.writes = append(b.writes, kv{utils.CopyBytes(key), nil, true}) |
||||
b.size++ |
||||
return nil |
||||
} |
||||
|
||||
func (b *memBatch) Write() error { |
||||
b.db.lock.Lock() |
||||
defer b.db.lock.Unlock() |
||||
|
||||
for _, kv := range b.writes { |
||||
if kv.del { |
||||
delete(b.db.db, string(kv.k)) |
||||
continue |
||||
} |
||||
b.db.db[string(kv.k)] = kv.v |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (b *memBatch) ValueSize() int { |
||||
return b.size |
||||
} |
||||
|
||||
func (b *memBatch) Reset() { |
||||
b.writes = b.writes[:0] |
||||
b.size = 0 |
||||
} |
@ -0,0 +1 @@ |
||||
The newnode package handles the interactions of a new candidate node that wants to join the network. At the moment this interaction consists of contacting the beaconchain, getting assigned a shard, and finding out that shard's leader. In the future this package will be merged into the node package. |
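
As a rough illustration of that exchange, the sketch below models the candidate node's side with hypothetical request/response types; the real newnode and beaconchain wire formats are defined elsewhere in the repository.

```go
package main

import "fmt"

// JoinRequest and ShardAssignment are illustrative stand-ins for the real
// newnode/beaconchain message types defined elsewhere in the repository.
type JoinRequest struct {
	NodeID string
	IP     string
	Port   string
}

type ShardAssignment struct {
	ShardID    uint32
	LeaderIP   string
	LeaderPort string
}

// joinNetwork sketches the candidate node's side of the exchange: announce
// yourself to the (currently centralized) beaconchain and wait for it to
// reply with your shard and that shard's leader.
func joinNetwork(beacon func(JoinRequest) ShardAssignment, req JoinRequest) ShardAssignment {
	return beacon(req)
}

func main() {
	// Stand-in for the network call to the beaconchain service.
	fakeBeacon := func(req JoinRequest) ShardAssignment {
		return ShardAssignment{ShardID: 1, LeaderIP: "127.0.0.1", LeaderPort: "9001"}
	}
	a := joinNetwork(fakeBeacon, JoinRequest{NodeID: "node-7", IP: "127.0.0.1", Port: "9107"})
	fmt.Printf("assigned to shard %d, leader %s:%s\n", a.ShardID, a.LeaderIP, a.LeaderPort)
}
```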