Merge branch 'master' of github.com:harmony-one/harmony into fixNewbinary

pull/437/head
ak 6 years ago
commit f265aa59ba
  1.  36  api/service/discovery/service.go
  2.  17  api/service/networkinfo/service.go
  3.   8  cmd/harmony.go
  4.  39  consensus/consensus.go
  5.  31  consensus/consensus_leader.go
  6.  13  consensus/consensus_validator.go
  7.   2  consensus/engine/consensus_engine.go
  8.   2  consensus/engine/errors.go
  9.  10  core/block_validator.go
 10.  30  core/blockchain.go
 11.  12  core/chain_makers.go
 12.   4  core/evm.go
 13.  10  core/headerchain.go
 14.   5  core/resharding.go
 15.   6  core/state_processor.go
 16.   4  crypto/vrf/p256/p256_test.go
 17.   5  drand/drand.go
 18.  12  drand/drand_leader.go
 19.   4  internal/utils/singleton.go
 20.   9  node/node.go
 21.  93  node/node_handler.go
 22.   4  node/node_newblock.go
 23.   6  node/worker/worker.go
 24.   6  p2p/host/hostv2/hostv2.go
 25.   6  test/cal_tps.sh
 26.  12  test/configs/local_config2.txt
 27.  32  test/configs/oneshard.txt
 28.   5  test/configs/oneshard1.txt
 29.   2  test/deploy.sh
 30.   4  test/p2pchat/chat.go
 31.   4  test/testdata/directory.json
 32. 782  test/testdata/getentryresponse.json

api/service/discovery/service.go

@@ -1,6 +1,8 @@
 package discovery

 import (
+	"time"
+
 	"github.com/ethereum/go-ethereum/log"
 	proto_discovery "github.com/harmony-one/harmony/api/proto/discovery"
 	"github.com/harmony-one/harmony/p2p"
@@ -46,9 +48,14 @@ func (s *Service) StopService() {
 // Run is the main function of the service
 func (s *Service) Run() {
 	go s.contactP2pPeers()
+	// go s.pingPeer()
 }

 func (s *Service) contactP2pPeers() {
+	tick := time.NewTicker(5 * time.Second)
+	ping := proto_discovery.NewPingMessage(s.host.GetSelfPeer())
+	buffer := ping.ConstructPingMessage()
+	content := host.ConstructP2pMessage(byte(0), buffer)
 	for {
 		select {
 		case peer, ok := <-s.peerChan:
@@ -62,9 +69,17 @@ func (s *Service) contactP2pPeers() {
 			log.Debug("[DISCOVERY]", "add outgoing peer", peer)
 			// TODO: stop ping if pinged before
 			// TODO: call staking servcie here if it is a new node
-			s.pingPeer(peer)
+			if s.stakingChan != nil {
+				s.stakingChan <- peer
+			}
 		case <-s.stopChan:
+			log.Debug("[DISCOVERY] stop")
 			return
+		case <-tick.C:
+			err := s.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, content)
+			if err != nil {
+				log.Error("Failed to send ping message", "group", p2p.GroupIDBeacon)
+			}
 		}
 	}
 }
@@ -74,19 +89,24 @@ func (s *Service) Init() {
 	log.Info("Init discovery service")
 }

-func (s *Service) pingPeer(peer p2p.Peer) {
+func (s *Service) pingPeer() {
+	tick := time.NewTicker(5 * time.Second)
 	ping := proto_discovery.NewPingMessage(s.host.GetSelfPeer())
 	buffer := ping.ConstructPingMessage()
 	content := host.ConstructP2pMessage(byte(0), buffer)
-	// s.host.SendMessage(peer, content)
-	// log.Debug("Sent Ping Message via unicast to", "peer", peer)
-	err := s.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, content)
-	if err != nil {
-		log.Error("Failed to send ping message", "group", p2p.GroupIDBeacon)
-	} else {
-		log.Debug("[PING] sent Ping Message via group send to", "peer", peer)
-	}
-	if s.stakingChan != nil {
-		s.stakingChan <- peer
+	for {
+		select {
+		case <-tick.C:
+			err := s.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, content)
+			if err != nil {
+				log.Error("Failed to send ping message", "group", p2p.GroupIDBeacon)
+			}
+		case <-s.stopChan:
+			log.Info("Stop sending ping message")
+			return
+		}
 	}
+	// s.host.SendMessage(peer, content)
+	// log.Debug("Sent Ping Message via unicast to", "peer", peer)
 }
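Aside (not part of the diff): the reworked contactP2pPeers/pingPeer above boil down to a ticker-plus-stop-channel loop that re-broadcasts one prebuilt ping payload to a group. Below is a minimal, self-contained Go sketch of that pattern; GroupSender, fakeHost and the "beacon" group name are illustrative stand-ins and not the harmony host API.

```go
package main

import (
	"fmt"
	"time"
)

// GroupSender abstracts the single host call the ping loop needs
// (a stand-in for the harmony p2p host, names are illustrative only).
type GroupSender interface {
	SendToGroup(group string, msg []byte) error
}

type fakeHost struct{}

func (fakeHost) SendToGroup(group string, msg []byte) error {
	fmt.Printf("sent %d bytes to group %q\n", len(msg), group)
	return nil
}

// pingLoop builds the ping payload once and re-broadcasts it on every tick
// until stop is closed, mirroring the select/ticker shape of the diff above.
func pingLoop(h GroupSender, stop <-chan struct{}) {
	tick := time.NewTicker(500 * time.Millisecond)
	defer tick.Stop()
	content := []byte("ping") // stand-in for ConstructPingMessage output
	for {
		select {
		case <-tick.C:
			if err := h.SendToGroup("beacon", content); err != nil {
				fmt.Println("failed to send ping:", err)
			}
		case <-stop:
			fmt.Println("stop sending ping message")
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go pingLoop(fakeHost{}, stop)
	time.Sleep(2 * time.Second)
	close(stop)
	time.Sleep(100 * time.Millisecond)
}
```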

api/service/networkinfo/service.go

@@ -28,7 +28,6 @@ type Service struct {
 	peerChan  chan p2p.Peer
 	peerInfo  <-chan peerstore.PeerInfo
 	discovery *libp2pdis.RoutingDiscovery
-	lock      sync.Mutex
 }

 // New returns role conversion service.
@@ -49,7 +48,6 @@ func New(h p2p.Host, rendezvous string, peerChan chan p2p.Peer) *Service {
 		stopChan:    make(chan struct{}),
 		stoppedChan: make(chan struct{}),
 		peerChan:    peerChan,
-		peerInfo:    make(<-chan peerstore.PeerInfo),
 	}
 }
@@ -91,8 +89,6 @@ func (s *Service) Init() error {
 	libp2pdis.Advertise(s.ctx, s.discovery, s.Rendezvous)
 	utils.GetLogInstance().Info("Successfully announced!")

-	go s.DoService()
-
 	return nil
 }
@@ -104,26 +100,22 @@ func (s *Service) Run() {
 	if err != nil {
 		utils.GetLogInstance().Error("FindPeers", "error", err)
 	}
+
+	go s.DoService()
 }

 // DoService does network info.
 func (s *Service) DoService() {
 	for {
 		select {
-		case peer, ok := <-s.peerInfo:
-			if !ok {
-				utils.GetLogInstance().Debug("no more peer info", "peer", peer.ID)
-				return
-			}
+		case peer := <-s.peerInfo:
 			if peer.ID != s.Host.GetP2PHost().ID() && len(peer.ID) > 0 {
 				utils.GetLogInstance().Info("Found Peer", "peer", peer.ID, "addr", peer.Addrs, "my ID", s.Host.GetP2PHost().ID())
-				s.lock.Lock()
 				if err := s.Host.GetP2PHost().Connect(s.ctx, peer); err != nil {
 					utils.GetLogInstance().Warn("can't connect to peer node", "error", err)
 				} else {
 					utils.GetLogInstance().Info("connected to peer node", "peer", peer)
 				}
-				s.lock.Unlock()
 				// figure out the public ip/port
 				ip := "127.0.0.1"
 				var port string
@@ -143,11 +135,10 @@ func (s *Service) DoService() {
 				utils.GetLogInstance().Info("Notify peerChan", "peer", p)
 				s.peerChan <- p
 			}
-		case <-s.ctx.Done():
+		case <-s.stopChan:
 			return
 		}
 	}
 }

 // StopService stops network info service.

cmd/harmony.go

@@ -199,6 +199,7 @@ func main() {
 			// Attack determination.
 			attack.GetInstance().SetAttackEnabled(attackDetermination(*attackedMode))
 		}
+		utils.UseLibP2P = false
 	} else {
 		if *isLeader {
 			role = "leader"
@@ -206,6 +207,7 @@ func main() {
 		} else {
 			role = "validator"
 		}
+		utils.UseLibP2P = true
 	}
 	// Init logging.
 	loggingInit(*logFolder, role, *ip, *port, *onlyLogTps)
@@ -264,7 +266,9 @@ func main() {
 	// Add randomness protocol
 	// TODO: enable drand only for beacon chain
+	// TODO: put this in a better place other than main.
 	dRand := drand.New(host, shardID, peers, leader, currentNode.ConfirmedBlockChannel)
+	currentNode.Consensus.RegisterPRndChannel(dRand.PRndChannel)
 	currentNode.DRand = dRand

 	// If there is a client configured in the node list.
@@ -284,7 +288,9 @@ func main() {
 			go currentNode.JoinShard(leader)
 		}
 	} else {
-		currentNode.UseLibP2P = true
+		if consensus.IsLeader {
+			go currentNode.SendPongMessage()
+		}
 	}

 	go currentNode.SupportSyncing()

consensus/consensus.go

@@ -18,6 +18,7 @@ import (
 	protobuf "github.com/golang/protobuf/proto"
 	"github.com/harmony-one/bls/ffi/go/bls"
 	consensus_proto "github.com/harmony-one/harmony/api/consensus"
+	consensus_engine "github.com/harmony-one/harmony/consensus/engine"
 	"github.com/harmony-one/harmony/core/state"
 	"github.com/harmony-one/harmony/core/types"
 	bls_cosi "github.com/harmony-one/harmony/crypto/bls"
@@ -96,6 +97,9 @@ type Consensus struct {
 	// verified block to state sync broadcast
 	VerifiedNewBlock chan *types.Block

+	// Channel for DRG protocol to send pRnd (preimage of randomness resulting from combined vrf randomnesses) to consensus. The first 32 bytes are randomness, the rest is for bitmap.
+	PRndChannel chan []byte
+
 	uniqueIDInstance *utils.UniqueValidatorID

 	// The p2p host used to send/receive p2p messages
@@ -213,6 +217,11 @@ func New(host p2p.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer) *Cons
 	return &consensus
 }

+// RegisterPRndChannel registers the channel for receiving randomness preimage from DRG protocol
+func (consensus *Consensus) RegisterPRndChannel(pRndChannel chan []byte) {
+	consensus.PRndChannel = pRndChannel
+}
+
 // Checks the basic meta of a consensus message, including the signature.
 func (consensus *Consensus) checkConsensusMessage(message consensus_proto.Message, publicKey *bls.PublicKey) error {
 	consensusID := message.ConsensusId
@@ -222,18 +231,18 @@ func (consensus *Consensus) checkConsensusMessage(message consensus_proto.Messag
 	err := verifyMessageSig(publicKey, message)
 	if err != nil {
 		utils.GetLogInstance().Warn("Failed to verify the message signature", "Error", err)
-		return ErrInvalidConsensusMessage
+		return consensus_engine.ErrInvalidConsensusMessage
 	}

 	// check consensus Id
 	if consensusID != consensus.consensusID {
 		utils.GetLogInstance().Warn("Wrong consensus Id", "myConsensusId", consensus.consensusID, "theirConsensusId", consensusID, "consensus", consensus)
-		return ErrConsensusIDNotMatch
+		return consensus_engine.ErrConsensusIDNotMatch
 	}
 	if !bytes.Equal(blockHash, consensus.blockHash[:]) {
 		utils.GetLogInstance().Warn("Wrong blockHash", "consensus", consensus)
-		return ErrInvalidConsensusMessage
+		return consensus_engine.ErrInvalidConsensusMessage
 	}
 	return nil
 }
@@ -379,7 +388,7 @@ func (consensus *Consensus) AddPeers(peers []*p2p.Peer) int {
 			consensus.pubKeyLock.Lock()
 			consensus.PublicKeys = append(consensus.PublicKeys, peer.PubKey)
 			consensus.pubKeyLock.Unlock()
-			utils.GetLogInstance().Debug("[SYNC]", "new peer added", peer)
+			// utils.GetLogInstance().Debug("[SYNC]", "new peer added", peer)
 		}
 		count++
 	}
@@ -432,8 +441,12 @@ func (consensus *Consensus) RemovePeers(peers []p2p.Peer) int {
 		pong := proto_discovery.NewPongMessage(validators, consensus.PublicKeys)
 		buffer := pong.ConstructPongMessage()

+		if utils.UseLibP2P {
+			consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, buffer)
+		} else {
 			host.BroadcastMessageFromLeader(consensus.host, validators, buffer, consensus.OfflinePeers)
 		}
+	}

 	return count2
 }
@@ -479,7 +492,7 @@ func NewFaker() *Consensus {
 // VerifyHeader checks whether a header conforms to the consensus rules of the
 // stock bft engine.
-func (consensus *Consensus) VerifyHeader(chain ChainReader, header *types.Header, seal bool) error {
+func (consensus *Consensus) VerifyHeader(chain consensus_engine.ChainReader, header *types.Header, seal bool) error {
 	// TODO: implement this
 	return nil
 }
@@ -487,7 +500,7 @@ func (consensus *Consensus) VerifyHeader(chain ChainReader, header *types.Header
 // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
 // concurrently. The method returns a quit channel to abort the operations and
 // a results channel to retrieve the async verifications.
-func (consensus *Consensus) VerifyHeaders(chain ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+func (consensus *Consensus) VerifyHeaders(chain consensus_engine.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
 	abort, results := make(chan struct{}), make(chan error, len(headers))
 	for i := 0; i < len(headers); i++ {
 		results <- nil
@@ -495,7 +508,7 @@ func (consensus *Consensus) VerifyHeaders(chain ChainReader, headers []*types.He
 	return abort, results
 }

-func (consensus *Consensus) verifyHeaderWorker(chain ChainReader, headers []*types.Header, seals []bool, index int) error {
+func (consensus *Consensus) verifyHeaderWorker(chain consensus_engine.ChainReader, headers []*types.Header, seals []bool, index int) error {
 	var parent *types.Header
 	if index == 0 {
 		parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
@@ -503,7 +516,7 @@ func (consensus *Consensus) verifyHeaderWorker(chain ChainReader, headers []*typ
 		parent = headers[index-1]
 	}
 	if parent == nil {
-		return ErrUnknownAncestor
+		return consensus_engine.ErrUnknownAncestor
 	}
 	if chain.GetHeader(headers[index].Hash(), headers[index].Number.Uint64()) != nil {
 		return nil // known block
@@ -513,19 +526,19 @@ func (consensus *Consensus) verifyHeaderWorker(chain ChainReader, headers []*typ
 // verifyHeader checks whether a header conforms to the consensus rules of the
 // stock bft engine.
-func (consensus *Consensus) verifyHeader(chain ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
+func (consensus *Consensus) verifyHeader(chain consensus_engine.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
 	return nil
 }

 // VerifySeal implements consensus.Engine, checking whether the given block satisfies
 // the PoW difficulty requirements.
-func (consensus *Consensus) VerifySeal(chain ChainReader, header *types.Header) error {
+func (consensus *Consensus) VerifySeal(chain consensus_engine.ChainReader, header *types.Header) error {
 	return nil
 }

 // Finalize implements consensus.Engine, accumulating the block and uncle rewards,
 // setting the final state and assembling the block.
-func (consensus *Consensus) Finalize(chain ChainReader, header *types.Header, state *state.DB, txs []*types.Transaction, receipts []*types.Receipt) (*types.Block, error) {
+func (consensus *Consensus) Finalize(chain consensus_engine.ChainReader, header *types.Header, state *state.DB, txs []*types.Transaction, receipts []*types.Receipt) (*types.Block, error) {
 	// Accumulate any block and uncle rewards and commit the final state root
 	// Header seems complete, assemble into a block and return
 	accumulateRewards(chain.Config(), state, header)
@@ -556,14 +569,14 @@ func (consensus *Consensus) SealHash(header *types.Header) (hash common.Hash) {
 }

 // Seal is to seal final block.
-func (consensus *Consensus) Seal(chain ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
+func (consensus *Consensus) Seal(chain consensus_engine.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
 	// TODO: implement final block sealing
 	return nil
 }

 // Prepare is to prepare ...
 // TODO(RJ): fix it.
-func (consensus *Consensus) Prepare(chain ChainReader, header *types.Header) error {
+func (consensus *Consensus) Prepare(chain consensus_engine.ChainReader, header *types.Header) error {
 	// TODO: implement prepare method
 	return nil
 }

consensus/consensus_leader.go

@@ -5,6 +5,8 @@ import (
 	"strconv"
 	"time"

+	"github.com/harmony-one/harmony/core"
+
 	"github.com/ethereum/go-ethereum/rlp"
 	protobuf "github.com/golang/protobuf/proto"
 	"github.com/harmony-one/bls/ffi/go/bls"
@@ -14,6 +16,7 @@ import (
 	bls_cosi "github.com/harmony-one/harmony/crypto/bls"
 	"github.com/harmony-one/harmony/internal/profiler"
 	"github.com/harmony-one/harmony/internal/utils"
+	"github.com/harmony-one/harmony/p2p"
 	"github.com/harmony-one/harmony/p2p/host"
 )
@@ -47,6 +50,19 @@ func (consensus *Consensus) WaitForNewBlock(blockChannel chan *types.Block, stop
 			time.Sleep(waitForEnoughValidators * time.Millisecond)
 		}

+		if core.IsEpochBlock(newBlock) {
+			// Receive pRnd from DRG protocol
+			utils.GetLogInstance().Debug("[DRG] Waiting for pRnd")
+			pRndAndBitmap := <-consensus.PRndChannel
+			utils.GetLogInstance().Debug("[DRG] GOT pRnd", "pRnd", pRndAndBitmap)
+			pRnd := pRndAndBitmap[:32]
+			bitmap := pRndAndBitmap[32:]
+			vrfBitmap, _ := bls_cosi.NewMask(consensus.PublicKeys, consensus.leader.PubKey)
+			vrfBitmap.SetMask(bitmap)
+			// TODO: check validity of pRnd
+			_ = pRnd
+		}
+
 		startTime = time.Now()
 		utils.GetLogInstance().Debug("STARTING CONSENSUS", "numTxs", len(newBlock.Transactions()), "consensus", consensus, "startTime", startTime, "publicKeys", len(consensus.PublicKeys))
 		for { // Wait until last consensus is finished
@@ -107,7 +123,12 @@ func (consensus *Consensus) startConsensus(newBlock *types.Block) {
 	// Leader sign the block hash itself
 	consensus.prepareSigs[consensus.nodeID] = consensus.priKey.SignHash(consensus.blockHash[:])

+	if utils.UseLibP2P {
+		// Construct broadcast p2p message
+		consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, host.ConstructP2pMessage(byte(17), msgToSend))
+	} else {
 		host.BroadcastMessageFromLeader(consensus.host, consensus.GetValidatorPeers(), msgToSend, consensus.OfflinePeers)
+	}
 }

 // processPrepareMessage processes the prepare message sent from validators
@@ -164,7 +185,12 @@ func (consensus *Consensus) processPrepareMessage(message consensus_proto.Messag
 		// Construct and broadcast prepared message
 		msgToSend, aggSig := consensus.constructPreparedMessage()
 		consensus.aggregatedPrepareSig = aggSig
+		if utils.UseLibP2P {
+			consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, host.ConstructP2pMessage(byte(17), msgToSend))
+		} else {
 			host.BroadcastMessageFromLeader(consensus.host, consensus.GetValidatorPeers(), msgToSend, consensus.OfflinePeers)
+		}

 		// Set state to targetState
 		consensus.state = targetState
@@ -230,7 +256,12 @@ func (consensus *Consensus) processCommitMessage(message consensus_proto.Message
 		// Construct and broadcast committed message
 		msgToSend, aggSig := consensus.constructCommittedMessage()
 		consensus.aggregatedCommitSig = aggSig
+		if utils.UseLibP2P {
+			consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, host.ConstructP2pMessage(byte(17), msgToSend))
+		} else {
 			host.BroadcastMessageFromLeader(consensus.host, consensus.GetValidatorPeers(), msgToSend, consensus.OfflinePeers)
+		}

 		var blockObj types.Block
 		err := rlp.DecodeBytes(consensus.block, &blockObj)
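Aside (not part of the diff): the epoch-block branch added to WaitForNewBlock reads a payload off PRndChannel whose first 32 bytes are the randomness preimage and whose remainder is the signer bitmap. The sketch below restates that slicing with a hypothetical splitPRnd helper (the commit does the slicing inline, without such a helper).

```go
package main

import (
	"errors"
	"fmt"
)

// splitPRnd mirrors the layout the leader expects on PRndChannel: the first
// 32 bytes are the randomness preimage, the remainder is the signer bitmap.
// Illustrative helper only.
func splitPRnd(pRndAndBitmap []byte) (pRnd [32]byte, bitmap []byte, err error) {
	if len(pRndAndBitmap) < 32 {
		return pRnd, nil, errors.New("payload shorter than 32 bytes")
	}
	copy(pRnd[:], pRndAndBitmap[:32])
	return pRnd, pRndAndBitmap[32:], nil
}

func main() {
	payload := append(make([]byte, 32), 0xAB, 0xCD) // 32-byte preimage + 2-byte bitmap
	pRnd, bitmap, err := splitPRnd(payload)
	if err != nil {
		panic(err)
	}
	fmt.Printf("pRnd: %x\nbitmap: %x\n", pRnd, bitmap)
}
```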

consensus/consensus_validator.go

@@ -3,10 +3,13 @@ package consensus
 import (
 	"github.com/harmony-one/bls/ffi/go/bls"
 	bls_cosi "github.com/harmony-one/harmony/crypto/bls"
+	"github.com/harmony-one/harmony/p2p"
+	"github.com/harmony-one/harmony/p2p/host"

 	"github.com/ethereum/go-ethereum/rlp"
 	protobuf "github.com/golang/protobuf/proto"
 	consensus_proto "github.com/harmony-one/harmony/api/consensus"
+	consensus_engine "github.com/harmony-one/harmony/consensus/engine"
 	"github.com/harmony-one/harmony/core/types"
 	"github.com/harmony-one/harmony/internal/attack"
 	"github.com/harmony-one/harmony/internal/utils"
@@ -74,7 +77,7 @@ func (consensus *Consensus) processAnnounceMessage(message consensus_proto.Messa
 	if err := consensus.checkConsensusMessage(message, consensus.leader.PubKey); err != nil {
 		utils.GetLogInstance().Debug("Failed to check the leader message")
-		if err == ErrConsensusIDNotMatch {
+		if err == consensus_engine.ErrConsensusIDNotMatch {
 			utils.GetLogInstance().Debug("sending bft block to state syncing")
 			consensus.sendBFTBlockToStateSyncing(consensusID)
 		}
@@ -103,7 +106,11 @@ func (consensus *Consensus) processAnnounceMessage(message consensus_proto.Messa
 	// Construct and send prepare message
 	msgToSend := consensus.constructPrepareMessage()
+	if utils.UseLibP2P {
+		consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, host.ConstructP2pMessage(byte(17), msgToSend))
+	} else {
 		consensus.SendMessage(consensus.leader, msgToSend)
+	}

 	consensus.state = PrepareDone
 }
@@ -163,7 +170,11 @@ func (consensus *Consensus) processPreparedMessage(message consensus_proto.Messa
 	// Construct and send the commit message
 	multiSigAndBitmap := append(multiSig, bitmap...)
 	msgToSend := consensus.constructCommitMessage(multiSigAndBitmap)
+	if utils.UseLibP2P {
+		consensus.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, host.ConstructP2pMessage(byte(17), msgToSend))
+	} else {
 		consensus.SendMessage(consensus.leader, msgToSend)
+	}

 	consensus.state = CommitDone
 }

consensus/engine/consensus_engine.go

@@ -1,4 +1,4 @@
-package consensus
+package engine

 import (
 	"github.com/ethereum/go-ethereum/common"

consensus/engine/errors.go

@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

-package consensus
+package engine

 import "errors"

core/block_validator.go

@@ -20,7 +20,7 @@ import (
 	"fmt"

 	"github.com/ethereum/go-ethereum/params"
-	"github.com/harmony-one/harmony/consensus"
+	consensus_engine "github.com/harmony-one/harmony/consensus/engine"
 	"github.com/harmony-one/harmony/core/state"
 	"github.com/harmony-one/harmony/core/types"
 )
@@ -32,11 +32,11 @@ import (
 type BlockValidator struct {
 	config *params.ChainConfig // Chain configuration options
 	bc     *BlockChain         // Canonical block chain
-	engine consensus.Engine    // Consensus engine used for validating
+	engine consensus_engine.Engine // Consensus engine used for validating
 }

 // NewBlockValidator returns a new block validator which is safe for re-use
-func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus.Engine) *BlockValidator {
+func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus_engine.Engine) *BlockValidator {
 	validator := &BlockValidator{
 		config: config,
 		engine: engine,
@@ -55,9 +55,9 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
 	}
 	if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
 		if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
-			return consensus.ErrUnknownAncestor
+			return consensus_engine.ErrUnknownAncestor
 		}
-		return consensus.ErrPrunedAncestor
+		return consensus_engine.ErrPrunedAncestor
 	}
 	// Header validity is known at this point, check the uncles and transactions
 	header := block.Header()

core/blockchain.go

@@ -38,7 +38,7 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
-	"github.com/harmony-one/harmony/consensus"
+	consensus_engine "github.com/harmony-one/harmony/consensus/engine"
 	"github.com/harmony-one/harmony/core/rawdb"
 	"github.com/harmony-one/harmony/core/state"
 	"github.com/harmony-one/harmony/core/types"
@@ -135,7 +135,7 @@ type BlockChain struct {
 	procInterrupt int32          // interrupt signaler for block processing
 	wg            sync.WaitGroup // chain processing wait group for shutting down

-	engine    consensus.Engine
+	engine    consensus_engine.Engine
 	processor Processor // block processor interface
 	validator Validator // block and state validator interface
 	vmConfig  vm.Config
@@ -147,7 +147,7 @@ type BlockChain struct {
 // NewBlockChain returns a fully initialised block chain using information
 // available in the database. It initialises the default Ethereum Validator and
 // Processor.
-func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
+func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus_engine.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
 	if cacheConfig == nil {
 		cacheConfig = &CacheConfig{
 			TrieNodeLimit: 256 * 1024 * 1024,
@@ -223,6 +223,16 @@ func (bc *BlockChain) ValidateNewBlock(block *types.Block, address common.Addres
 	return nil
 }

+// IsEpochBlock returns whether this block is the first block of an epoch.
+func IsEpochBlock(block *types.Block) bool {
+	return block.NumberU64()%BlocksPerEpoch == 0
+}
+
+// IsEpochLastBlock returns whether this block is the last block of an epoch.
+func IsEpochLastBlock(block *types.Block) bool {
+	return block.NumberU64()%BlocksPerEpoch == BlocksPerEpoch-1
+}
+
 func (bc *BlockChain) getProcInterrupt() bool {
 	return atomic.LoadInt32(&bc.procInterrupt) == 1
 }
@@ -931,7 +941,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 	// Calculate the total difficulty of the block
 	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
 	if ptd == nil {
-		return NonStatTy, consensus.ErrUnknownAncestor
+		return NonStatTy, consensus_engine.ErrUnknownAncestor
 	}
 	// Make sure no inconsistent state is leaked during insertion
 	bc.mu.Lock()
@@ -1135,7 +1145,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 				continue
 			}

-		case err == consensus.ErrFutureBlock:
+		case err == consensus_engine.ErrFutureBlock:
 			// Allow up to MaxFuture second in the future blocks. If this limit is exceeded
 			// the chain is discarded and processed at a later time if given.
 			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
@@ -1146,12 +1156,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 			stats.queued++
 			continue

-		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
+		case err == consensus_engine.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
 			bc.futureBlocks.Add(block.Hash(), block)
 			stats.queued++
 			continue

-		case err == consensus.ErrPrunedAncestor:
+		case err == consensus_engine.ErrPrunedAncestor:
 			// Block competing with the canonical chain, store in the db, but don't process
 			// until the competitor TD goes above the canonical TD
 			currentBlock := bc.CurrentBlock()
@@ -1615,7 +1625,7 @@ func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

 // Engine retrieves the blockchain's consensus engine.
-func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
+func (bc *BlockChain) Engine() consensus_engine.Engine { return bc.engine }

 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
@@ -1687,11 +1697,11 @@ func (bc *BlockChain) GetRandSeedByNumber(number uint64) int64 {
 // epoch block is where the new shard state stored
 func (bc *BlockChain) GetNewShardState(block *types.Block) types.ShardState {
 	hash := block.Hash()
-	number := block.NumberU64()
 	// just ignore non-epoch block
-	if !CheckEpochBlock(number) {
+	if !IsEpochBlock(block) {
 		return nil
 	}
+	number := block.NumberU64()
 	shardState := bc.GetShardState(hash, number)
 	if shardState == nil {
 		epoch := GetEpochFromBlockNumber(number)
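Aside (not part of the diff): the new IsEpochBlock and IsEpochLastBlock helpers are plain modular arithmetic on the block number. A small standalone sketch of that arithmetic follows, using an illustrative blocksPerEpoch of 5 in place of the real core.BlocksPerEpoch constant.

```go
package main

import "fmt"

// blocksPerEpoch is an illustrative value; the real constant lives in the
// core package (BlocksPerEpoch).
const blocksPerEpoch = 5

// isEpochBlock/isEpochLastBlock restate the helpers above: the first block of
// an epoch has number % blocksPerEpoch == 0, the last one has
// number % blocksPerEpoch == blocksPerEpoch-1.
func isEpochBlock(number uint64) bool     { return number%blocksPerEpoch == 0 }
func isEpochLastBlock(number uint64) bool { return number%blocksPerEpoch == blocksPerEpoch-1 }

func main() {
	for n := uint64(0); n < 12; n++ {
		fmt.Printf("block %2d: first=%v last=%v\n", n, isEpochBlock(n), isEpochLastBlock(n))
	}
}
```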

core/chain_makers.go

@@ -23,7 +23,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/harmony-one/harmony/consensus"
+	consensus_engine "github.com/harmony-one/harmony/consensus/engine"
 	"github.com/harmony-one/harmony/core/state"
 	"github.com/harmony-one/harmony/core/types"
 	"github.com/harmony-one/harmony/core/vm"
@@ -44,7 +44,7 @@ type BlockGen struct {
 	uncles []*types.Header

 	config *params.ChainConfig
-	engine consensus.Engine
+	engine consensus_engine.Engine
 }

 // SetCoinbase sets the coinbase of the generated block.
@@ -161,7 +161,7 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
 // Blocks created by GenerateChain do not contain valid proof of work
 // values. Inserting them into BlockChain requires use of FakePow or
 // a similar non-validating proof of work implementation.
-func GenerateChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
+func GenerateChain(config *params.ChainConfig, parent *types.Block, engine consensus_engine.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
 	if config == nil {
 		config = params.TestChainConfig
 	}
@@ -216,7 +216,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
 	return blocks, receipts
 }

-func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.DB, engine consensus.Engine) *types.Header {
+func makeHeader(chain consensus_engine.ChainReader, parent *types.Block, state *state.DB, engine consensus_engine.Engine) *types.Header {
 	var time *big.Int
 	if parent.Time() == nil {
 		time = big.NewInt(10)
@@ -241,7 +241,7 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.D
 }

 // makeHeaderChain creates a deterministic chain of headers rooted at parent.
-func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Header {
+func makeHeaderChain(parent *types.Header, n int, engine consensus_engine.Engine, db ethdb.Database, seed int) []*types.Header {
 	blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, engine, db, seed)
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
@@ -251,7 +251,7 @@ func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db et
 }

 // makeBlockChain creates a deterministic chain of blocks rooted at parent.
-func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Block {
+func makeBlockChain(parent *types.Block, n int, engine consensus_engine.Engine, db ethdb.Database, seed int) []*types.Block {
 	blocks, _ := GenerateChain(params.TestChainConfig, parent, engine, db, n, func(i int, b *BlockGen) {
 		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
 	})

core/evm.go

@@ -20,7 +20,7 @@ import (
 	"math/big"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/harmony-one/harmony/consensus"
+	consensus_engine "github.com/harmony-one/harmony/consensus/engine"
 	"github.com/harmony-one/harmony/core/types"
 	"github.com/harmony-one/harmony/core/vm"
 )
@@ -29,7 +29,7 @@ import (
 // current blockchain to be used during transaction processing.
 type ChainContext interface {
 	// Engine retrieves the chain's consensus engine.
-	Engine() consensus.Engine
+	Engine() consensus_engine.Engine

 	// GetHeader returns the hash corresponding to their hash.
 	GetHeader(common.Hash, uint64) *types.Header

core/headerchain.go

@@ -30,7 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/harmony-one/harmony/consensus"
+	consensus_engine "github.com/harmony-one/harmony/consensus/engine"
 	"github.com/harmony-one/harmony/core/rawdb"
 	"github.com/harmony-one/harmony/core/types"
 	lru "github.com/hashicorp/golang-lru"
@@ -63,14 +63,14 @@ type HeaderChain struct {
 	procInterrupt func() bool

 	rand   *mrand.Rand
-	engine consensus.Engine
+	engine consensus_engine.Engine
 }

 // NewHeaderChain creates a new HeaderChain structure.
 // getValidator should return the parent's validator
 // procInterrupt points to the parent's interrupt semaphore
 // wg points to the parent's shutdown wait group
-func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
+func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus_engine.Engine, procInterrupt func() bool) (*HeaderChain, error) {
 	headerCache, _ := lru.New(headerCacheLimit)
 	tdCache, _ := lru.New(tdCacheLimit)
 	numberCache, _ := lru.New(numberCacheLimit)
@@ -140,7 +140,7 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 	// Calculate the total difficulty of the header
 	ptd := hc.GetTd(header.ParentHash, number-1)
 	if ptd == nil {
-		return NonStatTy, consensus.ErrUnknownAncestor
+		return NonStatTy, consensus_engine.ErrUnknownAncestor
 	}
 	localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
 	externTd := new(big.Int).Add(header.Difficulty, ptd)
@@ -498,7 +498,7 @@ func (hc *HeaderChain) SetGenesis(head *types.Header) {
 func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }

 // Engine retrieves the header chain's consensus engine.
-func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine }
+func (hc *HeaderChain) Engine() consensus_engine.Engine { return hc.engine }

 // GetBlock implements consensus.ChainReader, and returns nil for every input as
 // a header chain does not have blocks available for retrieval.

core/resharding.go

@@ -94,11 +94,6 @@ func GetEpochFromBlockNumber(blockNumber uint64) uint64 {
 	return blockNumber / uint64(BlocksPerEpoch)
 }

-// CheckEpochBlock check whethere a given block number is the one to store epoch information
-func CheckEpochBlock(blockNumber uint64) bool {
-	return blockNumber%uint64(BlocksPerEpoch) == 0
-}
-
 // GetPreviousEpochBlockNumber gets the epoch block number of previous epoch
 func GetPreviousEpochBlockNumber(blockNumber uint64) uint64 {
 	epoch := GetEpochFromBlockNumber(blockNumber)

core/state_processor.go

@@ -20,7 +20,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/harmony-one/harmony/consensus"
+	consensus_engine "github.com/harmony-one/harmony/consensus/engine"
 	"github.com/harmony-one/harmony/core/state"
 	"github.com/harmony-one/harmony/core/types"
 	"github.com/harmony-one/harmony/core/vm"
@@ -33,11 +33,11 @@ import (
 type StateProcessor struct {
 	config *params.ChainConfig // Chain configuration options
 	bc     *BlockChain         // Canonical block chain
-	engine consensus.Engine    // Consensus engine used for block rewards
+	engine consensus_engine.Engine // Consensus engine used for block rewards
 }

 // NewStateProcessor initialises a new StateProcessor.
-func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *StateProcessor {
+func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus_engine.Engine) *StateProcessor {
 	return &StateProcessor{
 		config: config,
 		bc:     bc,

crypto/vrf/p256/p256_test.go

@@ -207,13 +207,13 @@ func TestProofToHash(t *testing.T) {
 	if err != nil {
 		t.Fatalf("ReadFile(%v): %v", respFile, err)
 	}
-	var getUserResponses []testdata.GetUserResponseVector
+	var getUserResponses []testdata.ResponseVector
 	if err := json.Unmarshal(b, &getUserResponses); err != nil {
 		t.Fatalf("Unmarshal(): %v", err)
 	}
 	for _, tc := range getUserResponses {
 		t.Run(tc.Desc, func(t *testing.T) {
-			_, err := pk.ProofToHash([]byte(tc.UserID), tc.Resp.GetLeaf().GetVrfProof())
+			_, err := pk.ProofToHash([]byte(tc.UserIDs[0]), tc.GetUserResp.GetLeaf().GetVrfProof())
 			if err != nil {
 				t.Errorf("ProofToHash(%v): %v)", tc.Desc, err)
 			}

drand/drand.go

@@ -23,7 +23,8 @@ type DRand struct {
 	bitmap *bls_cosi.Mask
 	pRand  *[32]byte
 	rand   *[32]byte
-	ConfirmedBlockChannel chan *types.Block // Channel for confirmed blocks
+	ConfirmedBlockChannel chan *types.Block // Channel to receive confirmed blocks
+	PRndChannel           chan []byte       // Channel to send pRnd (preimage of randomness resulting from combined vrf randomnesses) to consensus. The first 32 bytes are randomness, the rest is for bitmap.

 	// map of nodeID to validator Peer object
 	// FIXME: should use PubKey of p2p.Peer as the hashkey
@@ -65,6 +66,8 @@ func New(host p2p.Host, ShardID string, peers []p2p.Peer, leader p2p.Peer, confi
 		dRand.ConfirmedBlockChannel = confirmedBlockChannel
 	}

+	dRand.PRndChannel = make(chan []byte)
+
 	selfPeer := host.GetSelfPeer()
 	if leader.Port == selfPeer.Port && leader.IP == selfPeer.IP {
 		dRand.IsLeader = true

drand/drand_leader.go

@@ -3,6 +3,7 @@ package drand
 import (
 	protobuf "github.com/golang/protobuf/proto"
 	drand_proto "github.com/harmony-one/harmony/api/drand"
+	"github.com/harmony-one/harmony/core"
 	"github.com/harmony-one/harmony/core/types"
 	"github.com/harmony-one/harmony/internal/utils"
 	"github.com/harmony-one/harmony/p2p/host"
@@ -17,7 +18,9 @@ func (dRand *DRand) WaitForEpochBlock(blockChannel chan *types.Block, stopChan c
 		default:
 			// keep waiting for epoch block
 			newBlock := <-blockChannel
+			if core.IsEpochLastBlock(newBlock) {
 				dRand.init(newBlock)
+			}
 		case <-stopChan:
 			return
 		}
@@ -96,5 +99,14 @@ func (dRand *DRand) processCommitMessage(message drand_proto.Message) {
 		// Construct pRand and initiate consensus on it
 		utils.GetLogInstance().Debug("Received enough randomness commit", "numReceivedSoFar", len((*vrfs)), "validatorID", validatorID, "PublicKeys", len(dRand.PublicKeys))
 		// TODO: communicate the pRand to consensus
+		pRnd := [32]byte{}
+		// Bitwise XOR on all the submitted vrfs
+		for _, vrf := range *vrfs {
+			for i := 0; i < len(pRnd); i++ {
+				pRnd[i] = pRnd[i] ^ vrf[i]
+			}
+		}
+		dRand.PRndChannel <- append(pRnd[:], dRand.bitmap.Bitmap...)
 	}
 }
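Aside (not part of the diff): the drand leader code above XORs all submitted 32-byte VRF outputs into a single pRnd and appends the signer bitmap before sending the result on PRndChannel. A self-contained sketch of that combination step, with plain slices standing in for the drand leader's internal types:

```go
package main

import "fmt"

// combineVRFs XORs the submitted 32-byte VRF outputs into one pRnd and
// appends the signer bitmap, matching the payload layout sent on PRndChannel
// (32 bytes of randomness preimage followed by the bitmap).
func combineVRFs(vrfs [][32]byte, bitmap []byte) []byte {
	pRnd := [32]byte{}
	for _, vrf := range vrfs {
		for i := 0; i < len(pRnd); i++ {
			pRnd[i] ^= vrf[i]
		}
	}
	return append(pRnd[:], bitmap...)
}

func main() {
	a := [32]byte{0x01, 0x02}
	b := [32]byte{0xF0, 0x0F}
	payload := combineVRFs([][32]byte{a, b}, []byte{0x03})
	fmt.Printf("pRnd: %x\nbitmap: %x\n", payload[:32], payload[32:])
}
```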

internal/utils/singleton.go

@@ -13,6 +13,10 @@ import (
 var (
 	Port string
 	IP   string
+	// Global Variable to use libp2p for networking
+	// FIXME: this is a temporary hack, once we totally switch to libp2p
+	// this variable shouldn't be used
+	UseLibP2P bool
 )

 // SetPortAndIP used to print out loggings of node with Port and IP.

node/node.go

@@ -195,9 +195,8 @@ type Node struct {
 	// Group Message Receiver
 	groupReceiver p2p.GroupReceiver

-	// fully integrate with libp2p for networking
-	// FIXME: this is temporary hack until we can fully replace the old one
-	UseLibP2P bool
+	// Duplicated Ping Message Received
+	duplicatedPing map[string]bool
 }

 // Blockchain returns the blockchain from node
@@ -229,7 +228,7 @@ func (node *Node) getTransactionsForNewBlock(maxNumTxs int) types.Transactions {
 // StartServer starts a server and process the requests by a handler.
 func (node *Node) StartServer() {
-	if node.UseLibP2P {
+	if utils.UseLibP2P {
 		select {}
 	} else {
 		node.host.BindHandlerAndServe(node.StreamHandler)
@@ -317,6 +316,8 @@ func New(host p2p.Host, consensus *bft.Consensus, db ethdb.Database) *Node {
 		// start the goroutine to receive group message
 		go node.ReceiveGroupMessage()

+		node.duplicatedPing = make(map[string]bool)
+
 	return &node
 }

@ -14,6 +14,7 @@ import (
proto_discovery "github.com/harmony-one/harmony/api/proto/discovery" proto_discovery "github.com/harmony-one/harmony/api/proto/discovery"
proto_identity "github.com/harmony-one/harmony/api/proto/identity" proto_identity "github.com/harmony-one/harmony/api/proto/identity"
proto_node "github.com/harmony-one/harmony/api/proto/node" proto_node "github.com/harmony-one/harmony/api/proto/node"
"github.com/harmony-one/harmony/api/service"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto/pki" "github.com/harmony-one/harmony/crypto/pki"
"github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/internal/utils"
@ -46,7 +47,7 @@ func (node *Node) StreamHandler(s p2p.Stream) {
return return
} }
node.messageHandler(content) node.messageHandler(content, "")
} }
// ReceiveGroupMessage use libp2p pubsub mechanism to receive broadcast messages // ReceiveGroupMessage use libp2p pubsub mechanism to receive broadcast messages
@ -59,18 +60,18 @@ func (node *Node) ReceiveGroupMessage() {
} }
msg, sender, err := node.groupReceiver.Receive(ctx) msg, sender, err := node.groupReceiver.Receive(ctx)
if sender != node.host.GetID() { if sender != node.host.GetID() {
utils.GetLogInstance().Info("[PUBSUB]", "received group msg", len(msg), "sender", sender) // utils.GetLogInstance().Info("[PUBSUB]", "received group msg", len(msg), "sender", sender)
if err == nil { if err == nil {
// skip the first 5 bytes, 1 byte is p2p type, 4 bytes are message size // skip the first 5 bytes, 1 byte is p2p type, 4 bytes are message size
node.messageHandler(msg[5:]) node.messageHandler(msg[5:], string(sender))
} }
} }
} }
} }
// messageHandler parses the message and dispatch the actions // messageHandler parses the message and dispatch the actions
func (node *Node) messageHandler(content []byte) { func (node *Node) messageHandler(content []byte, sender string) {
node.MaybeBroadcastAsValidator(content) // node.MaybeBroadcastAsValidator(content)
consensusObj := node.Consensus consensusObj := node.Consensus
@ -178,7 +179,7 @@ func (node *Node) messageHandler(content []byte) {
os.Exit(0) os.Exit(0)
} }
case proto_node.PING: case proto_node.PING:
node.pingMessageHandler(msgPayload) node.pingMessageHandler(msgPayload, sender)
case proto_node.PONG: case proto_node.PONG:
node.pongMessageHandler(msgPayload) node.pongMessageHandler(msgPayload)
} }
@ -229,8 +230,12 @@ func (node *Node) transactionMessageHandler(msgPayload []byte) {
func (node *Node) BroadcastNewBlock(newBlock *types.Block) { func (node *Node) BroadcastNewBlock(newBlock *types.Block) {
if node.ClientPeer != nil { if node.ClientPeer != nil {
utils.GetLogInstance().Debug("Sending new block to client", "client", node.ClientPeer) utils.GetLogInstance().Debug("Sending new block to client", "client", node.ClientPeer)
if utils.UseLibP2P {
node.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, proto_node.ConstructBlocksSyncMessage([]*types.Block{newBlock}))
} else {
node.SendMessage(*node.ClientPeer, proto_node.ConstructBlocksSyncMessage([]*types.Block{newBlock})) node.SendMessage(*node.ClientPeer, proto_node.ConstructBlocksSyncMessage([]*types.Block{newBlock}))
} }
}
} }
// VerifyNewBlock is called by consensus participants to verify the block (account model) they are running consensus on // VerifyNewBlock is called by consensus participants to verify the block (account model) they are running consensus on
@ -261,6 +266,7 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block) {
node.AddNewBlock(newBlock) node.AddNewBlock(newBlock)
// TODO: enable drand only for beacon chain // TODO: enable drand only for beacon chain
// ConfirmedBlockChannel which is listened by drand leader who will initiate DRG if its a epoch block (first block of a epoch)
if node.DRand != nil { if node.DRand != nil {
go func() { go func() {
node.ConfirmedBlockChannel <- newBlock node.ConfirmedBlockChannel <- newBlock
@ -279,7 +285,17 @@ func (node *Node) AddNewBlock(newBlock *types.Block) {
} }
} }
func (node *Node) pingMessageHandler(msgPayload []byte) int { func (node *Node) pingMessageHandler(msgPayload []byte, sender string) int {
if sender != "" {
_, ok := node.duplicatedPing[sender]
if !ok {
node.duplicatedPing[sender] = true
} else {
// duplicated ping message return
return 0
}
}
ping, err := proto_discovery.GetPingMessage(msgPayload) ping, err := proto_discovery.GetPingMessage(msgPayload)
if err != nil { if err != nil {
utils.GetLogInstance().Error("Can't get Ping Message") utils.GetLogInstance().Error("Can't get Ping Message")
@ -299,11 +315,13 @@ func (node *Node) pingMessageHandler(msgPayload []byte) int {
return -1 return -1
} }
utils.GetLogInstance().Debug("[pingMessageHandler]", "incoming peer", peer) // utils.GetLogInstance().Debug("[pingMessageHandler]", "incoming peer", peer)
// add to incoming peer list // add to incoming peer list
node.host.AddIncomingPeer(*peer) node.host.AddIncomingPeer(*peer)
if utils.UseLibP2P {
node.host.ConnectHostPeer(*peer) node.host.ConnectHostPeer(*peer)
}
if ping.Node.Role == proto_node.ClientRole { if ping.Node.Role == proto_node.ClientRole {
utils.GetLogInstance().Info("Add Client Peer to Node", "Node", node.Consensus.GetNodeID(), "Client", peer) utils.GetLogInstance().Info("Add Client Peer to Node", "Node", node.Consensus.GetNodeID(), "Client", peer)
@ -314,7 +332,8 @@ func (node *Node) pingMessageHandler(msgPayload []byte) int {
// Add to Node's peer list anyway // Add to Node's peer list anyway
node.AddPeers([]*p2p.Peer{peer}) node.AddPeers([]*p2p.Peer{peer})
if node.Consensus.IsLeader { // This is the old way of broadcasting pong message
if node.Consensus.IsLeader && !utils.UseLibP2P {
peers := node.Consensus.GetValidatorPeers() peers := node.Consensus.GetValidatorPeers()
pong := proto_discovery.NewPongMessage(peers, node.Consensus.PublicKeys) pong := proto_discovery.NewPongMessage(peers, node.Consensus.PublicKeys)
buffer := pong.ConstructPongMessage() buffer := pong.ConstructPongMessage()
@ -332,21 +351,57 @@ func (node *Node) pingMessageHandler(msgPayload []byte) int {
// Broadcast the message to all validators, as publicKeys is updated // Broadcast the message to all validators, as publicKeys is updated
// FIXME: HAR-89 use a separate nodefind/neighbor message // FIXME: HAR-89 use a separate nodefind/neighbor message
if node.UseLibP2P { host.BroadcastMessageFromLeader(node.GetHost(), peers, buffer, node.Consensus.OfflinePeers)
// utils.GetLogInstance().Info("PingMsgHandler send pong message")
}
return 1
}
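The new pingMessageHandler above keeps a per-sender map so a repeated ping from the same peer is ignored. A minimal sketch of that deduplication, written under the assumption that handlers may run concurrently (hence the mutex); pingDeduper and shouldHandle are hypothetical names, not harmony code.

package main

import (
	"fmt"
	"sync"
)

// pingDeduper remembers which senders have already pinged us.
type pingDeduper struct {
	mu   sync.Mutex
	seen map[string]bool
}

func newPingDeduper() *pingDeduper {
	return &pingDeduper{seen: make(map[string]bool)}
}

// shouldHandle reports whether a ping from sender is the first one;
// later pings from the same sender are dropped, as in the diff.
func (d *pingDeduper) shouldHandle(sender string) bool {
	if sender == "" {
		return true // unknown sender: fall through to normal handling
	}
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.seen[sender] {
		return false
	}
	d.seen[sender] = true
	return true
}

func main() {
	d := newPingDeduper()
	for _, s := range []string{"peerA", "peerA", "peerB"} {
		fmt.Println(s, "handled:", d.shouldHandle(s))
	}
}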
// SendPongMessage is a goroutine that periodically sends pong messages to all peers
func (node *Node) SendPongMessage() {
tick := time.NewTicker(10 * time.Second)
numPeers := len(node.Consensus.GetValidatorPeers())
numPubKeys := len(node.Consensus.PublicKeys)
sentMessage := false
// Send the pong message only after the number of peers/pubkeys has changed and then stabilized
for {
select {
case <-tick.C:
peers := node.Consensus.GetValidatorPeers()
numPeersNow := len(peers)
numPubKeysNow := len(node.Consensus.PublicKeys)
// no peers, wait for another tick
if numPeersNow == 0 || numPubKeysNow == 0 {
continue
}
// new peers added
if numPubKeysNow != numPubKeys || numPeersNow != numPeers {
sentMessage = false
} else {
// stable number of peers/pubkeys, send the pong message
if !sentMessage {
pong := proto_discovery.NewPongMessage(peers, node.Consensus.PublicKeys)
buffer := pong.ConstructPongMessage()
content := host.ConstructP2pMessage(byte(0), buffer) content := host.ConstructP2pMessage(byte(0), buffer)
err := node.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, content) err := node.host.SendMessageToGroups([]p2p.GroupID{p2p.GroupIDBeacon}, content)
if err != nil { if err != nil {
utils.GetLogInstance().Error("[PONG] failed to send pong message", "group", p2p.GroupIDBeacon) utils.GetLogInstance().Error("[PONG] failed to send pong message", "group", p2p.GroupIDBeacon)
continue
} else { } else {
utils.GetLogInstance().Debug("[PONG] sent Pong Message via group send", "group", p2p.GroupIDBeacon) utils.GetLogInstance().Info("[PONG] sent pong message to", "group", p2p.GroupIDBeacon)
} }
} else { sentMessage = true
host.BroadcastMessageFromLeader(node.GetHost(), peers, buffer, node.Consensus.OfflinePeers) // stop sending ping message
utils.GetLogInstance().Info("PingMsgHandler send pong message") node.serviceManager.TakeAction(&service.Action{Action: service.Stop, ServiceType: service.PeerDiscovery})
}
}
numPeers = numPeersNow
numPubKeys = numPubKeysNow
} }
} }
return 1
} }
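SendPongMessage above ticks every 10 seconds and sends a single pong only once the peer/pubkey counts stop changing between ticks. A compact, runnable sketch of that stabilize-then-send-once loop; the count and send callbacks stand in for the consensus peer list and the group send, and all names are hypothetical.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// sendWhenStable ticks at the given interval and calls send exactly once
// each time the value returned by count stops changing between ticks.
func sendWhenStable(interval time.Duration, count func() int, send func(n int), stop <-chan struct{}) {
	tick := time.NewTicker(interval)
	defer tick.Stop()

	last := count()
	sent := false
	for {
		select {
		case <-stop:
			return
		case <-tick.C:
			now := count()
			if now == 0 {
				continue // no peers yet, wait for another tick
			}
			if now != last {
				sent = false // membership changed, arm another send
			} else if !sent {
				send(now)
				sent = true
			}
			last = now
		}
	}
}

func main() {
	var peers int64
	go func() { // simulate peers joining over time
		for i := int64(1); i <= 3; i++ {
			atomic.StoreInt64(&peers, i)
			time.Sleep(30 * time.Millisecond)
		}
	}()
	stop := make(chan struct{})
	go sendWhenStable(20*time.Millisecond,
		func() int { return int(atomic.LoadInt64(&peers)) },
		func(n int) { fmt.Println("pong sent to", n, "peers") },
		stop)
	time.Sleep(300 * time.Millisecond)
	close(stop)
}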
func (node *Node) pongMessageHandler(msgPayload []byte) int { func (node *Node) pongMessageHandler(msgPayload []byte) int {
@ -374,8 +429,6 @@ func (node *Node) pongMessageHandler(msgPayload []byte) int {
peers = append(peers, peer) peers = append(peers, peer)
} }
utils.GetLogInstance().Debug("[pongMessageHandler]", "received msg #peers", len(peers))
if len(peers) > 0 { if len(peers) > 0 {
node.AddPeers(peers) node.AddPeers(peers)
} }
@ -396,7 +449,7 @@ func (node *Node) pongMessageHandler(msgPayload []byte) int {
publicKeys = append(publicKeys, &key) publicKeys = append(publicKeys, &key)
} }
utils.GetLogInstance().Debug("[pongMessageHandler]", "received msg #keys", len(publicKeys)) utils.GetLogInstance().Debug("[pongMessageHandler]", "#keys", len(publicKeys), "#peers", len(peers))
if node.State == NodeWaitToJoin { if node.State == NodeWaitToJoin {
node.State = NodeReadyForConsensus node.State = NodeReadyForConsensus
@ -406,5 +459,7 @@ func (node *Node) pongMessageHandler(msgPayload []byte) int {
} }
} }
// Stop the discovery service after receiving the pong message
node.serviceManager.TakeAction(&service.Action{Action: service.Stop, ServiceType: service.PeerDiscovery})
return node.Consensus.UpdatePublicKeys(publicKeys) return node.Consensus.UpdatePublicKeys(publicKeys)
} }

@ -82,12 +82,12 @@ func (node *Node) addNewShardState(block *types.Block) {
} }
func (node *Node) addNewRandSeed(block *types.Block) { func (node *Node) addNewRandSeed(block *types.Block) {
blockNumber := block.NumberU64() if !core.IsEpochBlock(block) {
if !core.CheckEpochBlock(blockNumber) {
return return
} }
var rnd int64 var rnd int64
blockNumber := block.NumberU64()
epoch := core.GetEpochFromBlockNumber(blockNumber) epoch := core.GetEpochFromBlockNumber(blockNumber)
if epoch == 1 { if epoch == 1 {
rnd = core.InitialSeed rnd = core.InitialSeed
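addNewRandSeed now bails out early unless the block is an epoch block, and only then derives the epoch from the block number. The real mapping lives in the core package; the sketch below only illustrates the idea under the assumption of a fixed number of blocks per epoch, so blocksPerEpoch, isEpochBlock, and epochOf are illustrative, not harmony's actual constants or functions.

package main

import "fmt"

// blocksPerEpoch is an assumed, illustrative epoch length.
const blocksPerEpoch = 5

// isEpochBlock reports whether blockNumber is the first block of an epoch
// under the assumed fixed-length epochs.
func isEpochBlock(blockNumber uint64) bool {
	return blockNumber%blocksPerEpoch == 1
}

// epochOf maps a block number to its 1-based epoch index.
func epochOf(blockNumber uint64) uint64 {
	return (blockNumber-1)/blocksPerEpoch + 1
}

func main() {
	for _, n := range []uint64{1, 5, 6, 11} {
		fmt.Printf("block %d: epochBlock=%v epoch=%d\n", n, isEpochBlock(n), epochOf(n))
	}
}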

@ -8,7 +8,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/consensus" consensus_engine "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/state" "github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
@ -33,7 +33,7 @@ type Worker struct {
current *environment // An environment for current running cycle. current *environment // An environment for current running cycle.
coinbase common.Address coinbase common.Address
engine consensus.Engine engine consensus_engine.Engine
gasFloor uint64 gasFloor uint64
gasCeil uint64 gasCeil uint64
@ -156,7 +156,7 @@ func (w *Worker) Commit() (*types.Block, error) {
} }
// New create a new worker object. // New create a new worker object.
func New(config *params.ChainConfig, chain *core.BlockChain, engine consensus.Engine, coinbase common.Address, shardID uint32) *Worker { func New(config *params.ChainConfig, chain *core.BlockChain, engine consensus_engine.Engine, coinbase common.Address, shardID uint32) *Worker {
worker := &Worker{ worker := &Worker{
config: config, config: config,
chain: chain, chain: chain,

@ -162,8 +162,8 @@ func New(self *p2p.Peer, priKey p2p_crypto.PrivKey, opts ...p2p_config.Option) *
append(opts, libp2p.ListenAddrs(listenAddr), libp2p.Identity(priKey))..., append(opts, libp2p.ListenAddrs(listenAddr), libp2p.Identity(priKey))...,
) )
catchError(err) catchError(err)
// pubsub, err := pubsub.NewGossipSub(ctx, p2pHost) pubsub, err := pubsub.NewGossipSub(ctx, p2pHost)
pubsub, err := pubsub.NewFloodSub(ctx, p2pHost) // pubsub, err := pubsub.NewFloodSub(ctx, p2pHost)
catchError(err) catchError(err)
self.PeerID = p2pHost.ID() self.PeerID = p2pHost.ID()
@ -246,8 +246,6 @@ func (host *HostV2) ConnectHostPeer(peer p2p.Peer) {
utils.GetLogInstance().Error("ConnectHostPeer", "new peerinfo error", err, "peer", peer) utils.GetLogInstance().Error("ConnectHostPeer", "new peerinfo error", err, "peer", peer)
return return
} }
host.lock.Lock()
defer host.lock.Unlock()
if err := host.h.Connect(ctx, *peerInfo); err != nil { if err := host.h.Connect(ctx, *peerInfo); err != nil {
utils.GetLogInstance().Warn("can't connect to peer", "error", err, "peer", peer) utils.GetLogInstance().Warn("can't connect to peer", "error", err, "peer", peer)
} else { } else {
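The hostv2 change above swaps the FloodSub router for GossipSub when the host is created. A minimal sketch of constructing a GossipSub-backed pubsub on a libp2p host, assuming the go-libp2p / go-libp2p-pubsub APIs from around the time of this change (import paths and signatures may differ in newer releases); the topic name is illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	libp2p "github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func main() {
	ctx := context.Background()

	// Create a host listening on an ephemeral local address.
	h, err := libp2p.New(ctx)
	if err != nil {
		panic(err)
	}
	defer h.Close()

	// GossipSub instead of FloodSub, as in the diff.
	ps, err := pubsub.NewGossipSub(ctx, h)
	if err != nil {
		panic(err)
	}

	sub, err := ps.Subscribe("harmony/beacon")
	if err != nil {
		panic(err)
	}

	if err := ps.Publish("harmony/beacon", []byte("hello group")); err != nil {
		panic(err)
	}

	// Messages published locally are delivered to local subscribers.
	readCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()
	msg, err := sub.Next(readCtx)
	if err != nil {
		panic(err)
	}
	fmt.Println("received:", string(msg.Data))
}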

@ -31,6 +31,8 @@ fi
NUM_SHARDS=${#FILES[@]} NUM_SHARDS=${#FILES[@]}
SUM=0 SUM=0
NUM_CONSENSUS=0 NUM_CONSENSUS=0
NUM_TOTAL_NODES=$( expr $NUM_SHARDS + $NUM_VALIDATORS )
NUM_SIGS=0
declare -A TPS declare -A TPS
@ -43,12 +45,14 @@ for f in "${FILES[@]}"; do
else else
avg_tps=0 avg_tps=0
fi fi
num_sigs=$(grep numOfSignatures $f | tail -1 | cut -f 5 -d , | cut -f 2 -d :)
TPS[$leader]="$num_consensus, $avg_tps" TPS[$leader]="$num_consensus, $avg_tps"
NUM_CONSENSUS=$(expr $NUM_CONSENSUS + $num_consensus ) NUM_CONSENSUS=$(expr $NUM_CONSENSUS + $num_consensus )
SUM=$( expr $SUM + $avg_tps_int ) SUM=$( expr $SUM + $avg_tps_int )
NUM_SIGS=$( expr $NUM_SIGS + $num_sigs)
done done
echo $NUM_SHARDS shards, $NUM_CONSENSUS consensus, $SUM total TPS, $NUM_VALIDATORS nodes echo $NUM_SHARDS shards, $NUM_CONSENSUS consensus, $SUM total TPS, $NUM_VALIDATORS nodes, $NUM_TOTAL_NODES total nodes, $NUM_SIGS total signatures
for t in "${!TPS[@]}"; do for t in "${!TPS[@]}"; do
echo $t, ${TPS[$t]} echo $t, ${TPS[$t]}
done done

@ -1,12 +0,0 @@
127.0.0.1 9000 leader 0
127.0.0.1 9001 validator 0
127.0.0.1 9002 validator 0
127.0.0.1 9003 validator 0
127.0.0.1 9004 validator 0
127.0.0.1 9006 newnode 0
127.0.0.1 9007 newnode 0
127.0.0.1 9008 newnode 0
127.0.0.1 9009 newnode 0
127.0.0.1 9010 newnode 0
127.0.0.1 9011 newnode 0
127.0.0.1 19999 client 0

@ -0,0 +1,32 @@
127.0.0.1 9000 leader 0
127.0.0.1 9001 validator 0
127.0.0.1 9002 validator 0
127.0.0.1 9003 validator 0
127.0.0.1 9004 validator 0
127.0.0.1 9005 validator 0
127.0.0.1 9006 validator 0
127.0.0.1 9007 validator 0
127.0.0.1 9008 validator 0
127.0.0.1 9009 validator 0
127.0.0.1 9010 validator 0
127.0.0.1 9011 validator 0
127.0.0.1 9012 validator 0
127.0.0.1 9013 validator 0
127.0.0.1 9014 validator 0
127.0.0.1 9015 validator 0
127.0.0.1 9016 validator 0
127.0.0.1 9017 validator 0
127.0.0.1 9018 validator 0
127.0.0.1 9019 validator 0
127.0.0.1 9020 validator 0
127.0.0.1 9021 validator 0
127.0.0.1 9022 validator 0
127.0.0.1 9023 validator 0
127.0.0.1 9024 validator 0
127.0.0.1 9025 validator 0
127.0.0.1 9026 validator 0
127.0.0.1 9027 validator 0
127.0.0.1 9028 validator 0
127.0.0.1 9029 validator 0
127.0.0.1 9030 validator 0
127.0.0.1 19999 client 0

@ -0,0 +1,5 @@
127.0.0.1 9000 leader 0
127.0.0.1 9001 validator 0
127.0.0.1 9002 validator 0
127.0.0.1 9003 validator 0
127.0.0.1 9004 validator 0

@ -73,7 +73,7 @@ EOU
DB= DB=
TXGEN=true TXGEN=true
DURATION=90 DURATION=90
MIN=5 MIN=2
SHARDS=2 SHARDS=2
KILLPORT=9004 KILLPORT=9004
SYNC=false SYNC=false

@ -8,8 +8,8 @@ import (
"os" "os"
"sync" "sync"
"github.com/ipfs/go-log" log "github.com/ipfs/go-log"
"github.com/libp2p/go-libp2p" libp2p "github.com/libp2p/go-libp2p"
discovery "github.com/libp2p/go-libp2p-discovery" discovery "github.com/libp2p/go-libp2p-discovery"
libp2pdht "github.com/libp2p/go-libp2p-kad-dht" libp2pdht "github.com/libp2p/go-libp2p-kad-dht"
peer "github.com/libp2p/go-libp2p-peer" peer "github.com/libp2p/go-libp2p-peer"

@ -1,7 +1,7 @@
{ {
"directoryId": "integration", "directoryId": "integration",
"log": { "log": {
"treeId": "8541686838476068721", "treeId": "6511398593182094144",
"treeType": "PREORDERED_LOG", "treeType": "PREORDERED_LOG",
"hashStrategy": "RFC6962_SHA256", "hashStrategy": "RFC6962_SHA256",
"hashAlgorithm": "SHA256", "hashAlgorithm": "SHA256",
@ -11,7 +11,7 @@
} }
}, },
"map": { "map": {
"treeId": "6598072539431303895", "treeId": "7627063266021945174",
"treeType": "MAP", "treeType": "MAP",
"hashStrategy": "CONIKS_SHA256", "hashStrategy": "CONIKS_SHA256",
"hashAlgorithm": "SHA256", "hashAlgorithm": "SHA256",

@ -1,29 +1,31 @@
[ [
{ {
"Desc": "empty_alice", "Desc": "empty_alice",
"UserID": "alice", "UserIDs": [
"Resp": { "alice"
],
"GetUserResp": {
"revision": { "revision": {
"map_root": { "map_root": {
"map_root": { "map_root": {
"map_root": "AAEgT5c5rf7RjwprNaZTxAls9fuKTJ0h1PFrBF5VyUgfglMVfD9mQONvCAAAAAAAAAAAAAA=", "map_root": "AAEgoMiXEtKsVlKmOMkHhZ8DIdUpVxBXevDQqyz8HlVTYpwVg0ku8ZcHVAAAAAAAAAAAAAA=",
"signature": "MEUCIQCNSY1U94y28kwMU6JEzxpVT0NfxgSlBqW+Q+9gss6JqAIgDVQvDTPEC80qML4i0g0d/ZBRU/mRyW6Zf4IcWmFu4Fc=" "signature": "MEYCIQDhwON1ZH6j7qw5/kQTtGgO4a3zI028ajCIOQ6BdtMB3AIhAJE+NqncdDfJmYGklOFxyD9vfEWm5RlqRXvkRxV/jgEl"
} }
}, },
"latest_log_root": { "latest_log_root": {
"log_root": { "log_root": {
"timestamp_nanos": 1548182080757002000, "timestamp_nanos": 1550163163063238915,
"root_hash": "oFazZRJgxpnfHPgM+Ki1hu+w5plbmP2mk8OC7pNtYUM=", "root_hash": "zLL4sTnEciIm2uaPl/n7XPbiawbtx+63RyxDJz6fVJI=",
"tree_size": 1, "tree_size": 1,
"tree_revision": 1, "tree_revision": 1,
"key_hint": "dooq7H3+13E=", "key_hint": "Wl0ihYmY00A=",
"log_root": "AAEAAAAAAAAAASCgVrNlEmDGmd8c+Az4qLWG77DmmVuY/aaTw4Luk21hQxV8P2ZbJW8QAAAAAAAAAAEAAA==", "log_root": "AAEAAAAAAAAAASDMsvixOcRyIiba5o+X+ftc9uJrBu3H7rdHLEMnPp9UkhWDSS8GR8EDAAAAAAAAAAEAAA==",
"log_root_signature": "MEUCIQC9f1bDtioj1ToCJE7pFpDXoR4WDW81MZq5LeNkgiNt3wIgfiMGSLgRp1yIAC2L7bFz7qzkBpSCDJ8ECGZ/628aZms=" "log_root_signature": "MEYCIQDV9wVMePn/6JEOnmvr56evMDPWAs9ocmx9BZryPbumJgIhAM/RuOeXp+9eyqnuX0Xb07efPjvDX/ugRFy/ILp3CuZo"
} }
} }
}, },
"leaf": { "leaf": {
"vrf_proof": "rxm+eUnt0zCGVeHtrn0SF9dUZ6ZPgRcWYqroKpjNonQ8y+scOc+/3QuS9K9c6otPQK2yuHbSySnCDjsVGFbwawRTJUYW21qW/fCBqGRPufN2+S2CHVyVSP1EWLTIn1M0zJgw+OQcAgHUdSP1DkqoE5X9Le+VWO+nuJtc4eJODJCo", "vrf_proof": "CsrlVdKdfH2wcfVeaNtlVpVgUyLFmuRtLr9Q6sZa1m+Y+GgE1x2VXo+mfpKU0Txz7OPUv3JH0fMotV+NRWj33ARTJUYW21qW/fCBqGRPufN2+S2CHVyVSP1EWLTIn1M0zJgw+OQcAgHUdSP1DkqoE5X9Le+VWO+nuJtc4eJODJCo",
"map_inclusion": { "map_inclusion": {
"leaf": { "leaf": {
"index": "A9/B/HF0DP6pap5CSp8/Jo05FhoGWECfsTAnRlVdzvM=" "index": "A9/B/HF0DP6pap5CSp8/Jo05FhoGWECfsTAnRlVdzvM="
@ -289,39 +291,42 @@
} }
} }
}, },
"BatchListUserRevisionsResp": null,
"TrustNewLog": true "TrustNewLog": true
}, },
{ {
"Desc": "bob0_set", "Desc": "bob0_set",
"UserID": "bob", "UserIDs": [
"Resp": { "bob"
],
"GetUserResp": {
"revision": { "revision": {
"map_root": { "map_root": {
"map_root": { "map_root": {
"map_root": "AAEgvZoGMrg0Mz2GPvObuOxCbt0iwm3F4j6oFPssBYof/hgVfD9mqlPR0AAAAAAAAAABABISAhgBEgwQsf2F/Onsj74VGAI=", "map_root": "AAEg+O7XQigTSTtnjWvypQfnoTvYtsZMirOiOG6wsJEZk+UVg0kvHAH/3QAAAAAAAAABABISAhgBEgwQmv2Yq/Gl0sEVGAI=",
"signature": "MEQCIEpt5DMWdTCRsQA9fnzA2lO5XqD5p/fuJBNkO5sJhaX3AiBwPXdC16D69a7/YZY6pICK3GK7n2oYQxSWOVbBHJhqLg==" "signature": "MEUCIGHAhD1602oqYuCCV/SK4Y+isQzqGWvPCRGUAMf5CeMYAiEAxKMu4h5+elzr+/S5c4aOlPN9hS2EZSZK0QMxVVa3S34="
}, },
"log_inclusion": [ "log_inclusion": [
"oFazZRJgxpnfHPgM+Ki1hu+w5plbmP2mk8OC7pNtYUM=" "zLL4sTnEciIm2uaPl/n7XPbiawbtx+63RyxDJz6fVJI="
] ]
}, },
"latest_log_root": { "latest_log_root": {
"log_root": { "log_root": {
"timestamp_nanos": 1548182082188587000, "timestamp_nanos": 1550163163560158238,
"root_hash": "iLzyO7OaPvSPpetDbGd9eNpo/NfUcEYeBta8z/hYG3M=", "root_hash": "VigWWNMYkkC/tjfy+5vtTukZXZnppaYja73iLZoyD7c=",
"tree_size": 2, "tree_size": 2,
"tree_revision": 2, "tree_revision": 2,
"key_hint": "dooq7H3+13E=", "key_hint": "Wl0ihYmY00A=",
"log_root": "AAEAAAAAAAAAAiCIvPI7s5o+9I+l60NsZ3142mj819RwRh4G1rzP+FgbcxV8P2awea/4AAAAAAAAAAIAAA==", "log_root": "AAEAAAAAAAAAAiBWKBZY0xiSQL+2N/L7m+1O6RldmemlpiNrveItmjIPtxWDSS8j5iQeAAAAAAAAAAIAAA==",
"log_root_signature": "MEUCIC8BasnxCIVXyPFjgdRYy64Pnu4ln/fTGHERN+5CqRpKAiEAoYwgFeRYZZoRaKdBgUcu2noiCr4LDoyohzSltYIOX58=" "log_root_signature": "MEUCIHhitMsUiIXsnyQjLImwpMxZRt6OjQ9N5RVqtNqxxPqaAiEA5xHhyJctJsoiWNhysxmeHNrkqkNcJctY8Z77va6prkA="
}, },
"log_consistency": [ "log_consistency": [
"74BJt1hw9+CNRkcAlNhl0+nTymtLDwUBRsf4xRZP+Wk=" "bg5yr6Foqkjs6NdFXL3HqQqTBzy8hIsJ/a4JdgCH2jg="
] ]
} }
}, },
"leaf": { "leaf": {
"vrf_proof": "5QGNxndM4zopl1q2G3ry8eGSW5tB9nqP44e6LYpb8duYiNlm42VkdehwD6IDQus3CJOssIz62JxgwJpk/yESggSHy1HLhWFLT+nQFEzzYq4x2psj6PyUNlaPWfnwVizyOB31qTBMPiNmlf7Qgp/yArNoYDSuvrjR9Jmlku+iA5MU", "vrf_proof": "eKDyJl+cnnPH5/6ssSxdSiizLlATcMNwgYAFaynyuImzKHJqvl6cQ3TCnArWDnImX6OOGBGrsK4ZOf9qilihEwSHy1HLhWFLT+nQFEzzYq4x2psj6PyUNlaPWfnwVizyOB31qTBMPiNmlf7Qgp/yArNoYDSuvrjR9Jmlku+iA5MU",
"map_inclusion": { "map_inclusion": {
"leaf": { "leaf": {
"index": "Umn2fclcSdomcb9UlHcDY1SLm1A/wILzh8NdoYffbcQ=" "index": "Umn2fclcSdomcb9UlHcDY1SLm1A/wILzh8NdoYffbcQ="
@ -581,46 +586,49 @@
"", "",
"", "",
"", "",
"KQiyHtT5vfRpQnj3OgxqeTUJjwgqKRh8YrhyDumC9EE=", "3ZKTcseQ0R0OSbWOic6vzi/yxHE5c0lpJCz3pjhLlKc=",
"" ""
] ]
} }
} }
}, },
"BatchListUserRevisionsResp": null,
"TrustNewLog": false "TrustNewLog": false
}, },
{ {
"Desc": "set_carol", "Desc": "set_carol",
"UserID": "carol", "UserIDs": [
"Resp": { "carol"
],
"GetUserResp": {
"revision": { "revision": {
"map_root": { "map_root": {
"map_root": { "map_root": {
"map_root": "AAEgq+MeLyWm6X86AnvprF02hvXcohWdvDQ80iI3oSmegPwVfD9mx3OVmAAAAAAAAAACABwSAhgBEhYIsf2F/Onsj74VEIGz9Nvr7I++FRgC", "map_root": "AAEgwS/id1lfTZjhbfKdlZN0T6SNj+lsTgrvgC3tLQYJqakVg0kvNAxqPwAAAAAAAAACABwSAhgBEhYImv2Yq/Gl0sEVEMrXsu/ypdLBFRgC",
"signature": "MEYCIQD07j/mrSgmwpxSrJ1Y+Wphn8OQUefOAnVf1uhk8ayylAIhAMcZ3Whap5q86/DoB3PPc559er22dhswOzc2f8Fju+N0" "signature": "MEYCIQDdMG9YU9AsqeGRbBcB3TPoBmpXXROtqDti1eWNzKRV9wIhANPkgGRQ7AZPxMIL3phdEwoxom+T8nuNTCHxMMC0VGYV"
}, },
"log_inclusion": [ "log_inclusion": [
"iLzyO7OaPvSPpetDbGd9eNpo/NfUcEYeBta8z/hYG3M=" "VigWWNMYkkC/tjfy+5vtTukZXZnppaYja73iLZoyD7c="
] ]
}, },
"latest_log_root": { "latest_log_root": {
"log_root": { "log_root": {
"timestamp_nanos": 1548182082690022000, "timestamp_nanos": 1550163164059865137,
"root_hash": "e+oc/pnJysibFt+ukJjutna0OsAuYxSjpTnSQ/mfYtg=", "root_hash": "nC756jJgJOBGCYGS/xtyZ7eTNNkwKZ95MSHz+d766bM=",
"tree_size": 3, "tree_size": 3,
"tree_revision": 3, "tree_revision": 3,
"key_hint": "dooq7H3+13E=", "key_hint": "Wl0ihYmY00A=",
"log_root": "AAEAAAAAAAAAAyB76hz+mcnKyJsW366QmO62drQ6wC5jFKOlOdJD+Z9i2BV8P2bOXPpwAAAAAAAAAAMAAA==", "log_root": "AAEAAAAAAAAAAyCcLvnqMmAk4EYJgZL/G3Jnt5M02TApn3kxIfP53vrpsxWDSS9BrxAxAAAAAAAAAAMAAA==",
"log_root_signature": "MEUCIQDEMBnuDL0+CdMf7tLf53eD0+oA5uaBaZ1nRwWZYf5shAIgZJvUuSH2x3H5KrL+tmPYALaTXCfG7RggbxHnai2DYqA=" "log_root_signature": "MEQCIClfsNvnG/4xpxbTs4jzwsE4Vikpe0Fro0OfdT3efOM4AiADIW6Z63SOUxg+lYUl2MX8rYdlAfKp2n+NxgalObtZuA=="
}, },
"log_consistency": [ "log_consistency": [
"74BJt1hw9+CNRkcAlNhl0+nTymtLDwUBRsf4xRZP+Wk=", "bg5yr6Foqkjs6NdFXL3HqQqTBzy8hIsJ/a4JdgCH2jg=",
"JwjPnsR+/v/pG79eO4J9a+7huD2EB4ulIYwrrWhH7V0=" "jfNr9jOWGPAwb9oanLSYBabZXp5ShHREKQkDXxJ58Ts="
] ]
} }
}, },
"leaf": { "leaf": {
"vrf_proof": "2oz80eq1m7ogFje1YDDFi82+kApvcQ3/wSMyegOlE5gNZEzguYGUGWdcG09XWokfGMJwd3NEWR2TLf/uxZkENwSmXjmxq3oAt/q89fwRZiF4eoAGepK4YcyzKtBD4mfS6gB7/AQ7PNRUocMVfUQnZCienTNXyrdtaOTCtzwaDIjc", "vrf_proof": "jUkQi8HRJjH94aNdAxbVNcIP+Yq8wh937Odr8hZCgBkQfNa5pbQrNbyrc3Kn7DXa/djUY9pqrhiAb6VfnKxzNwSmXjmxq3oAt/q89fwRZiF4eoAGepK4YcyzKtBD4mfS6gB7/AQ7PNRUocMVfUQnZCienTNXyrdtaOTCtzwaDIjc",
"map_inclusion": { "map_inclusion": {
"leaf": { "leaf": {
"index": "JYx5mwUZM4rLnxSas5/NC9GiuqMYVWThqNqJdDz6bvw=" "index": "JYx5mwUZM4rLnxSas5/NC9GiuqMYVWThqNqJdDz6bvw="
@ -879,53 +887,56 @@
"", "",
"", "",
"", "",
"PhzerX1zlg+7TcTDd4dqhaEoCHIBEx/gkOHP4HwMrEE=", "CS0fqBO+DMoGNyIO4OtmyMS99zrCjYAsidzOoIuujVU=",
"+Yz47fGxCilRS8cEhVPZNxVJDbwWMtb8kc7ckdny0Yw=", "5QCCepR0tYVczBRnCvxlBMim8tO00KBKfX6dgKpDDQ8=",
"" ""
] ]
} }
} }
}, },
"BatchListUserRevisionsResp": null,
"TrustNewLog": false "TrustNewLog": false
}, },
{ {
"Desc": "bob1_get", "Desc": "bob1_get",
"UserID": "bob", "UserIDs": [
"Resp": { "bob"
],
"GetUserResp": {
"revision": { "revision": {
"map_root": { "map_root": {
"map_root": { "map_root": {
"map_root": "AAEgPJMMcS3zYA+lydtkvfNk8bgbQzLfemGSX7FhLr1kM4QVfD9m4Ul9wAAAAAAAAAADABwSAhgBEhYIgbP02+vsj74VEPm2kLrt7I++FRgC", "map_root": "AAEgr8xAKP6EGtoSi+ZFNI6ZwPimd9+KywJtad/yEJNevXgVg0kvUUZDTwAAAAAAAAADACYSDBDchvfv9KXSwRUYARIWCMrXsu/ypdLBFRDK17Lv8qXSwRUYAg==",
"signature": "MEUCIBtnr8CGfXlQvwD5zlGHu8YkTLktLv5QWbajrB1/kKQQAiEAzTdT19rnuMNtOROFryClaVo2f2KSRwuxTYFxOA/qNk4=" "signature": "MEUCIQDOirn/pstnBvgxdGn1kjNh5+fU5r2nrQvixjrboEHWigIgUFhzwB9b2+wEtd7hEuhTOOVx51/mO2CL+7tRrdFQ4D8="
}, },
"log_inclusion": [ "log_inclusion": [
"JwjPnsR+/v/pG79eO4J9a+7huD2EB4ulIYwrrWhH7V0=", "jfNr9jOWGPAwb9oanLSYBabZXp5ShHREKQkDXxJ58Ts=",
"iLzyO7OaPvSPpetDbGd9eNpo/NfUcEYeBta8z/hYG3M=" "VigWWNMYkkC/tjfy+5vtTukZXZnppaYja73iLZoyD7c="
] ]
}, },
"latest_log_root": { "latest_log_root": {
"log_root": { "log_root": {
"timestamp_nanos": 1548182083190127000, "timestamp_nanos": 1550163164559404835,
"root_hash": "lE/bH+OqOzXs2rFNvTfgu7gAyqYdx8s9kdUPLjHhS1M=", "root_hash": "rPSTqha5qIxXu6quztMmbIFdA6JNSzcUt+SvWyfwoEw=",
"tree_size": 4, "tree_size": 4,
"tree_revision": 4, "tree_revision": 4,
"key_hint": "dooq7H3+13E=", "key_hint": "Wl0ihYmY00A=",
"log_root": "AAEAAAAAAAAABCCUT9sf46o7NezasU29N+C7uADKph3Hyz2R1Q8uMeFLUxV8P2bsK/mYAAAAAAAAAAQAAA==", "log_root": "AAEAAAAAAAAABCCs9JOqFrmojFe7qq7O0yZsgV0Dok1LNxS35K9bJ/CgTBWDSS9fdW8jAAAAAAAAAAQAAA==",
"log_root_signature": "MEUCIBMfUpMx8mu2yHupR8QZJrTGZL2HSHQW89iDo60LGETzAiEAjmthkwb3+Tzkz5QKSOU93XEmVKA7qzZgwqRuVClWXZc=" "log_root_signature": "MEQCIDM6z/nW99h5K/uRJXb26Mhby7CIfXOWJjw8Uw57HLuhAiA0zeJKHaA0PKpi2gI1xVYVm2GFI3U0I1nmIuWqhLSbsw=="
}, },
"log_consistency": [ "log_consistency": [
"74BJt1hw9+CNRkcAlNhl0+nTymtLDwUBRsf4xRZP+Wk=", "bg5yr6Foqkjs6NdFXL3HqQqTBzy8hIsJ/a4JdgCH2jg=",
"KpnnGZxuBOimg2bVX+gxdBjrJEAWe/6Fr71HavKgH3M=" "vhBdOlNe4DDPlfxT3Bd1xabPmwVsu5CKflo21y6C8Ms="
] ]
} }
}, },
"leaf": { "leaf": {
"vrf_proof": "BGmKJMizUD1YVQuhuDdCGQkYuwo7fyJVx5naAXFgT8gySm+KCJHXHGQT4B6HVUoFSrDG5Hw3JRdKRn3GgLKyzgSHy1HLhWFLT+nQFEzzYq4x2psj6PyUNlaPWfnwVizyOB31qTBMPiNmlf7Qgp/yArNoYDSuvrjR9Jmlku+iA5MU", "vrf_proof": "X4rdMUgJvFL9XtkGlXq8e4D5nxsI13ZGSlcGYlGhtn0RM0u8sBtETbUIxDkZqYAIIHSNKzzeNwELrmNES9gr5ASHy1HLhWFLT+nQFEzzYq4x2psj6PyUNlaPWfnwVizyOB31qTBMPiNmlf7Qgp/yArNoYDSuvrjR9Jmlku+iA5MU",
"map_inclusion": { "map_inclusion": {
"leaf": { "leaf": {
"index": "Umn2fclcSdomcb9UlHcDY1SLm1A/wILzh8NdoYffbcQ=", "index": "Umn2fclcSdomcb9UlHcDY1SLm1A/wILzh8NdoYffbcQ=",
"leaf_hash": "dHeM44usHkJb0qjAYu8JX02MCq8VHolTyVXLDGJbWdI=", "leaf_hash": "SQW0xHDpdSOozSCd4yT9rzz1fKmsJECsgcucG7F2ax0=",
"leaf_value": "Cv4BGiBSafZ9yVxJ2iZxv1SUdwNjVIubUD/AgvOHw12hh99txDIgHo4zXOSWjFcn5ACKbEwbEI9J4dXDq/gCobR85mMaO3E6lQEIARKQAQqHAQo1dHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUuY3J5cHRvLnRpbmsuRWNkc2FQdWJsaWNLZXkSTBIGCAMQAhgCGiD7FU52mGR+kS2Xs4XygLK9bDfV1XiGcZtcM9t0WUvWeSIgmsoZ6shH0XVzZaQU9lPYV3EsZYiiNax6wCRQ8d53LbQYAxABGAEgAUIg47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFUSTAEAAAABMEUCIFaVVPyv3/XXtr4t9EaFRN7L0aqd6MvqNf5TuriTLAbKAiEA5aSSC4erRALn7WKjs+eXaXG2wa8WYxEjAaHC4a2QLlQ=" "leaf_value": "Cv4BGiBSafZ9yVxJ2iZxv1SUdwNjVIubUD/AgvOHw12hh99txDIgUnwYF+DKuiCzn5AtkpYfMrJLLhmHjQFOs5PN/b2TTDQ6lQEIARKQAQqHAQo1dHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUuY3J5cHRvLnRpbmsuRWNkc2FQdWJsaWNLZXkSTBIGCAMQAhgCGiD7FU52mGR+kS2Xs4XygLK9bDfV1XiGcZtcM9t0WUvWeSIgmsoZ6shH0XVzZaQU9lPYV3EsZYiiNax6wCRQ8d53LbQYAxABGAEgA0Ig47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFUSRjBEAiBlnjoyOans9beKGWEzPPslAcXzzI2f2c6iMNbSvcxwbwIgKaJFc0fkIYLqmGXTcoifGz/SpWsxxG0VWmmcJuqm03k="
}, },
"inclusion": [ "inclusion": [
"", "",
@ -1182,56 +1193,59 @@
"", "",
"", "",
"", "",
"H8rbxUJbcMZtCaW49J4nTPywn99uazGOTxYgJwV0XJI=", "Rkta1z50aDLsEglMAtxm5hm1eskPnOzZaIoDVWuYRTo=",
"" ""
] ]
}, },
"committed": { "committed": {
"key": "9yOpkO/50Op5NJJKdx1etw==", "key": "lOyD/kDFCHddGQaufT0A9Q==",
"data": "Ym9iLWtleTE=" "data": "Ym9iLWtleTE="
} }
} }
}, },
"BatchListUserRevisionsResp": null,
"TrustNewLog": false "TrustNewLog": false
}, },
{ {
"Desc": "bob1_set", "Desc": "bob1_set",
"UserID": "bob", "UserIDs": [
"Resp": { "bob"
],
"GetUserResp": {
"revision": { "revision": {
"map_root": { "map_root": {
"map_root": { "map_root": {
"map_root": "AAEgPJMMcS3zYA+lydtkvfNk8bgbQzLfemGSX7FhLr1kM4QVfD9m4Ul9wAAAAAAAAAADABwSAhgBEhYIgbP02+vsj74VEPm2kLrt7I++FRgC", "map_root": "AAEgr8xAKP6EGtoSi+ZFNI6ZwPimd9+KywJtad/yEJNevXgVg0kvUUZDTwAAAAAAAAADACYSDBDchvfv9KXSwRUYARIWCMrXsu/ypdLBFRDK17Lv8qXSwRUYAg==",
"signature": "MEUCIBtnr8CGfXlQvwD5zlGHu8YkTLktLv5QWbajrB1/kKQQAiEAzTdT19rnuMNtOROFryClaVo2f2KSRwuxTYFxOA/qNk4=" "signature": "MEUCIQDOirn/pstnBvgxdGn1kjNh5+fU5r2nrQvixjrboEHWigIgUFhzwB9b2+wEtd7hEuhTOOVx51/mO2CL+7tRrdFQ4D8="
}, },
"log_inclusion": [ "log_inclusion": [
"JwjPnsR+/v/pG79eO4J9a+7huD2EB4ulIYwrrWhH7V0=", "jfNr9jOWGPAwb9oanLSYBabZXp5ShHREKQkDXxJ58Ts=",
"iLzyO7OaPvSPpetDbGd9eNpo/NfUcEYeBta8z/hYG3M=" "VigWWNMYkkC/tjfy+5vtTukZXZnppaYja73iLZoyD7c="
] ]
}, },
"latest_log_root": { "latest_log_root": {
"log_root": { "log_root": {
"timestamp_nanos": 1548182083190127000, "timestamp_nanos": 1550163164559404835,
"root_hash": "lE/bH+OqOzXs2rFNvTfgu7gAyqYdx8s9kdUPLjHhS1M=", "root_hash": "rPSTqha5qIxXu6quztMmbIFdA6JNSzcUt+SvWyfwoEw=",
"tree_size": 4, "tree_size": 4,
"tree_revision": 4, "tree_revision": 4,
"key_hint": "dooq7H3+13E=", "key_hint": "Wl0ihYmY00A=",
"log_root": "AAEAAAAAAAAABCCUT9sf46o7NezasU29N+C7uADKph3Hyz2R1Q8uMeFLUxV8P2bsK/mYAAAAAAAAAAQAAA==", "log_root": "AAEAAAAAAAAABCCs9JOqFrmojFe7qq7O0yZsgV0Dok1LNxS35K9bJ/CgTBWDSS9fdW8jAAAAAAAAAAQAAA==",
"log_root_signature": "MEUCIBMfUpMx8mu2yHupR8QZJrTGZL2HSHQW89iDo60LGETzAiEAjmthkwb3+Tzkz5QKSOU93XEmVKA7qzZgwqRuVClWXZc=" "log_root_signature": "MEQCIDM6z/nW99h5K/uRJXb26Mhby7CIfXOWJjw8Uw57HLuhAiA0zeJKHaA0PKpi2gI1xVYVm2GFI3U0I1nmIuWqhLSbsw=="
}, },
"log_consistency": [ "log_consistency": [
"74BJt1hw9+CNRkcAlNhl0+nTymtLDwUBRsf4xRZP+Wk=", "bg5yr6Foqkjs6NdFXL3HqQqTBzy8hIsJ/a4JdgCH2jg=",
"KpnnGZxuBOimg2bVX+gxdBjrJEAWe/6Fr71HavKgH3M=" "vhBdOlNe4DDPlfxT3Bd1xabPmwVsu5CKflo21y6C8Ms="
] ]
} }
}, },
"leaf": { "leaf": {
"vrf_proof": "GUOB6Ea7R/98Hdbx8iAcupj7RwWYDucLOTR5h2I4VhfTRhIYL5gdSxCeLHpeM4ryKt+OMjbXMbFbPYLpLf56JQSHy1HLhWFLT+nQFEzzYq4x2psj6PyUNlaPWfnwVizyOB31qTBMPiNmlf7Qgp/yArNoYDSuvrjR9Jmlku+iA5MU", "vrf_proof": "1AP1ak1iddu5JsgtPI+bH0DOnqV40H/10G5oa92euFHvrxacC0E3bf4Sx4c9l01939a+ggMbJW+bQtaVVfoW2gSHy1HLhWFLT+nQFEzzYq4x2psj6PyUNlaPWfnwVizyOB31qTBMPiNmlf7Qgp/yArNoYDSuvrjR9Jmlku+iA5MU",
"map_inclusion": { "map_inclusion": {
"leaf": { "leaf": {
"index": "Umn2fclcSdomcb9UlHcDY1SLm1A/wILzh8NdoYffbcQ=", "index": "Umn2fclcSdomcb9UlHcDY1SLm1A/wILzh8NdoYffbcQ=",
"leaf_hash": "dHeM44usHkJb0qjAYu8JX02MCq8VHolTyVXLDGJbWdI=", "leaf_hash": "SQW0xHDpdSOozSCd4yT9rzz1fKmsJECsgcucG7F2ax0=",
"leaf_value": "Cv4BGiBSafZ9yVxJ2iZxv1SUdwNjVIubUD/AgvOHw12hh99txDIgHo4zXOSWjFcn5ACKbEwbEI9J4dXDq/gCobR85mMaO3E6lQEIARKQAQqHAQo1dHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUuY3J5cHRvLnRpbmsuRWNkc2FQdWJsaWNLZXkSTBIGCAMQAhgCGiD7FU52mGR+kS2Xs4XygLK9bDfV1XiGcZtcM9t0WUvWeSIgmsoZ6shH0XVzZaQU9lPYV3EsZYiiNax6wCRQ8d53LbQYAxABGAEgAUIg47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFUSTAEAAAABMEUCIFaVVPyv3/XXtr4t9EaFRN7L0aqd6MvqNf5TuriTLAbKAiEA5aSSC4erRALn7WKjs+eXaXG2wa8WYxEjAaHC4a2QLlQ=" "leaf_value": "Cv4BGiBSafZ9yVxJ2iZxv1SUdwNjVIubUD/AgvOHw12hh99txDIgUnwYF+DKuiCzn5AtkpYfMrJLLhmHjQFOs5PN/b2TTDQ6lQEIARKQAQqHAQo1dHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUuY3J5cHRvLnRpbmsuRWNkc2FQdWJsaWNLZXkSTBIGCAMQAhgCGiD7FU52mGR+kS2Xs4XygLK9bDfV1XiGcZtcM9t0WUvWeSIgmsoZ6shH0XVzZaQU9lPYV3EsZYiiNax6wCRQ8d53LbQYAxABGAEgA0Ig47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFUSRjBEAiBlnjoyOans9beKGWEzPPslAcXzzI2f2c6iMNbSvcxwbwIgKaJFc0fkIYLqmGXTcoifGz/SpWsxxG0VWmmcJuqm03k="
}, },
"inclusion": [ "inclusion": [
"", "",
@ -1488,16 +1502,636 @@
"", "",
"", "",
"", "",
"H8rbxUJbcMZtCaW49J4nTPywn99uazGOTxYgJwV0XJI=", "Rkta1z50aDLsEglMAtxm5hm1eskPnOzZaIoDVWuYRTo=",
"" ""
] ]
}, },
"committed": { "committed": {
"key": "9yOpkO/50Op5NJJKdx1etw==", "key": "lOyD/kDFCHddGQaufT0A9Q==",
"data": "Ym9iLWtleTE=" "data": "Ym9iLWtleTE="
} }
} }
}, },
"BatchListUserRevisionsResp": null,
"TrustNewLog": false "TrustNewLog": false
},
{
"Desc": "bob2_setkeys",
"UserIDs": [
"bob"
],
"GetUserResp": {
"revision": {
"map_root": {
"map_root": {
"map_root": "AAEg5FcjGwHeNaom4LLIVsDoAq78pdUAmPh4aO73uTsFxgkVg0kvddsCAwAAAAAAAAAEADASFgjchvfv9KXSwRUQ3Ib37/Sl0sEVGAESFgjK17Lv8qXSwRUQ1Lm98fal0sEVGAI=",
"signature": "MEUCIHWmtQTKyy50UJSRn5LwvDTbaCUxu0p81Ni9C3w9/wj8AiEAhab0XFv1oRLDYY6glFksjySMozOKs96TjQOv6lf/UX4="
},
"log_inclusion": [
"rPSTqha5qIxXu6quztMmbIFdA6JNSzcUt+SvWyfwoEw="
]
},
"latest_log_root": {
"log_root": {
"timestamp_nanos": 1550163165060111318,
"root_hash": "LAc8Z7Ikc8riPIslJt9F5znG1UQ++Dkn17hCGMzDqfc=",
"tree_size": 5,
"tree_revision": 5,
"key_hint": "Wl0ihYmY00A=",
"log_root": "AAEAAAAAAAAABSAsBzxnsiRzyuI8iyUm30XnOcbVRD74OSfXuEIYzMOp9xWDSS99TZvWAAAAAAAAAAUAAA==",
"log_root_signature": "MEYCIQC4mAyVpIIvN0RY6qnIKP1JzpWZ3Pu6KW7YyZvcJOjuVwIhAKfT+h+rqHo/89BcbDlr1WifsSJTqPK2Oyn3DuT5am7A"
},
"log_consistency": [
"bg5yr6Foqkjs6NdFXL3HqQqTBzy8hIsJ/a4JdgCH2jg=",
"vhBdOlNe4DDPlfxT3Bd1xabPmwVsu5CKflo21y6C8Ms=",
"M8kPsR06rZIcu6/1lIROQQF0CMbJyu9lax9B9R6YK8c="
]
}
},
"leaf": {
"vrf_proof": "GLa+Gy7Oacf0fOcldzfdn8u22z30nk79AOodoUPGSo2xW3ch7yU415QSuJq81bicrnUOI0SHyE6DAJMeSoqWZgSHy1HLhWFLT+nQFEzzYq4x2psj6PyUNlaPWfnwVizyOB31qTBMPiNmlf7Qgp/yArNoYDSuvrjR9Jmlku+iA5MU",
"map_inclusion": {
"leaf": {
"index": "Umn2fclcSdomcb9UlHcDY1SLm1A/wILzh8NdoYffbcQ=",
"leaf_hash": "N6v5xDU33hGz222cGxckUks1jgMs/v5CQl9wUEhIVQU=",
"leaf_value": "Cv4BGiBSafZ9yVxJ2iZxv1SUdwNjVIubUD/AgvOHw12hh99txDIgS3wBoYbDyPPn1y3yT4dSrxcsUDwCTY0nsXHyPptnH3I6lQEIARKQAQqHAQo1dHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUuY3J5cHRvLnRpbmsuRWNkc2FQdWJsaWNLZXkSTBIGCAMQAhgCGiD7FU52mGR+kS2Xs4XygLK9bDfV1XiGcZtcM9t0WUvWeSIgmsoZ6shH0XVzZaQU9lPYV3EsZYiiNax6wCRQ8d53LbQYAxABGAEgA0IgjKqSavCq1amOKbJKnkFkGDb6+bovPPwVkOU6pEwvIGQSRzBFAiEA8EyAUwNQjico7PZJHAJj9bENb1BFu++5FYwm2AFnkQUCIGCLB6s9C3t+O8hzmF9qhOCRKkUCdxIr79ZitbSdrHHn"
},
"inclusion": [
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"Rkta1z50aDLsEglMAtxm5hm1eskPnOzZaIoDVWuYRTo=",
""
]
},
"committed": {
"key": "TIb45cgNVG99ZcMZZwKITg==",
"data": "Ym9iLWtleTI="
}
}
},
"BatchListUserRevisionsResp": null,
"TrustNewLog": false
},
{
"Desc": "bob3_setnewkeys",
"UserIDs": [
"bob"
],
"GetUserResp": {
"revision": {
"map_root": {
"map_root": {
"map_root": "AAEgDEDkZ5Tp6QdJEtRV64qx1/BDWrHSacJRpuqjquFRwHgVg0kvjXzLqgAAAAAAAAAFADASFgjchvfv9KXSwRUQ3Ib37/Sl0sEVGAESFgjUub3x9qXSwRUQ/ISMyfil0sEVGAI=",
"signature": "MEUCIF/M2SrwldR0o6165z08Z+9qO2VZbai3VNPu/bFFQYJHAiEA9xFhOEKVvCEbDIIgP8n1G21IZwXRDzPsQnj3zvMs1LM="
},
"log_inclusion": [
"M8kPsR06rZIcu6/1lIROQQF0CMbJyu9lax9B9R6YK8c=",
"rPSTqha5qIxXu6quztMmbIFdA6JNSzcUt+SvWyfwoEw="
]
},
"latest_log_root": {
"log_root": {
"timestamp_nanos": 1550163165559842130,
"root_hash": "yqUFUtrPFww3aKN+UqSpmY02Gjp0Dyq8PzXHynF6tEE=",
"tree_size": 6,
"tree_revision": 6,
"key_hint": "Wl0ihYmY00A=",
"log_root": "AAEAAAAAAAAABiDKpQVS2s8XDDdoo35SpKmZjTYaOnQPKrw/NcfKcXq0QRWDSS+bFuVSAAAAAAAAAAYAAA==",
"log_root_signature": "MEQCIAZ9y3+bWwHTN232J6i52pBpdCGNdvmTEgOVVkztLq7WAiAarGKYogkwbmvSSvLUoLLcnB2pGI3CayBfC0TkWCGtAw=="
},
"log_consistency": [
"bg5yr6Foqkjs6NdFXL3HqQqTBzy8hIsJ/a4JdgCH2jg=",
"vhBdOlNe4DDPlfxT3Bd1xabPmwVsu5CKflo21y6C8Ms=",
"a/zGHQv9tBdH+doHigIOUzgLqJlKMamdrgsOC59ah2o="
]
}
},
"leaf": {
"vrf_proof": "4SwdCKyFF9PinfDMpOdaxQYU2RPOxGNBT61RkTZ/81aaozLD9UIt1zfau/0IJy+wQ5A9QSdAuNKE2MV2J4G7cwSHy1HLhWFLT+nQFEzzYq4x2psj6PyUNlaPWfnwVizyOB31qTBMPiNmlf7Qgp/yArNoYDSuvrjR9Jmlku+iA5MU",
"map_inclusion": {
"leaf": {
"index": "Umn2fclcSdomcb9UlHcDY1SLm1A/wILzh8NdoYffbcQ=",
"leaf_hash": "Z8QYCCHOq9Wu/AGoO/nievbwKK1LDO+Ob3qcjprsbqE=",
"leaf_value": "CpEDGiBSafZ9yVxJ2iZxv1SUdwNjVIubUD/AgvOHw12hh99txDIgvpXp3sPS32ohOJNBIQi0hEPCxpaxv3ONUe26VKz0Tk06qAIIAhKQAQqHAQo1dHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUuY3J5cHRvLnRpbmsuRWNkc2FQdWJsaWNLZXkSTBIGCAMQAhgCGiD7FU52mGR+kS2Xs4XygLK9bDfV1XiGcZtcM9t0WUvWeSIgmsoZ6shH0XVzZaQU9lPYV3EsZYiiNax6wCRQ8d53LbQYAxABGAEgAxKQAQqHAQo1dHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUuY3J5cHRvLnRpbmsuRWNkc2FQdWJsaWNLZXkSTBIGCAMQAhgCGiAkoNtHi7KFIxdbzTHTY21hFQWUxBss4D60t78xvBnr1CIgkXRukcfdn/bWwunc+1FbMiO7yI0vvetbBCwOhXkfdZQYAxABGAIgA0IgSQjPfrKaUfNwT7SzUAlctcI2sRG/9MOqWc9FwP+ZSrISRjBEAiBFlOdB2SS19frUv7lXcwaMJmooaRP7l7JfRku+tSjVTQIgO7Ctz7gBqv7FEqrM04WfzmBUQU4bUUHNTzaxhZPgd9oSRzBFAiEAmdsQwMPZxuUUoYPgCAP+yWgUR8/5sR5Rfby+3JFjpcoCIB5shoI3OKMpO97q5VdCSuUOkWbra7tRehiFmQ/JAT12"
},
"inclusion": [
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"Rkta1z50aDLsEglMAtxm5hm1eskPnOzZaIoDVWuYRTo=",
""
]
},
"committed": {
"key": "ijmEOG0QwH9tYl3DhXBT5g==",
"data": "Ym9iLWtleTM="
}
}
},
"BatchListUserRevisionsResp": null,
"TrustNewLog": true
} }
] ]