commit
4bf2b14847
@ -1,3 +0,0 @@ |
||||
package beaconchain |
||||
|
||||
//go:generate protoc beaconchain.proto --go_out=plugins=grpc:.
|
@ -1,256 +0,0 @@ |
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: beaconchain.proto
|
||||
|
||||
package beaconchain |
||||
|
||||
import ( |
||||
context "context" |
||||
fmt "fmt" |
||||
proto "github.com/golang/protobuf/proto" |
||||
grpc "google.golang.org/grpc" |
||||
math "math" |
||||
) |
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal |
||||
var _ = fmt.Errorf |
||||
var _ = math.Inf |
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// FetchLeadersRequest is the request to fetch the current leaders.
|
||||
type FetchLeadersRequest struct { |
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"` |
||||
XXX_unrecognized []byte `json:"-"` |
||||
XXX_sizecache int32 `json:"-"` |
||||
} |
||||
|
||||
func (m *FetchLeadersRequest) Reset() { *m = FetchLeadersRequest{} } |
||||
func (m *FetchLeadersRequest) String() string { return proto.CompactTextString(m) } |
||||
func (*FetchLeadersRequest) ProtoMessage() {} |
||||
func (*FetchLeadersRequest) Descriptor() ([]byte, []int) { |
||||
return fileDescriptor_474fd8061d1037cf, []int{0} |
||||
} |
||||
|
||||
func (m *FetchLeadersRequest) XXX_Unmarshal(b []byte) error { |
||||
return xxx_messageInfo_FetchLeadersRequest.Unmarshal(m, b) |
||||
} |
||||
func (m *FetchLeadersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
||||
return xxx_messageInfo_FetchLeadersRequest.Marshal(b, m, deterministic) |
||||
} |
||||
func (m *FetchLeadersRequest) XXX_Merge(src proto.Message) { |
||||
xxx_messageInfo_FetchLeadersRequest.Merge(m, src) |
||||
} |
||||
func (m *FetchLeadersRequest) XXX_Size() int { |
||||
return xxx_messageInfo_FetchLeadersRequest.Size(m) |
||||
} |
||||
func (m *FetchLeadersRequest) XXX_DiscardUnknown() { |
||||
xxx_messageInfo_FetchLeadersRequest.DiscardUnknown(m) |
||||
} |
||||
|
||||
var xxx_messageInfo_FetchLeadersRequest proto.InternalMessageInfo |
||||
|
||||
// FetchLeadersResponse is the response of FetchLeadersRequest.
|
||||
type FetchLeadersResponse struct { |
||||
Leaders []*FetchLeadersResponse_Leader `protobuf:"bytes,1,rep,name=leaders,proto3" json:"leaders,omitempty"` |
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"` |
||||
XXX_unrecognized []byte `json:"-"` |
||||
XXX_sizecache int32 `json:"-"` |
||||
} |
||||
|
||||
func (m *FetchLeadersResponse) Reset() { *m = FetchLeadersResponse{} } |
||||
func (m *FetchLeadersResponse) String() string { return proto.CompactTextString(m) } |
||||
func (*FetchLeadersResponse) ProtoMessage() {} |
||||
func (*FetchLeadersResponse) Descriptor() ([]byte, []int) { |
||||
return fileDescriptor_474fd8061d1037cf, []int{1} |
||||
} |
||||
|
||||
func (m *FetchLeadersResponse) XXX_Unmarshal(b []byte) error { |
||||
return xxx_messageInfo_FetchLeadersResponse.Unmarshal(m, b) |
||||
} |
||||
func (m *FetchLeadersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
||||
return xxx_messageInfo_FetchLeadersResponse.Marshal(b, m, deterministic) |
||||
} |
||||
func (m *FetchLeadersResponse) XXX_Merge(src proto.Message) { |
||||
xxx_messageInfo_FetchLeadersResponse.Merge(m, src) |
||||
} |
||||
func (m *FetchLeadersResponse) XXX_Size() int { |
||||
return xxx_messageInfo_FetchLeadersResponse.Size(m) |
||||
} |
||||
func (m *FetchLeadersResponse) XXX_DiscardUnknown() { |
||||
xxx_messageInfo_FetchLeadersResponse.DiscardUnknown(m) |
||||
} |
||||
|
||||
var xxx_messageInfo_FetchLeadersResponse proto.InternalMessageInfo |
||||
|
||||
func (m *FetchLeadersResponse) GetLeaders() []*FetchLeadersResponse_Leader { |
||||
if m != nil { |
||||
return m.Leaders |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
type FetchLeadersResponse_Leader struct { |
||||
Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` |
||||
Port string `protobuf:"bytes,2,opt,name=port,proto3" json:"port,omitempty"` |
||||
ShardId uint32 `protobuf:"varint,3,opt,name=shardId,proto3" json:"shardId,omitempty"` |
||||
PeerID string `protobuf:"bytes,4,opt,name=peerID,proto3" json:"peerID,omitempty"` |
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"` |
||||
XXX_unrecognized []byte `json:"-"` |
||||
XXX_sizecache int32 `json:"-"` |
||||
} |
||||
|
||||
func (m *FetchLeadersResponse_Leader) Reset() { *m = FetchLeadersResponse_Leader{} } |
||||
func (m *FetchLeadersResponse_Leader) String() string { return proto.CompactTextString(m) } |
||||
func (*FetchLeadersResponse_Leader) ProtoMessage() {} |
||||
func (*FetchLeadersResponse_Leader) Descriptor() ([]byte, []int) { |
||||
return fileDescriptor_474fd8061d1037cf, []int{1, 0} |
||||
} |
||||
|
||||
func (m *FetchLeadersResponse_Leader) XXX_Unmarshal(b []byte) error { |
||||
return xxx_messageInfo_FetchLeadersResponse_Leader.Unmarshal(m, b) |
||||
} |
||||
func (m *FetchLeadersResponse_Leader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
||||
return xxx_messageInfo_FetchLeadersResponse_Leader.Marshal(b, m, deterministic) |
||||
} |
||||
func (m *FetchLeadersResponse_Leader) XXX_Merge(src proto.Message) { |
||||
xxx_messageInfo_FetchLeadersResponse_Leader.Merge(m, src) |
||||
} |
||||
func (m *FetchLeadersResponse_Leader) XXX_Size() int { |
||||
return xxx_messageInfo_FetchLeadersResponse_Leader.Size(m) |
||||
} |
||||
func (m *FetchLeadersResponse_Leader) XXX_DiscardUnknown() { |
||||
xxx_messageInfo_FetchLeadersResponse_Leader.DiscardUnknown(m) |
||||
} |
||||
|
||||
var xxx_messageInfo_FetchLeadersResponse_Leader proto.InternalMessageInfo |
||||
|
||||
func (m *FetchLeadersResponse_Leader) GetIp() string { |
||||
if m != nil { |
||||
return m.Ip |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
func (m *FetchLeadersResponse_Leader) GetPort() string { |
||||
if m != nil { |
||||
return m.Port |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
func (m *FetchLeadersResponse_Leader) GetShardId() uint32 { |
||||
if m != nil { |
||||
return m.ShardId |
||||
} |
||||
return 0 |
||||
} |
||||
|
||||
func (m *FetchLeadersResponse_Leader) GetPeerID() string { |
||||
if m != nil { |
||||
return m.PeerID |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
func init() { |
||||
proto.RegisterType((*FetchLeadersRequest)(nil), "beaconchain.FetchLeadersRequest") |
||||
proto.RegisterType((*FetchLeadersResponse)(nil), "beaconchain.FetchLeadersResponse") |
||||
proto.RegisterType((*FetchLeadersResponse_Leader)(nil), "beaconchain.FetchLeadersResponse.Leader") |
||||
} |
||||
|
||||
func init() { proto.RegisterFile("beaconchain.proto", fileDescriptor_474fd8061d1037cf) } |
||||
|
||||
var fileDescriptor_474fd8061d1037cf = []byte{ |
||||
// 222 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0xcd, 0x4a, 0xc4, 0x30, |
||||
0x14, 0x85, 0x4d, 0x67, 0xe8, 0xe0, 0x1d, 0x15, 0xbc, 0xfe, 0x10, 0x66, 0x15, 0xbb, 0xca, 0xaa, |
||||
0x8b, 0xf1, 0x0d, 0xaa, 0x08, 0x05, 0x57, 0x11, 0xb7, 0x42, 0x9a, 0x5e, 0x68, 0x50, 0x9a, 0x98, |
||||
0x44, 0x1f, 0xce, 0xa7, 0x13, 0x53, 0x0b, 0x15, 0x44, 0x77, 0x39, 0x1f, 0x39, 0xe4, 0xcb, 0x81, |
||||
0xd3, 0x8e, 0xb4, 0x71, 0xa3, 0x19, 0xb4, 0x1d, 0x6b, 0x1f, 0x5c, 0x72, 0xb8, 0x5d, 0xa0, 0xea, |
||||
0x02, 0xce, 0xee, 0x28, 0x99, 0xe1, 0x9e, 0x74, 0x4f, 0x21, 0x2a, 0x7a, 0x7d, 0xa3, 0x98, 0xaa, |
||||
0x0f, 0x06, 0xe7, 0x3f, 0x79, 0xf4, 0x6e, 0x8c, 0x84, 0x0d, 0x6c, 0x5e, 0x26, 0xc4, 0x99, 0x58, |
||||
0xc9, 0xed, 0x5e, 0xd6, 0xcb, 0x17, 0x7e, 0xeb, 0xd4, 0x53, 0x56, 0x73, 0x71, 0xf7, 0x04, 0xe5, |
||||
0x84, 0xf0, 0x04, 0x0a, 0xeb, 0x39, 0x13, 0x4c, 0x1e, 0xaa, 0xc2, 0x7a, 0x44, 0x58, 0x7b, 0x17, |
||||
0x12, 0x2f, 0x32, 0xc9, 0x67, 0xe4, 0xb0, 0x89, 0x83, 0x0e, 0x7d, 0xdb, 0xf3, 0x95, 0x60, 0xf2, |
||||
0x58, 0xcd, 0x11, 0x2f, 0xa1, 0xf4, 0x44, 0xa1, 0xbd, 0xe5, 0xeb, 0x7c, 0xff, 0x3b, 0xed, 0x9f, |
||||
0x01, 0x9b, 0xec, 0x74, 0xf3, 0xe5, 0xf4, 0x40, 0xe1, 0xdd, 0x1a, 0xc2, 0x47, 0x38, 0x5a, 0xda, |
||||
0xa1, 0xf8, 0x43, 0x3c, 0x8f, 0xb0, 0xbb, 0xfa, 0xf7, 0x6b, 0xd5, 0x41, 0x57, 0xe6, 0x51, 0xaf, |
||||
0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x35, 0x50, 0x26, 0x86, 0x69, 0x01, 0x00, 0x00, |
||||
} |
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context |
||||
var _ grpc.ClientConn |
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4 |
||||
|
||||
// BeaconChainServiceClient is the client API for BeaconChainService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type BeaconChainServiceClient interface { |
||||
FetchLeaders(ctx context.Context, in *FetchLeadersRequest, opts ...grpc.CallOption) (*FetchLeadersResponse, error) |
||||
} |
||||
|
||||
type beaconChainServiceClient struct { |
||||
cc *grpc.ClientConn |
||||
} |
||||
|
||||
func NewBeaconChainServiceClient(cc *grpc.ClientConn) BeaconChainServiceClient { |
||||
return &beaconChainServiceClient{cc} |
||||
} |
||||
|
||||
func (c *beaconChainServiceClient) FetchLeaders(ctx context.Context, in *FetchLeadersRequest, opts ...grpc.CallOption) (*FetchLeadersResponse, error) { |
||||
out := new(FetchLeadersResponse) |
||||
err := c.cc.Invoke(ctx, "/beaconchain.BeaconChainService/FetchLeaders", in, out, opts...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return out, nil |
||||
} |
||||
|
||||
// BeaconChainServiceServer is the server API for BeaconChainService service.
|
||||
type BeaconChainServiceServer interface { |
||||
FetchLeaders(context.Context, *FetchLeadersRequest) (*FetchLeadersResponse, error) |
||||
} |
||||
|
||||
func RegisterBeaconChainServiceServer(s *grpc.Server, srv BeaconChainServiceServer) { |
||||
s.RegisterService(&_BeaconChainService_serviceDesc, srv) |
||||
} |
||||
|
||||
func _BeaconChainService_FetchLeaders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
||||
in := new(FetchLeadersRequest) |
||||
if err := dec(in); err != nil { |
||||
return nil, err |
||||
} |
||||
if interceptor == nil { |
||||
return srv.(BeaconChainServiceServer).FetchLeaders(ctx, in) |
||||
} |
||||
info := &grpc.UnaryServerInfo{ |
||||
Server: srv, |
||||
FullMethod: "/beaconchain.BeaconChainService/FetchLeaders", |
||||
} |
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
||||
return srv.(BeaconChainServiceServer).FetchLeaders(ctx, req.(*FetchLeadersRequest)) |
||||
} |
||||
return interceptor(ctx, in, info, handler) |
||||
} |
||||
|
||||
var _BeaconChainService_serviceDesc = grpc.ServiceDesc{ |
||||
ServiceName: "beaconchain.BeaconChainService", |
||||
HandlerType: (*BeaconChainServiceServer)(nil), |
||||
Methods: []grpc.MethodDesc{ |
||||
{ |
||||
MethodName: "FetchLeaders", |
||||
Handler: _BeaconChainService_FetchLeaders_Handler, |
||||
}, |
||||
}, |
||||
Streams: []grpc.StreamDesc{}, |
||||
Metadata: "beaconchain.proto", |
||||
} |
@ -1,23 +0,0 @@ |
||||
syntax = "proto3"; |
||||
|
||||
package beaconchain; |
||||
|
||||
// BeaconChainService is the service used for any beacon chain requests. |
||||
service BeaconChainService { |
||||
rpc FetchLeaders(FetchLeadersRequest) returns (FetchLeadersResponse) {} |
||||
} |
||||
|
||||
// FetchLeadersRequest is the request to fetch the current leaders. |
||||
message FetchLeadersRequest { |
||||
} |
||||
|
||||
// FetchLeadersResponse is the response of FetchLeadersRequest. |
||||
message FetchLeadersResponse { |
||||
message Leader { |
||||
string ip = 1; |
||||
string port = 2; |
||||
uint32 shardId = 3; |
||||
string peerID = 4; |
||||
} |
||||
repeated Leader leaders = 1; |
||||
} |
@ -1,3 +0,0 @@ |
||||
The beaconchain package currently is a centralized service that allocates every potential new node (uses newnode package) a specific shard. |
||||
If N is the number of shards, supplied as a parameter at bootup, then first N joining nodes are assigned to be the leaders of those N shards. The nodes that come after that are then assigned shards based on their order of entry. |
||||
In the future, the generation of randomness would be decentralized. Such randomness would be provided to a new node once its PoS has been verified and then the node would be able to calculate its own shard automatically. |
@ -1,200 +0,0 @@ |
||||
package beaconchain |
||||
|
||||
import ( |
||||
"math/rand" |
||||
"os" |
||||
"strconv" |
||||
"sync" |
||||
|
||||
"github.com/harmony-one/bls/ffi/go/bls" |
||||
"github.com/harmony-one/harmony/api/proto/bcconn" |
||||
proto_identity "github.com/harmony-one/harmony/api/proto/identity" |
||||
"github.com/harmony-one/harmony/api/proto/node" |
||||
"github.com/harmony-one/harmony/crypto/pki" |
||||
beaconchain "github.com/harmony-one/harmony/internal/beaconchain/rpc" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
"github.com/harmony-one/harmony/p2p" |
||||
"github.com/harmony-one/harmony/p2p/host" |
||||
"github.com/harmony-one/harmony/p2p/p2pimpl" |
||||
p2p_crypto "github.com/libp2p/go-libp2p-crypto" |
||||
peer "github.com/libp2p/go-libp2p-peer" |
||||
) |
||||
|
||||
//BCState keeps track of the state the beaconchain is in
|
||||
type BCState int |
||||
|
||||
var mutex sync.Mutex |
||||
var identityPerBlock = 100000 |
||||
|
||||
// BeaconchainServicePortDiff is the positive port diff from beacon chain's self port
|
||||
const BeaconchainServicePortDiff = 4444 |
||||
|
||||
//BCInfo is the information that needs to be stored on the disk in order to allow for a restart.
|
||||
type BCInfo struct { |
||||
Leaders []*node.Info `json:"leaders"` |
||||
ShardLeaderMap map[int]*node.Info `json:"shardLeaderMap"` |
||||
NumberOfShards int `json:"numShards"` |
||||
NumberOfNodesAdded int `json:"numNodesAdded"` |
||||
IP string `json:"ip"` |
||||
Port string `json:"port"` |
||||
} |
||||
|
||||
// BeaconChain (Blockchain) keeps Identities per epoch, currently centralized!
|
||||
type BeaconChain struct { |
||||
BCInfo BCInfo |
||||
ShardLeaderMap map[int]*node.Info |
||||
PubKey *bls.PublicKey |
||||
host p2p.Host |
||||
state BCState |
||||
rpcServer *beaconchain.Server |
||||
Peer p2p.Peer |
||||
Self p2p.Peer // self Peer
|
||||
} |
||||
|
||||
//SaveFile is to store the file in which beaconchain info will be stored.
|
||||
var SaveFile string |
||||
|
||||
// Followings are the set of states of that beaconchain can be in.
|
||||
const ( |
||||
NodeInfoReceived BCState = iota |
||||
RandomInfoSent |
||||
) |
||||
|
||||
// SupportRPC initializes and starts the rpc service
|
||||
func (bc *BeaconChain) SupportRPC() { |
||||
bc.InitRPCServer() |
||||
bc.StartRPCServer() |
||||
} |
||||
|
||||
// InitRPCServer initializes Rpc server.
|
||||
func (bc *BeaconChain) InitRPCServer() { |
||||
bc.rpcServer = beaconchain.NewServer(bc.GetShardLeaderMap) |
||||
} |
||||
|
||||
// StartRPCServer starts Rpc server.
|
||||
func (bc *BeaconChain) StartRPCServer() { |
||||
port, err := strconv.Atoi(bc.BCInfo.Port) |
||||
if err != nil { |
||||
port = 0 |
||||
} |
||||
utils.GetLogInstance().Info("support_client: StartRpcServer on port:", "port", strconv.Itoa(port+BeaconchainServicePortDiff)) |
||||
bc.rpcServer.Start(bc.BCInfo.IP, strconv.Itoa(port+BeaconchainServicePortDiff)) |
||||
} |
||||
|
||||
// GetShardLeaderMap returns the map from shard id to leader.
|
||||
func (bc *BeaconChain) GetShardLeaderMap() map[int]*node.Info { |
||||
result := make(map[int]*node.Info) |
||||
for i, leader := range bc.BCInfo.Leaders { |
||||
result[i] = leader |
||||
} |
||||
return result |
||||
} |
||||
|
||||
//New beaconchain initialization
|
||||
func New(numShards int, ip, port string, key p2p_crypto.PrivKey) *BeaconChain { |
||||
bc := BeaconChain{} |
||||
bc.PubKey = generateBCKey() |
||||
bc.Self = p2p.Peer{IP: ip, Port: port} |
||||
bc.host, _ = p2pimpl.NewHost(&bc.Self, key) |
||||
bcinfo := &BCInfo{NumberOfShards: numShards, NumberOfNodesAdded: 0, |
||||
IP: ip, |
||||
Port: port, |
||||
ShardLeaderMap: make(map[int]*node.Info)} |
||||
bc.BCInfo = *bcinfo |
||||
return &bc |
||||
} |
||||
|
||||
func generateBCKey() *bls.PublicKey { |
||||
r := rand.Intn(1000) |
||||
priKey := pki.GetBLSPrivateKeyFromInt(r) |
||||
pubkey := priKey.GetPublicKey() |
||||
return pubkey |
||||
} |
||||
|
||||
//AcceptNodeInfo deserializes node information received via beaconchain handler
|
||||
func (bc *BeaconChain) AcceptNodeInfo(b []byte) *node.Info { |
||||
Node := bcconn.DeserializeNodeInfo(b) |
||||
utils.GetLogInstance().Info("New Node Connection", "IP", Node.IP, "Port", Node.Port, "PeerID", Node.PeerID) |
||||
bc.Peer = p2p.Peer{IP: Node.IP, Port: Node.Port, PeerID: Node.PeerID} |
||||
bc.host.AddPeer(&bc.Peer) |
||||
|
||||
bc.BCInfo.NumberOfNodesAdded = bc.BCInfo.NumberOfNodesAdded + 1 |
||||
shardNum, isLeader := utils.AllocateShard(bc.BCInfo.NumberOfNodesAdded, bc.BCInfo.NumberOfShards) |
||||
if isLeader { |
||||
bc.BCInfo.Leaders = append(bc.BCInfo.Leaders, Node) |
||||
bc.BCInfo.ShardLeaderMap[shardNum] = Node |
||||
} |
||||
go SaveBeaconChainInfo(SaveFile, bc) |
||||
bc.state = NodeInfoReceived |
||||
return Node |
||||
} |
||||
|
||||
//RespondRandomness sends a randomness beacon to the node inorder for it process what shard it will be in
|
||||
func (bc *BeaconChain) RespondRandomness(Node *node.Info) { |
||||
bci := bc.BCInfo |
||||
response := bcconn.ResponseRandomNumber{NumberOfShards: bci.NumberOfShards, NumberOfNodesAdded: bci.NumberOfNodesAdded, Leaders: bci.Leaders} |
||||
msg := bcconn.SerializeRandomInfo(response) |
||||
msgToSend := proto_identity.ConstructIdentityMessage(proto_identity.Acknowledge, msg) |
||||
utils.GetLogInstance().Info("Sent Out Msg", "# Nodes", response.NumberOfNodesAdded) |
||||
for i, n := range response.Leaders { |
||||
utils.GetLogInstance().Info("Sent Out Msg", "leader", i, "nodeInfo", n.PeerID) |
||||
} |
||||
host.SendMessage(bc.host, bc.Peer, msgToSend, nil) |
||||
bc.state = RandomInfoSent |
||||
} |
||||
|
||||
//AcceptConnections welcomes new connections
|
||||
func (bc *BeaconChain) AcceptConnections(b []byte) { |
||||
node := bc.AcceptNodeInfo(b) |
||||
bc.RespondRandomness(node) |
||||
} |
||||
|
||||
//StartServer a server and process the request by a handler.
|
||||
func (bc *BeaconChain) StartServer() { |
||||
bc.host.BindHandlerAndServe(bc.BeaconChainHandler) |
||||
} |
||||
|
||||
//SaveBeaconChainInfo to disk
|
||||
func SaveBeaconChainInfo(filePath string, bc *BeaconChain) error { |
||||
bci := BCtoBCI(bc) |
||||
err := utils.Save(filePath, bci) |
||||
return err |
||||
} |
||||
|
||||
//LoadBeaconChainInfo from disk
|
||||
func LoadBeaconChainInfo(path string) (*BeaconChain, error) { |
||||
bci := &BCInfo{} |
||||
var err error |
||||
if _, err := os.Stat(path); err != nil { |
||||
return nil, err |
||||
} |
||||
err = utils.Load(path, bci) |
||||
var bc *BeaconChain |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
bc = BCItoBC(bci) |
||||
return bc, err |
||||
} |
||||
|
||||
// BCtoBCI converts beaconchain into beaconchaininfo
|
||||
func BCtoBCI(bc *BeaconChain) *BCInfo { |
||||
bci := &BCInfo{Leaders: bc.BCInfo.Leaders, ShardLeaderMap: bc.BCInfo.ShardLeaderMap, NumberOfShards: bc.BCInfo.NumberOfShards, NumberOfNodesAdded: bc.BCInfo.NumberOfNodesAdded, IP: bc.BCInfo.IP, Port: bc.BCInfo.Port} |
||||
return bci |
||||
} |
||||
|
||||
//BCItoBC converts beconchaininfo to beaconchain
|
||||
func BCItoBC(bci *BCInfo) *BeaconChain { |
||||
bc := &BeaconChain{BCInfo: *bci} |
||||
return bc |
||||
} |
||||
|
||||
//SetSaveFile sets the filepath where beaconchain will be saved
|
||||
func SetSaveFile(path string) { |
||||
SaveFile = path |
||||
} |
||||
|
||||
//GetID return ID
|
||||
func (bc *BeaconChain) GetID() peer.ID { |
||||
return bc.host.GetID() |
||||
} |
@ -1,59 +0,0 @@ |
||||
package beaconchain |
||||
|
||||
import ( |
||||
"github.com/harmony-one/harmony/api/proto" |
||||
proto_identity "github.com/harmony-one/harmony/api/proto/identity" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
"github.com/harmony-one/harmony/p2p" |
||||
) |
||||
|
||||
// BeaconChainHandler handles registration of new Identities
|
||||
func (bc *BeaconChain) BeaconChainHandler(s p2p.Stream) { |
||||
content, err := p2p.ReadMessageContent(s) |
||||
if err != nil { |
||||
utils.GetLogInstance().Error("Read p2p data failed") |
||||
return |
||||
} |
||||
msgCategory, err := proto.GetMessageCategory(content) |
||||
if err != nil { |
||||
utils.GetLogInstance().Error("Read message category failed", "err", err) |
||||
return |
||||
} |
||||
msgType, err := proto.GetMessageType(content) |
||||
if err != nil { |
||||
utils.GetLogInstance().Error("Read action type failed") |
||||
return |
||||
} |
||||
msgPayload, err := proto.GetMessagePayload(content) |
||||
if err != nil { |
||||
utils.GetLogInstance().Error("Read message payload failed") |
||||
return |
||||
} |
||||
identityMsgPayload, err := proto_identity.GetIdentityMessagePayload(msgPayload) |
||||
if err != nil { |
||||
utils.GetLogInstance().Error("Read message payload failed") |
||||
return |
||||
} |
||||
switch msgCategory { |
||||
case proto.Identity: |
||||
actionType := proto_identity.IDMessageType(msgType) |
||||
switch actionType { |
||||
case proto_identity.Identity: |
||||
utils.GetLogInstance().Info("Message category is of the type identity protocol, which is correct!") |
||||
idMsgType, err := proto_identity.GetIdentityMessageType(msgPayload) |
||||
if err != nil { |
||||
utils.GetLogInstance().Error("Error finding the identity message type") |
||||
} |
||||
switch idMsgType { |
||||
case proto_identity.Register: |
||||
utils.GetLogInstance().Info("Identity Message Type is of the type Register") |
||||
bc.AcceptConnections(identityMsgPayload) |
||||
default: |
||||
utils.GetLogInstance().Error("Unrecognized identity message type", "type", idMsgType) |
||||
} |
||||
default: |
||||
utils.GetLogInstance().Error("Unrecognized message category", "actionType", actionType) |
||||
} |
||||
|
||||
} |
||||
} |
@ -1,150 +0,0 @@ |
||||
package beaconchain |
||||
|
||||
import ( |
||||
"log" |
||||
"os" |
||||
"reflect" |
||||
"strconv" |
||||
"testing" |
||||
|
||||
"github.com/harmony-one/harmony/api/proto/bcconn" |
||||
"github.com/harmony-one/harmony/api/proto/node" |
||||
beaconchain "github.com/harmony-one/harmony/internal/beaconchain/rpc" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
"github.com/stretchr/testify/assert" |
||||
) |
||||
|
||||
var ( |
||||
leader1 = &node.Info{IP: "127.0.0.1", Port: "9981"} |
||||
leader2 = &node.Info{IP: "127.0.0.1", Port: "9982"} |
||||
leaders = []*node.Info{leader1, leader2} |
||||
shardLeaderMap = map[int]*node.Info{ |
||||
0: leader1, |
||||
1: leader2, |
||||
} |
||||
) |
||||
|
||||
func TestNewNode(t *testing.T) { |
||||
var ip, port string |
||||
ip = "127.0.0.1" |
||||
port = "7523" |
||||
numshards := 2 |
||||
priKey, _, _ := utils.GenKeyP2P(ip, port) |
||||
bc := New(numshards, ip, port, priKey) |
||||
|
||||
if bc.PubKey == nil { |
||||
t.Error("beacon chain public key not initialized") |
||||
} |
||||
|
||||
if bc.BCInfo.NumberOfNodesAdded != 0 { |
||||
t.Error("beacon chain number of nodes starting with is not zero! (should be zero)") |
||||
} |
||||
|
||||
if bc.BCInfo.NumberOfShards != numshards { |
||||
t.Error("beacon chain number of shards not initialized to given number of desired shards") |
||||
} |
||||
} |
||||
|
||||
func TestShardLeaderMap(t *testing.T) { |
||||
var ip string |
||||
ip = "127.0.0.1" |
||||
beaconport := "7523" |
||||
numshards := 1 |
||||
priKey, _, _ := utils.GenKeyP2P(ip, beaconport) |
||||
bc := New(numshards, ip, beaconport, priKey) |
||||
bc.BCInfo.Leaders = leaders |
||||
if !reflect.DeepEqual(bc.GetShardLeaderMap(), shardLeaderMap) { |
||||
t.Error("The function GetShardLeaderMap doesn't work well") |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestFetchLeaders(t *testing.T) { |
||||
var ip string |
||||
ip = "127.0.0.1" |
||||
beaconport := "7523" |
||||
numshards := 1 |
||||
priKey, _, _ := utils.GenKeyP2P(ip, beaconport) |
||||
bc := New(numshards, ip, beaconport, priKey) |
||||
bc.BCInfo.Leaders = leaders |
||||
bc.rpcServer = beaconchain.NewServer(bc.GetShardLeaderMap) |
||||
bc.StartRPCServer() |
||||
port, _ := strconv.Atoi(beaconport) |
||||
bcClient := beaconchain.NewClient("127.0.0.1", strconv.Itoa(port+BeaconchainServicePortDiff)) |
||||
response := bcClient.GetLeaders() |
||||
retleaders := response.GetLeaders() |
||||
if !(retleaders[0].GetIp() == leaders[0].IP || retleaders[0].GetPort() == leaders[0].Port || retleaders[1].GetPort() == leaders[1].Port) { |
||||
t.Error("Fetch leaders response is not as expected") |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestAcceptNodeInfo(t *testing.T) { |
||||
var ip string |
||||
ip = "127.0.0.1" |
||||
beaconport := "7523" |
||||
numshards := 1 |
||||
priKey, _, _ := utils.GenKeyP2P(ip, beaconport) |
||||
bc := New(numshards, ip, beaconport, priKey) |
||||
b := bcconn.SerializeNodeInfo(leader1) |
||||
node := bc.AcceptNodeInfo(b) |
||||
if !reflect.DeepEqual(node, leader1) { |
||||
t.Error("Beaconchain is unable to deserialize incoming node info") |
||||
} |
||||
if len(bc.BCInfo.Leaders) != 1 { |
||||
t.Error("Beaconchain was unable to update the leader array") |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestRespondRandomness(t *testing.T) { |
||||
var ip string |
||||
ip = "127.0.0.1" |
||||
beaconport := "7523" |
||||
numshards := 1 |
||||
priKey, _, _ := utils.GenKeyP2P(ip, beaconport) |
||||
bc := New(numshards, ip, beaconport, priKey) |
||||
bc.RespondRandomness(leader1) |
||||
assert.Equal(t, RandomInfoSent, bc.state) |
||||
} |
||||
|
||||
func TestAcceptConnections(t *testing.T) { |
||||
var ip string |
||||
ip = "127.0.0.1" |
||||
beaconport := "7523" |
||||
numshards := 1 |
||||
priKey, _, _ := utils.GenKeyP2P(ip, beaconport) |
||||
bc := New(numshards, ip, beaconport, priKey) |
||||
b := bcconn.SerializeNodeInfo(leader1) |
||||
bc.AcceptConnections(b) |
||||
assert.Equal(t, RandomInfoSent, bc.state) |
||||
} |
||||
|
||||
func TestSaveBC(t *testing.T) { |
||||
var ip, port string |
||||
ip = "127.0.0.1" |
||||
port = "7523" |
||||
numshards := 2 |
||||
bci := &BCInfo{IP: ip, Port: port, NumberOfShards: numshards} |
||||
bc := &BeaconChain{BCInfo: *bci} |
||||
err := SaveBeaconChainInfo("test.json", bc) |
||||
if err != nil { |
||||
log.Fatalln(err) |
||||
} |
||||
bc2, err2 := LoadBeaconChainInfo("test.json") |
||||
if err2 != nil { |
||||
log.Fatalln(err2) |
||||
} |
||||
if !reflect.DeepEqual(bc, bc2) { |
||||
t.Error("beacon chain info objects are not same") |
||||
} |
||||
os.Remove("test.json") |
||||
} |
||||
|
||||
func TestSaveFile(t *testing.T) { |
||||
filepath := "test" |
||||
SetSaveFile(filepath) |
||||
if !reflect.DeepEqual(filepath, SaveFile) { |
||||
t.Error("Could not set savefile") |
||||
} |
||||
} |
@ -1,51 +0,0 @@ |
||||
package beaconchain |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"log" |
||||
"time" |
||||
|
||||
proto "github.com/harmony-one/harmony/api/beaconchain" |
||||
|
||||
"google.golang.org/grpc" |
||||
) |
||||
|
||||
// Client is the client model for beaconchain service.
|
||||
type Client struct { |
||||
beaconChainServiceClient proto.BeaconChainServiceClient |
||||
opts []grpc.DialOption |
||||
conn *grpc.ClientConn |
||||
} |
||||
|
||||
// NewClient setups a Client given ip and port.
|
||||
func NewClient(ip, port string) *Client { |
||||
client := Client{} |
||||
client.opts = append(client.opts, grpc.WithInsecure()) |
||||
var err error |
||||
client.conn, err = grpc.Dial(fmt.Sprintf("%s:%s", ip, port), client.opts...) |
||||
if err != nil { |
||||
log.Fatalf("fail to dial: %v", err) |
||||
return nil |
||||
} |
||||
|
||||
client.beaconChainServiceClient = proto.NewBeaconChainServiceClient(client.conn) |
||||
return &client |
||||
} |
||||
|
||||
// Close closes the Client.
|
||||
func (client *Client) Close() { |
||||
client.conn.Close() |
||||
} |
||||
|
||||
// GetLeaders gets current leaders from beacon chain
|
||||
func (client *Client) GetLeaders() *proto.FetchLeadersResponse { |
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) |
||||
defer cancel() |
||||
request := &proto.FetchLeadersRequest{} |
||||
response, err := client.beaconChainServiceClient.FetchLeaders(ctx, request) |
||||
if err != nil { |
||||
log.Fatalf("Error fetching leaders from beacon chain: %s", err) |
||||
} |
||||
return response |
||||
} |
@ -1,51 +0,0 @@ |
||||
package beaconchain |
||||
|
||||
import ( |
||||
"context" |
||||
"log" |
||||
"net" |
||||
|
||||
"github.com/harmony-one/harmony/api/proto/node" |
||||
|
||||
"google.golang.org/grpc" |
||||
|
||||
proto "github.com/harmony-one/harmony/api/beaconchain" |
||||
) |
||||
|
||||
// Server is the Server struct for beacon chain package.
|
||||
type Server struct { |
||||
shardLeaderMap func() map[int]*node.Info |
||||
} |
||||
|
||||
// FetchLeaders implements the FetchLeaders interface to return current leaders.
|
||||
func (s *Server) FetchLeaders(ctx context.Context, request *proto.FetchLeadersRequest) (*proto.FetchLeadersResponse, error) { |
||||
log.Println("Returning FetchLeadersResponse") |
||||
|
||||
leaders := []*proto.FetchLeadersResponse_Leader{} |
||||
for shardID, leader := range s.shardLeaderMap() { |
||||
leaders = append(leaders, &proto.FetchLeadersResponse_Leader{Ip: leader.IP, Port: leader.Port, ShardId: uint32(shardID), PeerID: leader.PeerID.Pretty()}) |
||||
} |
||||
log.Println(leaders) |
||||
return &proto.FetchLeadersResponse{Leaders: leaders}, nil |
||||
} |
||||
|
||||
// Start starts the Server on given ip and port.
|
||||
func (s *Server) Start(ip, port string) (*grpc.Server, error) { |
||||
// TODO(minhdoan): Currently not using ip. Fix it later.
|
||||
addr := net.JoinHostPort("", port) |
||||
lis, err := net.Listen("tcp", addr) |
||||
if err != nil { |
||||
log.Fatalf("failed to listen: %v", err) |
||||
} |
||||
var opts []grpc.ServerOption |
||||
grpcServer := grpc.NewServer(opts...) |
||||
proto.RegisterBeaconChainServiceServer(grpcServer, s) |
||||
go grpcServer.Serve(lis) |
||||
return grpcServer, nil |
||||
} |
||||
|
||||
// NewServer creates new Server which implements BeaconChainServiceServer interface.
|
||||
func NewServer(shardLeaderMap func() map[int]*node.Info) *Server { |
||||
s := &Server{shardLeaderMap} |
||||
return s |
||||
} |
@ -1 +0,0 @@ |
||||
Newnode package is for handling the interactions of a new candidate node that wants to join the network. Such interaction at the moment is about contacting the beaconchain and getting assigned a shard and findingout the shardleader. In future this package will be merged into the node package. |
@ -1,170 +0,0 @@ |
||||
package newnode |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"os" |
||||
"strconv" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/harmony-one/bls/ffi/go/bls" |
||||
"github.com/harmony-one/harmony/api/proto/bcconn" |
||||
proto_identity "github.com/harmony-one/harmony/api/proto/identity" |
||||
proto_node "github.com/harmony-one/harmony/api/proto/node" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
"github.com/harmony-one/harmony/p2p" |
||||
"github.com/harmony-one/harmony/p2p/host" |
||||
"github.com/harmony-one/harmony/p2p/p2pimpl" |
||||
|
||||
p2p_crypto "github.com/libp2p/go-libp2p-crypto" |
||||
multiaddr "github.com/multiformats/go-multiaddr" |
||||
) |
||||
|
||||
// NewNode is the struct for a candidate node that wants to join the
// network by registering with the beacon chain.
type NewNode struct {
	Role        string // role of this node; never written in this file — TODO confirm semantics at callers
	ShardID     int    // 0-based index of the shard assigned by processShardInfo
	ValidatorID int // Validator ID in its shard.
	leader      p2p.Peer            // leader peer of this node's assigned shard
	isLeader    bool                // whether this node was chosen as its shard's leader
	Self        p2p.Peer            // this node's own peer info (IP, port, keys, peer ID)
	Leaders     map[uint32]p2p.Peer // shard number -> leader peer, filled by processShardInfo
	PubK        *bls.PublicKey      // BLS public key, generated in New
	priK        *bls.SecretKey      // BLS secret key, generated in New
	log         log.Logger          // node-scoped logger
	SetInfo     chan bool           // receives true once sharding info has been processed
	host        p2p.Host            // underlying p2p host used for all messaging
}
||||
|
||||
// New candidate node initialization
|
||||
func New(ip string, port string, nodePk p2p_crypto.PrivKey) *NewNode { |
||||
priKey, pubKey := utils.GenKey(ip, port) |
||||
var node NewNode |
||||
var err error |
||||
node.PubK = pubKey |
||||
node.priK = priKey |
||||
node.Self = p2p.Peer{IP: ip, Port: port, PubKey: pubKey, ValidatorID: -1} |
||||
node.log = utils.GetLogInstance() |
||||
node.SetInfo = make(chan bool) |
||||
node.host, err = p2pimpl.NewHost(&node.Self, nodePk) |
||||
if err != nil { |
||||
node.log.Error("failed to create new host", "msg", err) |
||||
return nil |
||||
} |
||||
node.Leaders = map[uint32]p2p.Peer{} |
||||
return &node |
||||
} |
||||
|
||||
// registerResponseRandomNumber appears to mirror the payload the beacon
// chain returns after registration (compare the fields consumed from
// bcconn.DeserializeRandomInfo in processShardInfo) — TODO confirm it is
// still used anywhere.
type registerResponseRandomNumber struct {
	NumberOfShards     int                // total number of shards in the network
	NumberOfNodesAdded int                // how many nodes have registered so far
	Leaders            []*proto_node.Info // current leader of each shard
}
||||
|
||||
// ContactBeaconChain starts serving this node's p2p handler in the
// background and then registers the candidate node with the beacon
// chain peer BCPeer, blocking until sharding info is received or the
// registration attempt times out (see requestBeaconChain).
func (node *NewNode) ContactBeaconChain(BCPeer p2p.Peer) error {
	// Responses from the beacon chain are handled by NodeHandler.
	go node.host.BindHandlerAndServe(node.NodeHandler)
	return node.requestBeaconChain(BCPeer)
}
||||
|
||||
func (node NewNode) String() string { |
||||
return fmt.Sprintf("bc: %v:%v => %v", node.Self.IP, node.Self.Port, node.Self.PeerID) |
||||
} |
||||
|
||||
// RequestBeaconChain requests beacon chain for identity data
|
||||
func (node *NewNode) requestBeaconChain(BCPeer p2p.Peer) (err error) { |
||||
node.log.Info("connecting to beacon chain now ...") |
||||
pubk := node.PubK.Serialize() |
||||
if err != nil { |
||||
node.log.Error("Could not Marshall public key into binary") |
||||
} |
||||
fmt.Printf("[New Node]: %v\n", *node) |
||||
nodeInfo := &proto_node.Info{IP: node.Self.IP, Port: node.Self.Port, PubKey: pubk, PeerID: node.Self.PeerID} |
||||
msg := bcconn.SerializeNodeInfo(nodeInfo) |
||||
msgToSend := proto_identity.ConstructIdentityMessage(proto_identity.Register, msg) |
||||
gotShardInfo := false |
||||
timeout := time.After(2 * time.Minute) |
||||
tick := time.Tick(3 * time.Second) |
||||
checkLoop: |
||||
for { |
||||
select { |
||||
case <-timeout: |
||||
gotShardInfo = false |
||||
break checkLoop |
||||
case <-tick: |
||||
select { |
||||
case setinfo := <-node.SetInfo: |
||||
if setinfo { |
||||
gotShardInfo = true |
||||
break checkLoop |
||||
} |
||||
default: |
||||
host.SendMessage(node.host, BCPeer, msgToSend, nil) |
||||
} |
||||
} |
||||
} |
||||
if !gotShardInfo { |
||||
err = errors.New("could not create connection") |
||||
node.log.Crit("Could not get sharding info after 2 minutes") |
||||
os.Exit(10) |
||||
} |
||||
return |
||||
} |
||||
|
||||
// ProcessShardInfo
|
||||
func (node *NewNode) processShardInfo(msgPayload []byte) bool { |
||||
leadersInfo := bcconn.DeserializeRandomInfo(msgPayload) |
||||
leaders := leadersInfo.Leaders |
||||
shardNum, isLeader := utils.AllocateShard(leadersInfo.NumberOfNodesAdded, leadersInfo.NumberOfShards) |
||||
for n, v := range leaders { |
||||
leaderPeer := p2p.Peer{IP: v.IP, Port: v.Port, PeerID: v.PeerID} |
||||
|
||||
addr := fmt.Sprintf("/ip4/%s/tcp/%s", leaderPeer.IP, leaderPeer.Port) |
||||
targetAddr, err := multiaddr.NewMultiaddr(addr) |
||||
if err != nil { |
||||
log.Error("processShardInfo NewMultiaddr error", "error", err) |
||||
return false |
||||
} |
||||
leaderPeer.Addrs = append(leaderPeer.Addrs, targetAddr) |
||||
|
||||
leaderPeer.PubKey = &bls.PublicKey{} |
||||
err = leaderPeer.PubKey.Deserialize(v.PubKey[:]) |
||||
if err != nil { |
||||
node.log.Error("Could not unmarshall leaders public key from binary") |
||||
} |
||||
node.Leaders[uint32(n)] = leaderPeer |
||||
} |
||||
|
||||
node.leader = node.Leaders[uint32(shardNum-1)] |
||||
node.isLeader = isLeader |
||||
node.ShardID = shardNum - 1 //0 indexing.
|
||||
node.SetInfo <- true |
||||
node.log.Info("Shard information obtained ..") |
||||
return true |
||||
} |
||||
|
||||
// GetShardID gives shardid of node
|
||||
func (node *NewNode) GetShardID() string { |
||||
return strconv.Itoa(node.ShardID) |
||||
} |
||||
|
||||
// GetLeader gives the leader of the node
|
||||
func (node *NewNode) GetLeader() p2p.Peer { |
||||
return node.leader |
||||
} |
||||
|
||||
// GetClientPeer gives the client of the node
|
||||
func (node *NewNode) GetClientPeer() *p2p.Peer { |
||||
return nil |
||||
} |
||||
|
||||
// GetSelfPeer gives the peer part of the node's own struct
|
||||
func (node *NewNode) GetSelfPeer() p2p.Peer { |
||||
return node.Self |
||||
} |
||||
|
||||
// AddPeer add new peer for newnode
|
||||
func (node *NewNode) AddPeer(p *p2p.Peer) error { |
||||
return node.host.AddPeer(p) |
||||
} |
@ -1,63 +0,0 @@ |
||||
package newnode |
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/harmony-one/harmony/api/proto" |
||||
proto_identity "github.com/harmony-one/harmony/api/proto/identity" |
||||
"github.com/harmony-one/harmony/p2p" |
||||
) |
||||
|
||||
// NodeHandler handles a new incoming connection.
|
||||
func (node *NewNode) NodeHandler(s p2p.Stream) { |
||||
defer s.Close() |
||||
defer node.host.Close() |
||||
s.SetReadDeadline(time.Now().Add(1 * time.Second)) // This deadline is for 1 second to accept new connections.
|
||||
content, err := p2p.ReadMessageContent(s) |
||||
if err != nil { |
||||
node.log.Error("Read p2p data failed", "err", err, "node", node) |
||||
return |
||||
} |
||||
|
||||
msgCategory, err := proto.GetMessageCategory(content) |
||||
if err != nil { |
||||
node.log.Error("Read node type failed", "err", err, "node", node) |
||||
return |
||||
} |
||||
|
||||
msgType, err := proto.GetMessageType(content) |
||||
if err != nil { |
||||
node.log.Error("Read action type failed", "err", err, "node", node) |
||||
return |
||||
} |
||||
|
||||
msgPayload, err := proto.GetMessagePayload(content) |
||||
if err != nil { |
||||
node.log.Error("Read message payload failed", "err", err, "node", node) |
||||
return |
||||
} |
||||
identityMsgPayload, err := proto_identity.GetIdentityMessagePayload(msgPayload) |
||||
if err != nil { |
||||
node.log.Error("Read message payload failed") |
||||
return |
||||
} |
||||
switch msgCategory { |
||||
case proto.Identity: |
||||
actionType := proto_identity.IDMessageType(msgType) |
||||
switch actionType { |
||||
case proto_identity.Identity: |
||||
idMsgType, err := proto_identity.GetIdentityMessageType(msgPayload) |
||||
if err != nil { |
||||
node.log.Error("Error finding the identity message type", err) |
||||
} |
||||
switch idMsgType { |
||||
case proto_identity.Acknowledge: |
||||
node.processShardInfo(identityMsgPayload) |
||||
default: |
||||
panic("The identity message type is wrong/missing and newnode does not handle this identity message type") |
||||
} |
||||
default: |
||||
panic("The msgCategory is wrong/missing and newnode does not handle this protocol message type") |
||||
} |
||||
} |
||||
} |
@ -1,65 +0,0 @@ |
||||
package newnode |
||||
|
||||
import ( |
||||
"fmt" |
||||
"testing" |
||||
"time" |
||||
|
||||
beaconchain "github.com/harmony-one/harmony/internal/beaconchain/libs" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
"github.com/harmony-one/harmony/p2p" |
||||
peerstore "github.com/libp2p/go-libp2p-peerstore" |
||||
multiaddr "github.com/multiformats/go-multiaddr" |
||||
) |
||||
|
||||
func TestNewNode(t *testing.T) { |
||||
var ip, port string |
||||
ip = "127.0.0.1" |
||||
port = "8088" |
||||
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "8088") |
||||
nnode := New(ip, port, priKey) |
||||
|
||||
if nnode.PubK == nil { |
||||
t.Error("new node public key not initialized") |
||||
} |
||||
} |
||||
|
||||
func TestBeaconChainConnect(t *testing.T) { |
||||
var ip, beaconport, bcma, nodeport string |
||||
|
||||
ip = "127.0.0.1" |
||||
beaconport = "8081" |
||||
nodeport = "9081" |
||||
|
||||
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9081") |
||||
nnode := New(ip, nodeport, priKey) |
||||
|
||||
priKey, _, _ = utils.GenKeyP2P("127.0.0.1", "8081") |
||||
bc := beaconchain.New(1, ip, beaconport, priKey) |
||||
|
||||
bcma = fmt.Sprintf("/ip4/%s/tcp/%s/ipfs/%s", bc.Self.IP, bc.Self.Port, bc.GetID().Pretty()) |
||||
|
||||
go bc.StartServer() |
||||
time.Sleep(3 * time.Second) |
||||
|
||||
maddr, err := multiaddr.NewMultiaddr(bcma) |
||||
if err != nil { |
||||
t.Errorf("new multiaddr error: %v", err) |
||||
} |
||||
|
||||
// Extract the peer ID from the multiaddr.
|
||||
info, err2 := peerstore.InfoFromP2pAddr(maddr) |
||||
if err2 != nil { |
||||
t.Errorf("info from p2p addr error: %v", err2) |
||||
} |
||||
|
||||
BCPeer := &p2p.Peer{IP: ip, Port: beaconport, Addrs: info.Addrs, PeerID: info.ID} |
||||
|
||||
nnode.AddPeer(BCPeer) |
||||
|
||||
err3 := nnode.ContactBeaconChain(*BCPeer) |
||||
|
||||
if err3 != nil { |
||||
t.Errorf("could not read from connection: %v", err3) |
||||
} |
||||
} |
@ -1,59 +0,0 @@ |
||||
package host |
||||
|
||||
import ( |
||||
"fmt" |
||||
"reflect" |
||||
"testing" |
||||
"time" |
||||
|
||||
libp2p_peer "github.com/libp2p/go-libp2p-peer" |
||||
ma "github.com/multiformats/go-multiaddr" |
||||
|
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
"github.com/harmony-one/harmony/p2p" |
||||
"github.com/harmony-one/harmony/p2p/p2pimpl" |
||||
) |
||||
|
||||
func TestSendMessage(test *testing.T) { |
||||
peer1 := p2p.Peer{IP: "127.0.0.1", Port: "9000"} |
||||
selfAddr1, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", peer1.Port)) |
||||
peer1.Addrs = append(peer1.Addrs, selfAddr1) |
||||
priKey1, pubKey1, _ := utils.GenKeyP2P(peer1.IP, peer1.Port) |
||||
peerID1, _ := libp2p_peer.IDFromPublicKey(pubKey1) |
||||
peer1.PeerID = peerID1 |
||||
host1, _ := p2pimpl.NewHost(&peer1, priKey1) |
||||
|
||||
peer2 := p2p.Peer{IP: "127.0.0.1", Port: "9001"} |
||||
selfAddr2, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", peer2.Port)) |
||||
peer2.Addrs = append(peer2.Addrs, selfAddr2) |
||||
priKey2, pubKey2, _ := utils.GenKeyP2P(peer2.IP, peer2.Port) |
||||
peerID2, _ := libp2p_peer.IDFromPublicKey(pubKey2) |
||||
peer2.PeerID = peerID2 |
||||
host2, _ := p2pimpl.NewHost(&peer2, priKey2) |
||||
|
||||
msg := []byte{0x00, 0x01, 0x02, 0x03, 0x04} |
||||
if err := host1.AddPeer(&peer2); err != nil { |
||||
test.Fatalf("cannot add peer2 to host1: %v", err) |
||||
} |
||||
|
||||
go host2.BindHandlerAndServe(handler) |
||||
SendMessage(host1, peer2, msg, nil) |
||||
time.Sleep(3 * time.Second) |
||||
} |
||||
|
||||
func handler(s p2p.Stream) { |
||||
defer func() { |
||||
if err := s.Close(); err != nil { |
||||
panic(fmt.Sprintf("Close(%v) failed: %v", s, err)) |
||||
} |
||||
}() |
||||
content, err := p2p.ReadMessageContent(s) |
||||
if err != nil { |
||||
panic("Read p2p data failed") |
||||
} |
||||
golden := []byte{0x00, 0x01, 0x02, 0x03, 0x04} |
||||
|
||||
if !reflect.DeepEqual(content, golden) { |
||||
panic("received message not equal original message") |
||||
} |
||||
} |
@ -0,0 +1,121 @@ |
||||
#!/bin/bash
# Local test driver: builds the harmony binary and launches a local
# network based on a configuration file.

# Repository root (parent of this script's directory) and current user.
ROOT=$(dirname $0)/..
USER=$(whoami)

# Pull in the BLS library build flags needed to compile and run harmony.
. "${ROOT}/scripts/setup_bls_build_flags.sh"

# Trace commands; abort on any error, including inside pipelines.
set -x
set -eo pipefail
||||
|
||||
# Collect the per-role log files produced by the run and compute/report
# the TPS results via cal_tps.sh.
function check_result() {
	# Quote the -name patterns so the shell does not glob-expand them
	# against files in the current directory before find sees them
	# (the unquoted form breaks whenever a matching file exists in cwd).
	find $log_folder -name 'leader-*.log' > $log_folder/all-leaders.txt
	find $log_folder -name 'validator-*.log' > $log_folder/all-validators.txt

	echo ====== RESULTS ======
	results=$($ROOT/test/cal_tps.sh $log_folder/all-leaders.txt $log_folder/all-validators.txt)
	echo $results | tee -a $LOG_FILE
	echo $results > $log_folder/tps.log
}
||||
|
||||
# Kill every process owned by this user that belongs to a previous test
# run (harmony, txgen, soldier, ...), then remove stale state files.
function cleanup() {
for pid in `/bin/ps -fu $USER| grep "harmony\|txgen\|soldier\|commander\|profiler\|beacon\|bootnode" | grep -v "grep" | grep -v "vi" | awk '{print $2}'`;
do
echo 'Killed process: '$pid
# $DRYRUN is "echo" in dry-run mode (-n), so this only prints then.
$DRYRUN kill -9 $pid 2> /dev/null
done
# Remove bc_config.json before starting experiment.
rm -f bc_config.json
rm -rf ./db/harmony_*
}
||||
|
||||
# killnode PORT — kill the harmony process associated with the given
# port number.
function killnode() {
	local port=$1

	# Fixed: was `[ -n "port" ]`, which tests the literal string "port"
	# and is therefore always true; the variable was never checked.
	if [ -n "$port" ]; then
		pid=$(/bin/ps -fu $USER | grep "harmony" | grep "$port" | awk '{print $2}')
		echo "killing node with port: $port"
		$DRYRUN kill -9 $pid 2> /dev/null
		echo "node with port: $port is killed"
	fi
}
||||
|
||||
# Clean up all launched processes if the script is interrupted.
trap cleanup SIGINT SIGTERM

# Print usage and exit.
# NOTE(review): the second example below uses -p, which is not in the
# getopts string later in this script — confirm whether the flag or the
# example is stale.
function usage {
local ME=$(basename $0)

cat<<EOU
USAGE: $ME [OPTIONS] config_file_name

-h print this help message
-d enable db support (default: $DB)
-t toggle txgen (default: $TXGEN)
-D duration txgen run duration (default: $DURATION)
-m min_peers minimal number of peers to start consensus (default: $MIN)
-s shards number of shards (default: $SHARDS)
-k nodeport kill the node with specified port number (default: $KILLPORT)
-n dryrun mode (default: $DRYRUN)
-S enable sync test (default: $SYNC)
-P enable libp2p peer discovery test (default: $P2P)

This script will build all the binaries and start harmony and txgen based on the configuration file.

EXAMPLES:

$ME local_config.txt
$ME -p local_config.txt

EOU
exit 0
}
||||
|
||||
# Default settings; each is overridable via the flags documented in usage.
DB=
TXGEN=true
DURATION=90
MIN=5
SHARDS=2
KILLPORT=9004
SYNC=true
DRYRUN=
P2P=false

while getopts "hdtD:m:s:k:nSP" option; do
case $option in
h) usage ;;
d) DB='-db_supported' ;;
t) TXGEN=false ;;
D) DURATION=$OPTARG ;;
m) MIN=$OPTARG ;;
s) SHARDS=$OPTARG ;;
k) KILLPORT=$OPTARG ;;
# Dry-run: prefix every guarded command with echo instead of running it.
n) DRYRUN=echo ;;
# NOTE(review): SYNC already defaults to true, so -S is currently a no-op.
S) SYNC=true ;;
P) P2P=true ;;
esac
done

# Drop the parsed options, leaving the positional config file argument.
shift $((OPTIND-1))
||||
|
||||
# Since `go run` will generate a temporary exe every time,
# On windows, your system will pop up a network security dialog for each instance
# and you won't be able to turn it off. With `go build` generating one
# exe, the dialog will only pop up once at the very first time.
# Also it's recommended to use `go build` for testing the whole exe.
pushd $ROOT
echo "compiling ..."
go build -o bin/harmony cmd/harmony.go
popd

# Create a tmp folder for logs
t=`date +"%Y%m%d-%H%M%S"`
log_folder="tmp_log/log-$t"

mkdir -p $log_folder
LOG_FILE=$log_folder/r.log

# Placeholders for extra harmony options; currently always empty.
HMY_OPT=
HMY_OPT2=
HMY_OPT3=

# Launch a single new-node harmony instance on port 9100 in the
# background, teeing its output into the run log. $DRYRUN (set by -n)
# turns the whole command into an echo.
($DRYRUN $ROOT/bin/harmony -ip 127.0.0.1 -port 9100 -log_folder $log_folder -is_newnode $DB -min_peers $MIN $HMY_OPT $HMY_OPT2 $HMY_OPT3 -key /tmp/127.0.0.1-9100.key 2>&1 | tee -a $LOG_FILE ) &
Loading…
Reference in new issue