Merge remote-tracking branch 'harmony/main' into traceDB

pull/3799/head
peekpi 3 years ago
commit 897b6cd119
  1. 5
      .travis.yml
  2. 8
      Makefile
  3. 1
      accounts/keystore/watch.go
  4. 1
      accounts/keystore/watch_fallback.go
  5. 3
      api/proto/message/gen.sh
  6. 2
      api/proto/message/message.go
  7. 84
      api/proto/message/message.pb.go
  8. 101
      api/proto/message/message_grpc.pb.go
  9. 1
      api/proto/message/server.go
  10. 36
      api/service/explorer/service.go
  11. 15
      api/service/legacysync/downloader/client.go
  12. 5
      api/service/legacysync/downloader/gen.sh
  13. 3
      api/service/legacysync/downloader/proto/downloader.go
  14. 160
      api/service/legacysync/downloader/proto/downloader.pb.go
  15. 2
      api/service/legacysync/downloader/proto/downloader.proto
  16. 101
      api/service/legacysync/downloader/proto/downloader_grpc.pb.go
  17. 1
      api/service/legacysync/downloader/server.go
  18. 3
      api/service/prometheus/service.go
  19. 2
      block/factory/factory.go
  20. 2
      block/v3/header.go
  21. 3
      cmd/harmony/config.go
  22. 7
      cmd/harmony/config_migrations_test.go
  23. 7
      cmd/harmony/config_test.go
  24. 33
      cmd/harmony/default.go
  25. 418
      cmd/harmony/dumpdb.go
  26. 56
      cmd/harmony/flags.go
  27. 53
      cmd/harmony/flags_test.go
  28. 24
      cmd/harmony/main.go
  29. 1
      common/fdlimit/fdlimit_unix.go
  30. 1
      consensus/consensus_v2.go
  31. 12
      consensus/construct_test.go
  32. 12
      consensus/double_sign.go
  33. 6
      consensus/quorum/one-node-staked-vote_test.go
  34. 6
      consensus/quorum/quorom_test.go
  35. 4
      consensus/votepower/roster_test.go
  36. 115
      core/blockchain.go
  37. 15
      core/evm_test.go
  38. 1
      core/gen_genesis.go
  39. 5
      core/headerchain.go
  40. 72
      core/rawdb/accessors_offchain.go
  41. 210
      core/state/dump.go
  42. 40
      core/state/state_test.go
  43. 4
      core/state/statedb_test.go
  44. 2
      core/state_processor.go
  45. 2
      core/types/block_test.go
  46. 15
      core/types/eth_transaction.go
  47. 49
      core/types/transaction.go
  48. 55
      core/types/transaction_test.go
  49. 1
      core/vm/int_pool_verifier.go
  50. 1
      core/vm/int_pool_verifier_empty.go
  51. 1
      core/vm/runtime/fuzz.go
  52. 1
      eth/rpc/constants_unix.go
  53. 1
      eth/rpc/constants_unix_nocgo.go
  54. 1
      eth/rpc/ipc_js.go
  55. 1
      eth/rpc/ipc_unix.go
  56. 1
      eth/rpc/ipc_windows.go
  57. 9
      go.mod
  58. 5
      go.sum
  59. 1
      hmy/downloader/downloader.go
  60. 8
      hmy/downloader/longrange_test.go
  61. 5
      hmy/downloader/shortrange.go
  62. 2
      hmy/gasprice.go
  63. 6
      hmy/hmy.go
  64. 2
      hmy/staking.go
  65. 2
      hmy/transaction.go
  66. 4
      internal/chain/engine.go
  67. 2
      internal/chain/reward.go
  68. 9
      internal/configs/harmony/harmony.go
  69. 35
      internal/shardchain/dbfactory.go
  70. 37
      internal/shardchain/leveldb_shard/common.go
  71. 200
      internal/shardchain/leveldb_shard/shard.go
  72. 119
      internal/shardchain/leveldb_shard/shard_batch.go
  73. 22
      internal/shardchain/local_cache/hack.go
  74. 83
      internal/shardchain/local_cache/local_cache_batch.go
  75. 121
      internal/shardchain/local_cache/local_cache_database.go
  76. 15
      node/node.go
  77. 2
      node/node_genesis.go
  78. 7
      node/node_handler_test.go
  79. 42
      node/node_syncing.go
  80. 6
      node/worker/worker.go
  81. 1
      p2p/discovery/discovery.go
  82. 4
      p2p/host.go
  83. 38
      p2p/metrics.go
  84. 21
      p2p/stream/common/requestmanager/requestmanager_test.go
  85. 40
      p2p/stream/common/streammanager/cooldown.go
  86. 76
      p2p/stream/common/streammanager/streammanager.go
  87. 6
      p2p/stream/types/stream.go
  88. 4
      rosetta/common/config.go
  89. 10
      rosetta/infra/Dockerfile
  90. 29
      rosetta/infra/docker-compose-testnet.yaml
  91. 29
      rosetta/infra/docker-compose.yaml
  92. 7
      rosetta/infra/harmony-mainnet.conf
  93. 7
      rosetta/infra/harmony-pstn.conf
  94. 8
      rosetta/infra/rclone.conf
  95. 36
      rosetta/infra/run.sh
  96. 4
      rosetta/services/block.go
  97. 2
      rosetta/services/construction.go
  98. 8
      rosetta/services/construction_create.go
  99. 8
      rosetta/services/construction_create_test.go
  100. 8
      rosetta/services/tx_construction.go
  Some files were not shown because too many files have changed in this diff. Show more.

@ -20,7 +20,12 @@ install:
- git clone https://github.com/harmony-one/bls.git $HOME/gopath/src/github.com/harmony-one/bls
- (cd $HOME/gopath/src/github.com/harmony-one/mcl; make -j4)
- (cd $HOME/gopath/src/github.com/harmony-one/bls; make BLS_SWAP_G=1 -j4)
- go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26
- go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.1
- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.41.1
- make go-get
- go install golang.org/x/tools/cmd/goimports@latest
- go install github.com/harmony-ek/gencodec@latest
- scripts/install_build_tools.sh
script:
- ${TEST}

@ -1,7 +1,7 @@
TOP:=$(realpath ..)
export CGO_CFLAGS:=-I$(TOP)/bls/include -I$(TOP)/mcl/include -I/usr/local/opt/openssl/include
export CGO_LDFLAGS:=-L$(TOP)/bls/lib -L/usr/local/opt/openssl/lib
export LD_LIBRARY_PATH:=$(TOP)/bls/lib:$(TOP)/mcl/lib:/usr/local/opt/openssl/lib
export LD_LIBRARY_PATH:=$(TOP)/bls/lib:$(TOP)/mcl/lib:/usr/local/opt/openssl/lib:/opt/homebrew/opt/gmp/lib/:/opt/homebrew/opt/openssl/lib
export LIBRARY_PATH:=$(LD_LIBRARY_PATH)
export DYLD_FALLBACK_LIBRARY_PATH:=$(LD_LIBRARY_PATH)
export GO111MODULE:=on
@ -151,3 +151,9 @@ rpmpub_dev: rpm
rpmpub_prod: rpm
./scripts/package/publish-repo.sh -p prod -n rpm -s $(RPMBUILD)
go-vet:
go vet ./...
go-test:
go test ./...

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build (darwin && !ios) || freebsd || (linux && !arm64) || netbsd || solaris
// +build darwin,!ios freebsd linux,!arm64 netbsd solaris
package keystore

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build ios || (linux && arm64) || windows || (!darwin && !freebsd && !linux && !netbsd && !solaris)
// +build ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris
// This is the fallback implementation of directory watching.

@ -1,2 +1 @@
protoc -I ./ message.proto --go_out=plugins=grpc:./
# protoc -I ./ message.proto --go_out=./
protoc -I ./ message.proto --go_out=. --go-grpc_out=.

@ -1,3 +1,3 @@
package message
//go:generate protoc message.proto --go_out=plugins=grpc:.
//go:generate protoc message.proto --go_out=. --go-grpc_out=.

@ -7,10 +7,6 @@
package message
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@ -1296,83 +1292,3 @@ func file_message_proto_init() {
file_message_proto_goTypes = nil
file_message_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ClientServiceClient is the client API for ClientService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ClientServiceClient interface {
Process(ctx context.Context, in *Message, opts ...grpc.CallOption) (*Response, error)
}
type clientServiceClient struct {
cc grpc.ClientConnInterface
}
func NewClientServiceClient(cc grpc.ClientConnInterface) ClientServiceClient {
return &clientServiceClient{cc}
}
func (c *clientServiceClient) Process(ctx context.Context, in *Message, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := c.cc.Invoke(ctx, "/message.ClientService/Process", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ClientServiceServer is the server API for ClientService service.
type ClientServiceServer interface {
Process(context.Context, *Message) (*Response, error)
}
// UnimplementedClientServiceServer can be embedded to have forward compatible implementations.
type UnimplementedClientServiceServer struct {
}
func (*UnimplementedClientServiceServer) Process(context.Context, *Message) (*Response, error) {
return nil, status.Errorf(codes.Unimplemented, "method Process not implemented")
}
func RegisterClientServiceServer(s *grpc.Server, srv ClientServiceServer) {
s.RegisterService(&_ClientService_serviceDesc, srv)
}
func _ClientService_Process_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Message)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClientServiceServer).Process(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/message.ClientService/Process",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClientServiceServer).Process(ctx, req.(*Message))
}
return interceptor(ctx, in, info, handler)
}
var _ClientService_serviceDesc = grpc.ServiceDesc{
ServiceName: "message.ClientService",
HandlerType: (*ClientServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Process",
Handler: _ClientService_Process_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "message.proto",
}

@ -0,0 +1,101 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
package message
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// ClientServiceClient is the client API for ClientService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ClientServiceClient interface {
Process(ctx context.Context, in *Message, opts ...grpc.CallOption) (*Response, error)
}
type clientServiceClient struct {
cc grpc.ClientConnInterface
}
func NewClientServiceClient(cc grpc.ClientConnInterface) ClientServiceClient {
return &clientServiceClient{cc}
}
func (c *clientServiceClient) Process(ctx context.Context, in *Message, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := c.cc.Invoke(ctx, "/message.ClientService/Process", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ClientServiceServer is the server API for ClientService service.
// All implementations must embed UnimplementedClientServiceServer
// for forward compatibility
type ClientServiceServer interface {
Process(context.Context, *Message) (*Response, error)
mustEmbedUnimplementedClientServiceServer()
}
// UnimplementedClientServiceServer must be embedded to have forward compatible implementations.
type UnimplementedClientServiceServer struct {
}
func (UnimplementedClientServiceServer) Process(context.Context, *Message) (*Response, error) {
return nil, status.Errorf(codes.Unimplemented, "method Process not implemented")
}
func (UnimplementedClientServiceServer) mustEmbedUnimplementedClientServiceServer() {}
// UnsafeClientServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ClientServiceServer will
// result in compilation errors.
type UnsafeClientServiceServer interface {
mustEmbedUnimplementedClientServiceServer()
}
func RegisterClientServiceServer(s grpc.ServiceRegistrar, srv ClientServiceServer) {
s.RegisterService(&ClientService_ServiceDesc, srv)
}
func _ClientService_Process_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Message)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClientServiceServer).Process(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/message.ClientService/Process",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClientServiceServer).Process(ctx, req.(*Message))
}
return interceptor(ctx, in, info, handler)
}
// ClientService_ServiceDesc is the grpc.ServiceDesc for ClientService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var ClientService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "message.ClientService",
HandlerType: (*ClientServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Process",
Handler: _ClientService_Process_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "message.proto",
}

@ -18,6 +18,7 @@ const (
// Server is the Server struct for client service package.
type Server struct {
UnimplementedClientServiceServer
server *grpc.Server
CreateTransactionForEnterMethod func(int64, string) error
GetResult func(string) ([]string, []*big.Int)

@ -11,12 +11,11 @@ import (
"strconv"
"time"
"github.com/harmony-one/harmony/core/types"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/gorilla/mux"
msg_pb "github.com/harmony-one/harmony/api/proto/message"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/hmy"
"github.com/harmony-one/harmony/hmy/tracers"
"github.com/harmony-one/harmony/internal/chain"
@ -115,6 +114,7 @@ func (s *Service) Run() *http.Server {
// parameter prefix: from which address prefix start
s.router.Path("/addresses").Queries("size", "{[0-9]*?}", "prefix", "{[a-zA-Z0-9]*?}").HandlerFunc(s.GetAddresses).Methods("GET")
s.router.Path("/addresses").HandlerFunc(s.GetAddresses)
s.router.Path("/height").HandlerFunc(s.GetHeight)
// Set up router for supply info
s.router.Path("/burn-addresses").Queries().HandlerFunc(s.GetInaccessibleAddressInfo).Methods("GET")
@ -175,6 +175,38 @@ func (s *Service) GetAddresses(w http.ResponseWriter, r *http.Request) {
}
}
// HeightResponse is the JSON payload served by the /height endpoint: one
// field per shard, keyed by shard ID ("0".."3").
// NOTE(review): `omitempty` on a uint64 drops the field when the value is 0,
// so a genuine height of zero (fresh chain) is omitted from the response —
// confirm callers tolerate a missing key.
type HeightResponse struct {
S0 uint64 `json:"0,omitempty"`
S1 uint64 `json:"1,omitempty"`
S2 uint64 `json:"2,omitempty"`
S3 uint64 `json:"3,omitempty"`
}
// GetHeight writes the current block heights of this node's chains as JSON.
// The shard this node serves is always reported under its own shard ID; for
// non-beacon shards (1-3) the beacon chain (shard 0) height is reported too.
// Shards above 3 fall through the switch and produce an empty object.
func (s *Service) GetHeight(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	bc := s.backend.Blockchain()
	out := HeightResponse{}
	switch bc.ShardID() {
	case 0:
		out.S0 = bc.CurrentBlock().NumberU64()
	case 1:
		out.S0 = s.backend.Beaconchain().CurrentBlock().NumberU64()
		out.S1 = bc.CurrentBlock().NumberU64()
	case 2:
		out.S0 = s.backend.Beaconchain().CurrentBlock().NumberU64()
		out.S2 = bc.CurrentBlock().NumberU64()
	case 3:
		out.S0 = s.backend.Beaconchain().CurrentBlock().NumberU64()
		out.S3 = bc.CurrentBlock().NumberU64()
	}
	if err := json.NewEncoder(w).Encode(out); err != nil {
		// Fixed: message previously said "addresses" (copy-paste from GetAddresses).
		utils.Logger().Warn().Err(err).Msg("cannot JSON-encode heights")
	}
}
// GetNormalTxHashesByAccount get the normal transaction hashes by account
func (s *Service) GetNormalTxHashesByAccount(address string) ([]ethCommon.Hash, []TxType, error) {
return s.storage.GetNormalTxsByAddress(address)

@ -57,6 +57,21 @@ func (client *Client) GetBlockHashes(startHash []byte, size uint32, ip, port str
return response
}
// GetBlocksByHeights fetches blocks at the given heights from the connected
// peer via a BLOCKBYHEIGHT gRPC query. On failure it logs the error and
// returns the response as-is (nil on error) — callers must nil-check.
func (client *Client) GetBlocksByHeights(heights []uint64) *pb.DownloaderResponse {
	// Bound the round trip so one stalled peer cannot hang syncing.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	request := &pb.DownloaderRequest{
		Type:    pb.DownloaderRequest_BLOCKBYHEIGHT,
		Heights: heights,
	}
	response, err := client.dlClient.Query(ctx, request)
	if err != nil {
		// Fixed: message previously said "GetBlockHashes" (copy-paste from the
		// method above), which made sync failures hard to attribute.
		utils.Logger().Error().Err(err).Str("target", client.conn.Target()).Msg("[SYNC] GetBlocksByHeights query failed")
	}
	return response
}
// GetBlockHeaders gets block headers in serialization byte array by calling a grpc request.
func (client *Client) GetBlockHeaders(hashes [][]byte) *pb.DownloaderResponse {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)

@ -1,2 +1,5 @@
# used versions
#go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26
#go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.1
SRC_DIR=$(dirname $0)
protoc -I ${SRC_DIR}/proto/ ${SRC_DIR}/proto/downloader.proto --go_out=plugins=grpc:${SRC_DIR}/proto
protoc -I ${SRC_DIR}/proto/ ${SRC_DIR}/proto/downloader.proto --go_out=${SRC_DIR}/proto --go-grpc_out=${SRC_DIR}/proto

@ -1,3 +1,4 @@
package downloader
//go:generate protoc downloader.proto --go_out=plugins=grpc:.
///go:generate protoc downloader.proto --go_out=plugins=grpc:.
//go:generate protoc downloader.proto --go_out=. --go-grpc_out=.

@ -7,10 +7,6 @@
package downloader
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@ -35,6 +31,7 @@ const (
DownloaderRequest_REGISTERTIMEOUT DownloaderRequest_RequestType = 5
DownloaderRequest_UNKNOWN DownloaderRequest_RequestType = 6
DownloaderRequest_BLOCKHEADER DownloaderRequest_RequestType = 7
DownloaderRequest_BLOCKBYHEIGHT DownloaderRequest_RequestType = 8
)
// Enum value maps for DownloaderRequest_RequestType.
@ -48,6 +45,7 @@ var (
5: "REGISTERTIMEOUT",
6: "UNKNOWN",
7: "BLOCKHEADER",
8: "BLOCKBYHEIGHT",
}
DownloaderRequest_RequestType_value = map[string]int32{
"BLOCKHASH": 0,
@ -58,6 +56,7 @@ var (
"REGISTERTIMEOUT": 5,
"UNKNOWN": 6,
"BLOCKHEADER": 7,
"BLOCKBYHEIGHT": 8,
}
)
@ -154,6 +153,7 @@ type DownloaderRequest struct {
Size uint32 `protobuf:"varint,7,opt,name=size,proto3" json:"size,omitempty"`
RegisterWithSig bool `protobuf:"varint,8,opt,name=registerWithSig,proto3" json:"registerWithSig,omitempty"` // Expect to have NEWBLOCK response of block along with current signature
GetBlocksWithSig bool `protobuf:"varint,9,opt,name=getBlocksWithSig,proto3" json:"getBlocksWithSig,omitempty"` // Have block along with signature for BLOCK request.
Heights []uint64 `protobuf:"varint,10,rep,packed,name=heights,proto3" json:"heights,omitempty"`
}
func (x *DownloaderRequest) Reset() {
@ -251,6 +251,13 @@ func (x *DownloaderRequest) GetGetBlocksWithSig() bool {
return false
}
func (x *DownloaderRequest) GetHeights() []uint64 {
if x != nil {
return x.Heights
}
return nil
}
// DownloaderResponse is the generic response of DownloaderRequest.
type DownloaderResponse struct {
state protoimpl.MessageState
@ -321,7 +328,7 @@ var File_downloader_proto protoreflect.FileDescriptor
var file_downloader_proto_rawDesc = []byte{
0x0a, 0x10, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x12, 0x0a, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x22, 0xbc,
0x74, 0x6f, 0x12, 0x0a, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x22, 0xe9,
0x03, 0x0a, 0x11, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0e, 0x32, 0x29, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e,
@ -341,36 +348,39 @@ var file_downloader_proto_rawDesc = []byte{
0x57, 0x69, 0x74, 0x68, 0x53, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x10, 0x67, 0x65, 0x74, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x73, 0x57, 0x69, 0x74, 0x68, 0x53, 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28,
0x08, 0x52, 0x10, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x57, 0x69, 0x74, 0x68,
0x53, 0x69, 0x67, 0x22, 0x87, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54,
0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x48, 0x41, 0x53, 0x48,
0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0c, 0x0a,
0x08, 0x4e, 0x45, 0x57, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x42,
0x4c, 0x4f, 0x43, 0x4b, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08,
0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45,
0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x05, 0x12,
0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x06, 0x12, 0x0f, 0x0a, 0x0b,
0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x07, 0x22, 0xd4, 0x01,
0x0a, 0x12, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18,
0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x47,
0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x64,
0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f,
0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x67,
0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70,
0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c,
0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x39, 0x0a, 0x14, 0x52, 0x65, 0x67,
0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70,
0x65, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x08,
0x0a, 0x04, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x59,
0x4e, 0x43, 0x10, 0x02, 0x32, 0x56, 0x0a, 0x0a, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64,
0x65, 0x72, 0x12, 0x48, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1d, 0x2e, 0x64, 0x6f,
0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61,
0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x64, 0x6f, 0x77,
0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64,
0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x0f, 0x5a, 0x0d,
0x2e, 0x2f, 0x3b, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
0x53, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x0a,
0x20, 0x03, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x9a, 0x01,
0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a,
0x09, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05,
0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x45, 0x57, 0x42, 0x4c,
0x4f, 0x43, 0x4b, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x48, 0x45,
0x49, 0x47, 0x48, 0x54, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54,
0x45, 0x52, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52,
0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b,
0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x06, 0x12, 0x0f, 0x0a, 0x0b, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x48,
0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x07, 0x12, 0x11, 0x0a, 0x0d, 0x42, 0x4c, 0x4f, 0x43, 0x4b,
0x42, 0x59, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x10, 0x08, 0x22, 0xd4, 0x01, 0x0a, 0x12, 0x44,
0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x47, 0x0a, 0x04, 0x74,
0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x64, 0x6f, 0x77, 0x6e,
0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65,
0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04,
0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69,
0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x39, 0x0a, 0x14, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b,
0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46,
0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x59, 0x4e, 0x43, 0x10,
0x02, 0x32, 0x56, 0x0a, 0x0a, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x12,
0x48, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1d, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c,
0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f,
0x61, 0x64, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x0f, 0x5a, 0x0d, 0x2e, 0x2f, 0x3b,
0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
@ -456,83 +466,3 @@ func file_downloader_proto_init() {
file_downloader_proto_goTypes = nil
file_downloader_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// DownloaderClient is the client API for Downloader service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type DownloaderClient interface {
Query(ctx context.Context, in *DownloaderRequest, opts ...grpc.CallOption) (*DownloaderResponse, error)
}
type downloaderClient struct {
cc grpc.ClientConnInterface
}
func NewDownloaderClient(cc grpc.ClientConnInterface) DownloaderClient {
return &downloaderClient{cc}
}
func (c *downloaderClient) Query(ctx context.Context, in *DownloaderRequest, opts ...grpc.CallOption) (*DownloaderResponse, error) {
out := new(DownloaderResponse)
err := c.cc.Invoke(ctx, "/downloader.Downloader/Query", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// DownloaderServer is the server API for Downloader service.
type DownloaderServer interface {
Query(context.Context, *DownloaderRequest) (*DownloaderResponse, error)
}
// UnimplementedDownloaderServer can be embedded to have forward compatible implementations.
type UnimplementedDownloaderServer struct {
}
func (*UnimplementedDownloaderServer) Query(context.Context, *DownloaderRequest) (*DownloaderResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Query not implemented")
}
func RegisterDownloaderServer(s *grpc.Server, srv DownloaderServer) {
s.RegisterService(&_Downloader_serviceDesc, srv)
}
func _Downloader_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DownloaderRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DownloaderServer).Query(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/downloader.Downloader/Query",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DownloaderServer).Query(ctx, req.(*DownloaderRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Downloader_serviceDesc = grpc.ServiceDesc{
ServiceName: "downloader.Downloader",
HandlerType: (*DownloaderServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Query",
Handler: _Downloader_Query_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "downloader.proto",
}

@ -19,6 +19,7 @@ message DownloaderRequest {
REGISTERTIMEOUT = 5;
UNKNOWN = 6;
BLOCKHEADER = 7;
BLOCKBYHEIGHT = 8;
}
// Request type.
@ -33,6 +34,7 @@ message DownloaderRequest {
uint32 size = 7;
bool registerWithSig = 8; // Expect to have NEWBLOCK response of block along with current signature
bool getBlocksWithSig = 9; // Have block along with signature for BLOCK request.
repeated uint64 heights = 10;
}
// DownloaderResponse is the generic response of DownloaderRequest.

@ -0,0 +1,101 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
package downloader
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// DownloaderClient is the client API for Downloader service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type DownloaderClient interface {
Query(ctx context.Context, in *DownloaderRequest, opts ...grpc.CallOption) (*DownloaderResponse, error)
}
type downloaderClient struct {
cc grpc.ClientConnInterface
}
func NewDownloaderClient(cc grpc.ClientConnInterface) DownloaderClient {
return &downloaderClient{cc}
}
func (c *downloaderClient) Query(ctx context.Context, in *DownloaderRequest, opts ...grpc.CallOption) (*DownloaderResponse, error) {
out := new(DownloaderResponse)
err := c.cc.Invoke(ctx, "/downloader.Downloader/Query", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// DownloaderServer is the server API for Downloader service.
// All implementations must embed UnimplementedDownloaderServer
// for forward compatibility
type DownloaderServer interface {
Query(context.Context, *DownloaderRequest) (*DownloaderResponse, error)
mustEmbedUnimplementedDownloaderServer()
}
// UnimplementedDownloaderServer must be embedded to have forward compatible implementations.
type UnimplementedDownloaderServer struct {
}
func (UnimplementedDownloaderServer) Query(context.Context, *DownloaderRequest) (*DownloaderResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Query not implemented")
}
func (UnimplementedDownloaderServer) mustEmbedUnimplementedDownloaderServer() {}
// UnsafeDownloaderServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to DownloaderServer will
// result in compilation errors.
type UnsafeDownloaderServer interface {
mustEmbedUnimplementedDownloaderServer()
}
func RegisterDownloaderServer(s grpc.ServiceRegistrar, srv DownloaderServer) {
s.RegisterService(&Downloader_ServiceDesc, srv)
}
// _Downloader_Query_Handler decodes an incoming DownloaderRequest and
// dispatches it to the server's Query method, routing through the configured
// unary interceptor when one is installed.
func _Downloader_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DownloaderRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor: call the service implementation directly.
		return srv.(DownloaderServer).Query(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/downloader.Downloader/Query",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(DownloaderServer).Query(ctx, req.(*DownloaderRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// Downloader_ServiceDesc is the grpc.ServiceDesc for Downloader service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Downloader_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "downloader.Downloader",
	HandlerType: (*DownloaderServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Query",
			Handler:    _Downloader_Query_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "downloader.proto",
}

@ -19,6 +19,7 @@ const (
// Server is the Server struct for downloader package.
type Server struct {
pb.UnimplementedDownloaderServer
downloadInterface DownloadInterface
GrpcServer *grpc.Server
Port int

@ -11,6 +11,8 @@ import (
"sync"
"time"
"github.com/ethereum/go-ethereum/metrics"
eth_prometheus "github.com/ethereum/go-ethereum/metrics/prometheus"
"github.com/harmony-one/harmony/internal/utils"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
@ -104,6 +106,7 @@ func newService(cfg Config, additionalHandlers ...Handler) *Service {
mux := http.NewServeMux()
mux.Handle("/metrics", handler)
mux.Handle("/metrics/eth", eth_prometheus.Handler(metrics.DefaultRegistry))
mux.HandleFunc("/goroutinez", svc.goroutinezHandler)
// Register additional handlers.

@ -40,7 +40,7 @@ func (f *factory) NewHeader(epoch *big.Int) *block.Header {
impl = v0.NewHeader()
}
impl.SetEpoch(epoch)
return &block.Header{impl}
return &block.Header{Header: impl}
}
// Factories corresponding to well-known chain configurations.

@ -70,7 +70,7 @@ type headerFields struct {
Vdf []byte `json:"vdf"`
ShardState []byte `json:"shardState"`
CrossLinks []byte `json:"crossLink"`
Slashes []byte `json:slashes`
Slashes []byte `json:"slashes"`
}
// ParentHash is the header hash of the parent block. For the genesis block

@ -95,7 +95,7 @@ func getDefaultDNSSyncConfig(nt nodeconfig.NetworkType) harmonyconfig.DnsSync {
dnsSync.Client = true
case nodeconfig.Localnet:
dnsSync.Server = true
dnsSync.Client = true
dnsSync.Client = false
default:
dnsSync.Server = true
dnsSync.Client = false
@ -194,7 +194,6 @@ var dumpConfigLegacyCmd = &cobra.Command{
// registerDumpConfigFlags attaches the network-type flag to both dump-config
// commands. The original body returned after the first registration, leaving
// the second RegisterFlags call unreachable so dumpConfigLegacyCmd never got
// its flags.
func registerDumpConfigFlags() error {
	if err := cli.RegisterFlags(dumpConfigCmd, []cli.Flag{networkTypeFlag}); err != nil {
		return err
	}
	return cli.RegisterFlags(dumpConfigLegacyCmd, []cli.Flag{networkTypeFlag})
}
func promptConfigUpdate() bool {

@ -284,6 +284,13 @@ Version = "1.0.4"
LegacyServer = true
MinPeers = 6
[ShardData]
EnableShardData = false
DiskCount = 8
ShardCount = 4
CacheTime = 10
CacheSize = 512
[TxPool]
BlacklistFile = "./.hmy/blacklist.txt"

@ -96,6 +96,13 @@ Version = "1.0.4"
LegacyServer = true
MinPeers = 6
[ShardData]
EnableShardData = false
DiskCount = 8
ShardCount = 4
CacheTime = 10
CacheSize = 512
[WS]
Enabled = true
IP = "127.0.0.1"

@ -89,6 +89,13 @@ var defaultConfig = harmonyconfig.HarmonyConfig{
},
},
DNSSync: getDefaultDNSSyncConfig(defNetworkType),
ShardData: harmonyconfig.ShardDataConfig{
EnableShardData: false,
DiskCount: 8,
ShardCount: 4,
CacheTime: 10,
CacheSize: 512,
},
}
var defaultSysConfig = harmonyconfig.SysConfig{
@ -141,25 +148,25 @@ var (
defaultTestNetSyncConfig = harmonyconfig.SyncConfig{
Enabled: true,
Downloader: false,
Concurrency: 4,
MinPeers: 4,
InitStreams: 4,
DiscSoftLowCap: 4,
DiscHardLowCap: 4,
Concurrency: 2,
MinPeers: 2,
InitStreams: 2,
DiscSoftLowCap: 2,
DiscHardLowCap: 2,
DiscHighCap: 1024,
DiscBatch: 8,
DiscBatch: 3,
}
defaultLocalNetSyncConfig = harmonyconfig.SyncConfig{
Enabled: true,
Downloader: false,
Concurrency: 4,
MinPeers: 4,
InitStreams: 4,
DiscSoftLowCap: 4,
DiscHardLowCap: 4,
Downloader: true,
Concurrency: 2,
MinPeers: 2,
InitStreams: 2,
DiscSoftLowCap: 2,
DiscHardLowCap: 2,
DiscHighCap: 1024,
DiscBatch: 8,
DiscBatch: 3,
}
defaultElseSyncConfig = harmonyconfig.SyncConfig{

@ -0,0 +1,418 @@
package main
import (
"fmt"
"math/big"
"os"
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/spf13/cobra"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
ethRawDB "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/hmy"
"github.com/harmony-one/harmony/internal/cli"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
)
// batchFlag caps how much data (in MB) is accumulated in the destination-DB
// write batch before it is flushed to disk.
var batchFlag = cli.IntFlag{
	Name:      "batch",
	Shorthand: "b",
	Usage:     "batch size limit in MB",
	DefValue:  512,
}
// dumpDBCmd dumps a pruned snapshot of a harmony leveldb into a fresh
// destination DB. The optional positional arguments are 0x-prefixed hex keys
// bounding the account range ([startKey, endKey]) and, for the first account,
// the storage range ([firstStateStartKey, firstStateEndKey]). Exits the
// process on any argument error.
var dumpDBCmd = &cobra.Command{
	// Fixed: the usage string originally had unbalanced brackets
	// ("...firstStateEndKey]" with three opening brackets left unclosed).
	Use:     "dumpdb srcdb destdb [startKey [endKey [firstStateStartKey [firstStateEndKey]]]]",
	Short:   "dump a snapshot db.",
	Long:    "dump a snapshot db.",
	Example: "harmony dumpdb /srcDir/harmony_db_0 /destDir/harmony_db_0",
	Args:    cobra.RangeArgs(2, 6),
	Run: func(cmd *cobra.Command, args []string) {
		srcDBDir, destDBDir := args[0], args[1]
		var batchLimitMB int
		var startKey []byte
		var endKey []byte
		var firstStateStartKey []byte
		var firstStateEndKey []byte
		// Each optional argument must decode as 0x-prefixed hex.
		if len(args) > 2 {
			_startKey, err := hexutil.Decode(args[2])
			if err != nil {
				fmt.Println("invalid startKey:", err)
				os.Exit(-1)
			}
			startKey = _startKey
		}
		if len(args) > 3 {
			_endKey, err := hexutil.Decode(args[3])
			if err != nil {
				fmt.Println("invalid endKey:", err)
				os.Exit(-1)
			}
			endKey = _endKey
		}
		if len(args) > 4 {
			_startKey, err := hexutil.Decode(args[4])
			if err != nil {
				fmt.Println("invalid stateStartKey:", err)
				os.Exit(-1)
			}
			firstStateStartKey = _startKey
		}
		if len(args) > 5 {
			_endKey, err := hexutil.Decode(args[5])
			if err != nil {
				fmt.Println("invalid stateEndKey:", err)
				os.Exit(-1)
			}
			firstStateEndKey = _endKey
		}
		batchLimitMB = cli.GetIntFlagValue(cmd, batchFlag)
		// The shard schedule drives how many shards' off-chain data is copied.
		networkType := getNetworkType(cmd)
		shardSchedule = getShardSchedule(networkType)
		if shardSchedule == nil {
			fmt.Println("unsupported network type")
			os.Exit(-1)
		}
		fmt.Println(srcDBDir, destDBDir, batchLimitMB, hexutil.Encode(startKey), hexutil.Encode(endKey), hexutil.Encode(firstStateStartKey), hexutil.Encode(firstStateEndKey))
		dumpMain(srcDBDir, destDBDir, batchLimitMB*MB, startKey, endKey, firstStateStartKey, firstStateEndKey)
		os.Exit(0)
	},
}
// getShardSchedule maps a network type to its sharding schedule. Unknown
// network types yield nil (the map's zero value for an interface).
func getShardSchedule(networkType nodeconfig.NetworkType) shardingconfig.Schedule {
	schedules := map[nodeconfig.NetworkType]shardingconfig.Schedule{
		nodeconfig.Mainnet:   shardingconfig.MainnetSchedule,
		nodeconfig.Testnet:   shardingconfig.TestnetSchedule,
		nodeconfig.Pangaea:   shardingconfig.PangaeaSchedule,
		nodeconfig.Localnet:  shardingconfig.LocalnetSchedule,
		nodeconfig.Partner:   shardingconfig.PartnerSchedule,
		nodeconfig.Stressnet: shardingconfig.StressNetSchedule,
	}
	return schedules[networkType]
}
// registerDumpDBFlags attaches the batch-size and network-type flags to the
// dumpdb command.
func registerDumpDBFlags() error {
	return cli.RegisterFlags(dumpDBCmd, []cli.Flag{batchFlag, networkTypeFlag})
}
// KakashiDB wraps a source ethdb.Database so that every successful Get is
// mirrored into a batch on the destination DB. Reading data through it is
// therefore how the dump is performed: iterate/read the keys you want copied.
type KakashiDB struct {
	ethdb.Database               // source DB; reads pass through
	toDB       ethdb.Database    // destination DB receiving the snapshot
	toDBBatch  ethdb.Batch       // pending writes to toDB, flushed by flush()
	batchLimit int               // flush threshold in bytes
	cache      *lru.Cache        // keys already copied, to avoid duplicates
}

const (
	MB                 = 1024 * 1024
	BLOCKS_DUMP        = 512 // must >= 256
	EPOCHS_DUMP        = 10
	STATEDB_CACHE_SIZE = 64 // size in MB
	LEVELDB_CACHE_SIZE = 256
	LEVELDB_HANDLES    = 1024
	LRU_CACHE_SIZE     = 64 * 1024 * 1024
)

// Progress states for the account currently being dumped (see OnAccount*).
const (
	NONE = iota
	ON_ACCOUNT_START
	ON_ACCOUNT_STATE
	ON_ACCOUNT_END
)

// Package-level dump progress counters, printed by dumpPrint.
var (
	totalSize    = 0 // current dump size
	printSize    = 0 // last print dump size
	flushSize    = 0 // size flushed into db
	accountCount = 0 // number of accounts
	lastAccount  = state.DumpAccount{
		Address: &common.Address{},
	}
	savedStateKey hexutil.Bytes // state key saved at the last mid-account flush
	accountState  = NONE
	emptyHash     = common.Hash{}
	shardSchedule shardingconfig.Schedule // init by cli flag
)
// dumpPrint emits a progress line once at least 1MB has been copied since the
// previous report, or unconditionally when showAccount is set, in which case
// the account currently being dumped is printed as well.
func dumpPrint(prefix string, showAccount bool) {
	if !showAccount && totalSize-printSize <= MB {
		return
	}
	fmt.Println(time.Now().Unix(), prefix, accountCount, totalSize, printSize/MB, flushSize/MB)
	if showAccount {
		fmt.Println("account:", lastAccount.Address.Hex(), lastAccount.Balance, len(lastAccount.Code), accountState, lastAccount.SecureKey.String(), savedStateKey.String())
	}
	printSize = totalSize
}
// Get reads key from the source DB and, the first time a key is seen, mirrors
// the key/value pair into the destination batch. Failed reads are not copied
// and not cached: the original copied a nil value into the destination on a
// read error, planting a bogus entry in the snapshot.
func (db *KakashiDB) Get(key []byte) ([]byte, error) {
	value, err := db.Database.Get(key)
	if err != nil {
		return value, err
	}
	if exist, _ := db.cache.ContainsOrAdd(string(key), nil); !exist {
		db.copyKV(key, value)
	}
	return value, nil
}
// Put is a deliberate no-op: KakashiDB is a read-side copier and must never
// mutate the source DB.
func (db *KakashiDB) Put(key []byte, value []byte) error {
	return nil
}

// Delete removes the key from the key-value data store.
// Deliberately a no-op for the same reason as Put.
func (db *KakashiDB) Delete(key []byte) error {
	return nil
}
// copyKV queues key/value for the destination DB and updates the running
// size counter used for progress reporting and flush decisions.
func (db *KakashiDB) copyKV(key, value []byte) {
	db.toDBBatch.Put(key, value)
	totalSize += len(key) + len(value)
	dumpPrint("copyKV", false)
}
// flush writes the accumulated batch to the destination DB and resets it.
// A failed write aborts the dump (panic): continuing would silently produce
// an incomplete snapshot — the original discarded the Write error.
func (db *KakashiDB) flush() {
	dumpPrint("KakashiDB batch writing", true) // fixed typo: was "writhing"
	if err := db.toDBBatch.Write(); err != nil {
		panic(err)
	}
	db.toDBBatch.Reset()
	flushSize = totalSize
	dumpPrint("KakashiDB flushed", false)
}
// Close discards any unflushed batch data and closes both databases.
// Dropping the dirty batch is intentional: partially-accumulated data must
// not reach the destination on shutdown.
func (db *KakashiDB) Close() error {
	db.toDBBatch.Reset() // drop dirty cache
	fmt.Println("KakashiDB Close")
	db.toDB.Close() // NOTE(review): close error is discarded — confirm acceptable
	return db.Database.Close()
}
// OnRoot implements DumpCollector interface; the state root needs no handling here.
func (db *KakashiDB) OnRoot(common.Hash) {}

// OnAccountStart implements DumpCollector interface: record the account now
// being dumped for progress reporting.
func (db *KakashiDB) OnAccountStart(addr common.Address, acc state.DumpAccount) {
	accountState = ON_ACCOUNT_START
	lastAccount = acc
	lastAccount.Address = &addr
}

// OnAccountState implements DumpCollector interface: flush mid-account when
// the batch limit is exceeded, remembering the state key we stopped at.
func (db *KakashiDB) OnAccountState(addr common.Address, StateSecureKey hexutil.Bytes, key, value []byte) {
	accountState = ON_ACCOUNT_STATE
	if totalSize-flushSize > int(db.batchLimit) {
		savedStateKey = StateSecureKey
		db.flush()
	}
}

// OnAccountEnd implements DumpCollector interface: count the finished account
// and flush if the batch limit is exceeded.
func (db *KakashiDB) OnAccountEnd(addr common.Address, acc state.DumpAccount) {
	accountCount++
	accountState = ON_ACCOUNT_END
	if totalSize-flushSize > int(db.batchLimit) {
		db.flush()
	}
}
// getHashByNumber returns the canonical block hash at the given height
// (zero hash when absent). Reading through db also copies the key to the
// destination batch.
func (db *KakashiDB) getHashByNumber(number uint64) common.Hash {
	hash := rawdb.ReadCanonicalHash(db, number)
	return hash
}

// GetHeaderByNumber returns the canonical header at the given height, or nil
// when no canonical hash exists for it.
func (db *KakashiDB) GetHeaderByNumber(number uint64) *block.Header {
	hash := db.getHashByNumber(number)
	if hash == (common.Hash{}) {
		return nil
	}
	return db.GetHeader(hash, number)
}

// GetHeader loads the header with the given hash and height.
func (db *KakashiDB) GetHeader(hash common.Hash, number uint64) *block.Header {
	header := rawdb.ReadHeader(db, hash, number)
	return header
}
// GetHeaderByHash loads the header with the given hash, or nil when the hash
// is unknown. The original dereferenced the number pointer unconditionally
// and panicked on an unknown hash.
func (db *KakashiDB) GetHeaderByHash(hash common.Hash) *block.Header {
	number := rawdb.ReadHeaderNumber(db, hash)
	if number == nil {
		return nil
	}
	return rawdb.ReadHeader(db, hash, *number)
}
// GetBlock retrieves a block from the database by hash and number
func (db *KakashiDB) GetBlock(hash common.Hash, number uint64) *types.Block {
	block := rawdb.ReadBlock(db, hash, number)
	return block
}

// GetBlockNumber retrieves the block number belonging to the given hash
// from the database (nil when the hash is unknown).
func (db *KakashiDB) GetBlockNumber(hash common.Hash) *uint64 {
	return rawdb.ReadHeaderNumber(db, hash)
}
// GetBlockByHash retrieves a block from the database by hash, or nil when the
// hash is unknown. The original dereferenced the number pointer
// unconditionally and panicked on an unknown hash.
func (db *KakashiDB) GetBlockByHash(hash common.Hash) *types.Block {
	number := db.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return db.GetBlock(hash, *number)
}
// GetBlockByNumber retrieves a block from the database by number
// (nil when the height has no canonical block).
func (db *KakashiDB) GetBlockByNumber(number uint64) *types.Block {
	hash := rawdb.ReadCanonicalHash(db, number)
	return db.GetBlock(hash, number)
}
// indexerDataDump copies bloom-indexer data up to the head block. The header
// reads go through db.Get, which mirrors the touched keys into the
// destination batch; the final flush persists them.
func (db *KakashiDB) indexerDataDump(block *types.Block) {
	fmt.Println("indexerDataDump:")
	bloomIndexer := hmy.NewBloomIndexer(db, params.BloomBitsBlocks, params.BloomConfirms)
	bloomIndexer.Close() // just stop event loop
	section, blkno, blkhash := bloomIndexer.Sections()
	bloomIndexer.AddCheckpoint(section-1, blkhash)
	// Touch every header from the last indexed block to head so the reads
	// copy them across.
	for i := blkno; i <= block.NumberU64(); i++ {
		db.GetHeaderByNumber(i)
	}
	db.flush()
}
// offchainDataDump copies the off-chain data needed for a working snapshot:
// head pointers, the last BLOCKS_DUMP blocks (plus per-block reward, commit
// sig and cross-link records), validator stats/snapshots, delegations, and
// per-epoch shard state for the last EPOCHS_DUMP epochs. All the rawdb reads
// below are issued for their side effect: reading through db copies the
// touched keys into the destination batch.
func (db *KakashiDB) offchainDataDump(block *types.Block) {
	fmt.Println("offchainDataDump:")
	rawdb.WriteHeadBlockHash(db.toDBBatch, block.Hash())
	rawdb.WriteHeadHeaderHash(db.toDBBatch, block.Hash())
	db.GetHeaderByNumber(0)
	db.GetBlockByNumber(0)
	db.GetHeaderByHash(block.Hash())
	// EVM may access the last 256 block hash
	for i := 0; i <= BLOCKS_DUMP; i++ {
		if block.NumberU64() < uint64(i) {
			break
		}
		latestNumber := block.NumberU64() - uint64(i)
		latestBlock := db.GetBlockByNumber(latestNumber)
		db.GetBlockByHash(latestBlock.Hash())
		db.GetHeaderByHash(latestBlock.Hash())
		db.GetBlockByHash(latestBlock.Hash())
		rawdb.ReadBlockRewardAccumulator(db, latestNumber)
		rawdb.ReadBlockCommitSig(db, latestNumber)
		epoch := block.Epoch()
		epochInstance := shardSchedule.InstanceForEpoch(epoch)
		for shard := 0; shard < int(epochInstance.NumShards()); shard++ {
			rawdb.ReadCrossLinkShardBlock(db, uint32(shard), latestNumber)
		}
	}
	headEpoch := block.Epoch()
	epochInstance := shardSchedule.InstanceForEpoch(headEpoch)
	for shard := 0; shard < int(epochInstance.NumShards()); shard++ {
		rawdb.ReadShardLastCrossLink(db, uint32(shard))
	}
	// Iterators hand us raw key/value pairs, so copy them explicitly.
	rawdb.IteratorValidatorStats(db, func(it ethdb.Iterator, addr common.Address) bool {
		db.copyKV(it.Key(), it.Value())
		return true
	})
	rawdb.ReadPendingCrossLinks(db)
	rawdb.IteratorDelegatorDelegations(db, func(it ethdb.Iterator, delegator common.Address) bool {
		db.copyKV(it.Key(), it.Value())
		return true
	})
	for i := 0; i < EPOCHS_DUMP; i++ {
		epoch := new(big.Int).Sub(headEpoch, big.NewInt(int64(i)))
		if epoch.Sign() < 0 {
			break
		}
		rawdb.ReadShardState(db, epoch)
		rawdb.ReadEpochBlockNumber(db, epoch)
		rawdb.ReadEpochVrfBlockNums(db, epoch)
		rawdb.ReadEpochVdfBlockNum(db, epoch)
		var validators []common.Address
		rawdb.IteratorValidatorSnapshot(db, func(addr common.Address, _epoch *big.Int) bool {
			if _epoch.Cmp(epoch) == 0 {
				validator, err := rawdb.ReadValidatorSnapshot(db, addr, epoch)
				if err != nil {
					panic(err)
				}
				validators = append(validators, validator.Validator.Address)
			}
			return true
		})
		if i == 0 {
			rawdb.ReadValidatorList(db)
		}
	}
	rawdb.IteratorCXReceiptsProofSpent(db, func(it ethdb.Iterator, shardID uint32, number uint64) bool {
		db.copyKV(it.Key(), it.Value())
		return true
	})
	db.flush()
}
// stateDataDump walks the state trie at the head block's root and copies
// every touched trie node to the destination (via db.Get's copy side effect
// and the OnAccount* callbacks). The key arguments bound the dumped account
// range and the first account's storage range.
func (db *KakashiDB) stateDataDump(block *types.Block, startKey, endKey, firstStateStartKey, firstStateEndKey []byte) {
	fmt.Println("stateDataDump:")
	stateDB0 := state.NewDatabaseWithCache(db, STATEDB_CACHE_SIZE)
	rootHash := block.Root()
	stateDB, err := state.New(rootHash, stateDB0)
	if err != nil {
		panic(err)
	}
	config := new(state.DumpConfig)
	config.Start = startKey
	config.End = endKey
	config.StateStart = firstStateStartKey
	config.StateEnd = firstStateEndKey
	stateDB.DumpToCollector(db, config)
	db.flush()
}
// dumpMain opens the source and destination leveldbs, resolves the head
// block, and runs the three dump phases (off-chain data, indexer data, state
// trie). Exits the process on any unrecoverable error. The original
// dereferenced ReadHeaderNumber's result before checking for an empty head
// hash, panicking on an empty/fresh source DB; the checks are now ordered
// hash-first with an explicit nil-number guard.
func dumpMain(srcDBDir, destDBDir string, batchLimit int, startKey, endKey, firstStateStartKey, firstStateEndKey []byte) {
	fmt.Println("===dumpMain===")
	srcDB, err := ethRawDB.NewLevelDBDatabase(srcDBDir, LEVELDB_CACHE_SIZE, LEVELDB_HANDLES, "")
	if err != nil {
		fmt.Println("open src db error:", err)
		os.Exit(-1)
	}
	destDB, err := ethRawDB.NewLevelDBDatabase(destDBDir, LEVELDB_CACHE_SIZE, LEVELDB_HANDLES, "")
	if err != nil {
		fmt.Println("open dest db error:", err)
		os.Exit(-1)
	}
	headHash := rawdb.ReadHeadBlockHash(srcDB)
	if headHash == emptyHash {
		fmt.Println("empty head block hash")
		os.Exit(-1)
	}
	headNumberPtr := rawdb.ReadHeaderNumber(srcDB, headHash)
	if headNumberPtr == nil {
		fmt.Println("cannot read head block number")
		os.Exit(-1)
	}
	headNumber := *headNumberPtr
	fmt.Println("head-block:", headNumber, headHash.Hex())
	block := rawdb.ReadBlock(srcDB, headHash, headNumber)
	if block == nil {
		fmt.Println("ReadBlock error:")
		os.Exit(-1)
	}
	fmt.Println("start copying...")
	cache, _ := lru.New(LRU_CACHE_SIZE)
	copier := &KakashiDB{
		Database:   srcDB,
		toDB:       destDB,
		toDBBatch:  destDB.NewBatch(),
		batchLimit: batchLimit,
		cache:      cache,
	}
	defer copier.Close()
	copier.offchainDataDump(block)
	copier.indexerDataDump(block)
	copier.stateDataDump(block, startKey, endKey, firstStateStartKey, firstStateEndKey)
}

@ -216,6 +216,14 @@ var (
syncDiscHighFlag,
syncDiscBatchFlag,
}
shardDataFlags = []cli.Flag{
enableShardDataFlag,
diskCountFlag,
shardCountFlag,
cacheTimeFlag,
cacheSizeFlag,
}
)
var (
@ -321,6 +329,7 @@ func getRootFlags() []cli.Flag {
flags = append(flags, legacyMiscFlags...)
flags = append(flags, prometheusFlags...)
flags = append(flags, syncFlags...)
flags = append(flags, shardDataFlags...)
return flags
}
@ -1620,3 +1629,50 @@ func applySyncFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) {
config.Sync.DiscBatch = cli.GetIntFlagValue(cmd, syncDiscBatchFlag)
}
}
// shard data flags: CLI overrides for the multi-database (sharded leveldb)
// storage configuration; defaults come from defaultConfig.ShardData.
var (
	enableShardDataFlag = cli.BoolFlag{
		Name:     "sharddata.enable",
		Usage:    "whether use multi-database mode of levelDB",
		DefValue: defaultConfig.ShardData.EnableShardData,
	}
	diskCountFlag = cli.IntFlag{
		Name:     "sharddata.disk_count",
		Usage:    "the count of disks you want to storage block data",
		DefValue: defaultConfig.ShardData.DiskCount,
	}
	shardCountFlag = cli.IntFlag{
		Name:     "sharddata.shard_count",
		Usage:    "the count of shards you want to split in each disk",
		DefValue: defaultConfig.ShardData.ShardCount,
	}
	cacheTimeFlag = cli.IntFlag{
		Name:     "sharddata.cache_time",
		Usage:    "local cache save time (minute)",
		DefValue: defaultConfig.ShardData.CacheTime,
	}
	cacheSizeFlag = cli.IntFlag{
		Name:     "sharddata.cache_size",
		Usage:    "local cache storage size (MB)",
		DefValue: defaultConfig.ShardData.CacheSize,
	}
)
// applyShardDataFlags copies any explicitly-set shard-data CLI flags into
// cfg.ShardData, leaving untouched fields at their file/default values.
func applyShardDataFlags(cmd *cobra.Command, cfg *harmonyconfig.HarmonyConfig) {
	sd := &cfg.ShardData
	if cli.IsFlagChanged(cmd, enableShardDataFlag) {
		sd.EnableShardData = cli.GetBoolFlagValue(cmd, enableShardDataFlag)
	}
	if cli.IsFlagChanged(cmd, diskCountFlag) {
		sd.DiskCount = cli.GetIntFlagValue(cmd, diskCountFlag)
	}
	if cli.IsFlagChanged(cmd, shardCountFlag) {
		sd.ShardCount = cli.GetIntFlagValue(cmd, shardCountFlag)
	}
	if cli.IsFlagChanged(cmd, cacheTimeFlag) {
		sd.CacheTime = cli.GetIntFlagValue(cmd, cacheTimeFlag)
	}
	if cli.IsFlagChanged(cmd, cacheSizeFlag) {
		sd.CacheSize = cli.GetIntFlagValue(cmd, cacheSizeFlag)
	}
}

@ -141,6 +141,13 @@ func TestHarmonyFlags(t *testing.T) {
Gateway: "https://gateway.harmony.one",
},
Sync: defaultMainnetSyncConfig,
ShardData: harmonyconfig.ShardDataConfig{
EnableShardData: false,
DiskCount: 8,
ShardCount: 4,
CacheTime: 10,
CacheSize: 512,
},
},
},
}
@ -1245,6 +1252,52 @@ func TestSyncFlags(t *testing.T) {
}
}
// TestShardDataFlags checks that shard-data CLI flags are parsed into the
// expected ShardDataConfig, both for defaults and for explicit values.
func TestShardDataFlags(t *testing.T) {
	tests := []struct {
		args      []string
		expConfig harmonyconfig.ShardDataConfig
		expErr    error
	}{
		{
			args:      []string{},
			expConfig: defaultConfig.ShardData,
		},
		{
			args: []string{"--sharddata.enable",
				"--sharddata.disk_count", "8",
				"--sharddata.shard_count", "4",
				"--sharddata.cache_time", "10",
				"--sharddata.cache_size", "512",
			},
			expConfig: harmonyconfig.ShardDataConfig{
				EnableShardData: true,
				DiskCount:       8,
				ShardCount:      4,
				CacheTime:       10,
				CacheSize:       512,
			},
		},
	}
	for i, test := range tests {
		ts := newFlagTestSuite(t, shardDataFlags, func(command *cobra.Command, config *harmonyconfig.HarmonyConfig) {
			applyShardDataFlags(command, config)
		})
		hc, err := ts.run(test.args)
		// Tear down immediately: the original only tore down at the end of
		// the loop body, so the `continue` on an expected error leaked the
		// suite.
		ts.tearDown()
		if assErr := assertError(err, test.expErr); assErr != nil {
			t.Fatalf("Test %v: %v", i, assErr)
		}
		if err != nil || test.expErr != nil {
			continue
		}
		if !reflect.DeepEqual(hc.ShardData, test.expConfig) {
			t.Errorf("Test %v:\n\t%+v\n\t%+v", i, hc.ShardData, test.expConfig)
		}
	}
}
type flagTestSuite struct {
t *testing.T

@ -101,6 +101,7 @@ func init() {
rootCmd.AddCommand(configCmd)
rootCmd.AddCommand(versionCmd)
rootCmd.AddCommand(dumpConfigLegacyCmd)
rootCmd.AddCommand(dumpDBCmd)
if err := registerRootCmdFlags(); err != nil {
os.Exit(2)
@ -108,6 +109,9 @@ func init() {
if err := registerDumpConfigFlags(); err != nil {
os.Exit(2)
}
if err := registerDumpDBFlags(); err != nil {
os.Exit(2)
}
}
func main() {
@ -231,6 +235,7 @@ func applyRootFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) {
applyRevertFlags(cmd, config)
applyPrometheusFlags(cmd, config)
applySyncFlags(cmd, config)
applyShardDataFlags(cmd, config)
}
func setupNodeLog(config harmonyconfig.HarmonyConfig) {
@ -657,7 +662,18 @@ func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfi
}
// Current node.
chainDBFactory := &shardchain.LDBFactory{RootDir: nodeConfig.DBDir}
var chainDBFactory shardchain.DBFactory
if hc.ShardData.EnableShardData {
chainDBFactory = &shardchain.LDBShardFactory{
RootDir: nodeConfig.DBDir,
DiskCount: hc.ShardData.DiskCount,
ShardCount: hc.ShardData.ShardCount,
CacheTime: hc.ShardData.CacheTime,
CacheSize: hc.ShardData.CacheSize,
}
} else {
chainDBFactory = &shardchain.LDBFactory{RootDir: nodeConfig.DBDir}
}
currentNode := node.New(myHost, currentConsensus, chainDBFactory, blacklist, nodeConfig.ArchiveModes(), &hc)
@ -797,7 +813,9 @@ func setupSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.HarmonyCo
node.RegisterService(service.Synchronize, s)
d := s.Downloaders.GetShardDownloader(node.Blockchain().ShardID())
node.Consensus.SetDownloader(d)
if hc.Sync.Downloader {
node.Consensus.SetDownloader(d) // Set downloader when stream client is active
}
}
func setupBlacklist(hc harmonyconfig.HarmonyConfig) (map[ethCommon.Address]struct{}, error) {
@ -824,7 +842,7 @@ func setupBlacklist(hc harmonyconfig.HarmonyConfig) (map[ethCommon.Address]struc
func listenOSSigAndShutDown(node *node.Node) {
// Prepare for graceful shutdown from os signals
osSignal := make(chan os.Signal)
osSignal := make(chan os.Signal, 1)
signal.Notify(osSignal, syscall.SIGINT, syscall.SIGTERM)
sig := <-osSignal
utils.Logger().Warn().Str("signal", sig.String()).Msg("Gracefully shutting down...")

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build linux || netbsd || openbsd || solaris
// +build linux netbsd openbsd solaris
package fdlimit

@ -424,7 +424,6 @@ func (consensus *Consensus) Start(
return
}
}
consensus.getLogger().Info().Msg("[ConsensusMainLoop] Ended.")
}()
if consensus.dHelper != nil {

@ -39,7 +39,7 @@ func TestConstructAnnounceMessage(test *testing.T) {
consensus.blockHash = [32]byte{}
pubKeyWrapper := bls.PublicKeyWrapper{Object: blsPriKey.GetPublicKey()}
pubKeyWrapper.Bytes.FromLibBLSPublicKey(pubKeyWrapper.Object)
priKeyWrapper := bls.PrivateKeyWrapper{blsPriKey, &pubKeyWrapper}
priKeyWrapper := bls.PrivateKeyWrapper{Pri: blsPriKey, Pub: &pubKeyWrapper}
if _, err = consensus.construct(msg_pb.MessageType_ANNOUNCE, nil, []*bls.PrivateKeyWrapper{&priKeyWrapper}); err != nil {
test.Fatalf("could not construct announce: %v", err)
}
@ -110,7 +110,7 @@ func TestConstructPreparedMessage(test *testing.T) {
pubKeyWrapper := bls.PublicKeyWrapper{Object: blsPriKey.GetPublicKey()}
pubKeyWrapper.Bytes.FromLibBLSPublicKey(pubKeyWrapper.Object)
priKeyWrapper := bls.PrivateKeyWrapper{blsPriKey, &pubKeyWrapper}
priKeyWrapper := bls.PrivateKeyWrapper{Pri: blsPriKey, Pub: &pubKeyWrapper}
network, err := consensus.construct(msg_pb.MessageType_PREPARED, nil, []*bls.PrivateKeyWrapper{&priKeyWrapper})
if err != nil {
test.Errorf("Error when creating prepared message")
@ -134,12 +134,12 @@ func TestConstructPrepareMessage(test *testing.T) {
blsPriKey1 := bls.RandPrivateKey()
pubKeyWrapper1 := bls.PublicKeyWrapper{Object: blsPriKey1.GetPublicKey()}
pubKeyWrapper1.Bytes.FromLibBLSPublicKey(pubKeyWrapper1.Object)
priKeyWrapper1 := bls.PrivateKeyWrapper{blsPriKey1, &pubKeyWrapper1}
priKeyWrapper1 := bls.PrivateKeyWrapper{Pri: blsPriKey1, Pub: &pubKeyWrapper1}
blsPriKey2 := bls.RandPrivateKey()
pubKeyWrapper2 := bls.PublicKeyWrapper{Object: blsPriKey2.GetPublicKey()}
pubKeyWrapper2.Bytes.FromLibBLSPublicKey(pubKeyWrapper2.Object)
priKeyWrapper2 := bls.PrivateKeyWrapper{blsPriKey2, &pubKeyWrapper2}
priKeyWrapper2 := bls.PrivateKeyWrapper{Pri: blsPriKey2, Pub: &pubKeyWrapper2}
decider := quorum.NewDecider(
quorum.SuperMajorityStake, shard.BeaconChainShardID,
@ -226,12 +226,12 @@ func TestConstructCommitMessage(test *testing.T) {
blsPriKey1 := bls.RandPrivateKey()
pubKeyWrapper1 := bls.PublicKeyWrapper{Object: blsPriKey1.GetPublicKey()}
pubKeyWrapper1.Bytes.FromLibBLSPublicKey(pubKeyWrapper1.Object)
priKeyWrapper1 := bls.PrivateKeyWrapper{blsPriKey1, &pubKeyWrapper1}
priKeyWrapper1 := bls.PrivateKeyWrapper{Pri: blsPriKey1, Pub: &pubKeyWrapper1}
blsPriKey2 := bls.RandPrivateKey()
pubKeyWrapper2 := bls.PublicKeyWrapper{Object: blsPriKey2.GetPublicKey()}
pubKeyWrapper2.Bytes.FromLibBLSPublicKey(pubKeyWrapper2.Object)
priKeyWrapper2 := bls.PrivateKeyWrapper{blsPriKey2, &pubKeyWrapper2}
priKeyWrapper2 := bls.PrivateKeyWrapper{Pri: blsPriKey2, Pub: &pubKeyWrapper2}
decider := quorum.NewDecider(
quorum.SuperMajorityStake, shard.BeaconChainShardID,

@ -86,14 +86,14 @@ func (consensus *Consensus) checkDoubleSign(recvMsg *FBFTMessage) bool {
evid := slash.Evidence{
ConflictingVotes: slash.ConflictingVotes{
FirstVote: slash.Vote{
alreadyCastBallot.SignerPubKeys,
alreadyCastBallot.BlockHeaderHash,
alreadyCastBallot.Signature,
SignerPubKeys: alreadyCastBallot.SignerPubKeys,
BlockHeaderHash: alreadyCastBallot.BlockHeaderHash,
Signature: alreadyCastBallot.Signature,
},
SecondVote: slash.Vote{
secondKeys,
recvMsg.BlockHash,
common.Hex2Bytes(doubleSign.SerializeToHexStr()),
SignerPubKeys: secondKeys,
BlockHeaderHash: recvMsg.BlockHash,
Signature: common.Hex2Bytes(doubleSign.SerializeToHexStr()),
}},
Moment: slash.Moment{
Epoch: curHeader.Epoch(),

@ -45,7 +45,7 @@ func generateRandomSlot() (shard.Slot, bls_core.SecretKey) {
key := bls.SerializedPublicKey{}
key.FromLibBLSPublicKey(secretKey.GetPublicKey())
stake := numeric.NewDecFromBigInt(big.NewInt(int64(stakeGen.Int63n(maxStakeGen))))
return shard.Slot{addr, key, &stake}, secretKey
return shard.Slot{EcdsaAddress: addr, BLSPublicKey: key, EffectiveStake: &stake}, secretKey
}
// 50 Harmony Nodes, 50 Staked Nodes
@ -73,7 +73,7 @@ func setupBaseCase() (Decider, *TallyResult, shard.SlotList, map[string]secretKe
decider := NewDecider(SuperMajorityStake, shard.BeaconChainShardID)
decider.UpdateParticipants(pubKeys)
tally, err := decider.SetVoters(&shard.Committee{
shard.BeaconChainShardID, slotList,
ShardID: shard.BeaconChainShardID, Slots: slotList,
}, big.NewInt(3))
if err != nil {
panic("Unable to SetVoters for Base Case")
@ -102,7 +102,7 @@ func setupEdgeCase() (Decider, *TallyResult, shard.SlotList, secretKeyMap) {
decider := NewDecider(SuperMajorityStake, shard.BeaconChainShardID)
decider.UpdateParticipants(pubKeys)
tally, err := decider.SetVoters(&shard.Committee{
shard.BeaconChainShardID, slotList,
ShardID: shard.BeaconChainShardID, Slots: slotList,
}, big.NewInt(3))
if err != nil {
panic("Unable to SetVoters for Edge Case")

@ -223,7 +223,7 @@ func TestAddNewVote(test *testing.T) {
decider.UpdateParticipants(pubKeys)
decider.SetVoters(&shard.Committee{
shard.BeaconChainShardID, slotList,
ShardID: shard.BeaconChainShardID, Slots: slotList,
}, big.NewInt(3))
aggSig := &bls_core.Sign{}
@ -328,7 +328,7 @@ func TestAddNewVoteAggregateSig(test *testing.T) {
decider.UpdateParticipants(pubKeys)
decider.SetVoters(&shard.Committee{
shard.BeaconChainShardID, slotList,
ShardID: shard.BeaconChainShardID, Slots: slotList,
}, big.NewInt(3))
aggSig := &bls_core.Sign{}
@ -412,7 +412,7 @@ func TestAddNewVoteInvalidAggregateSig(test *testing.T) {
decider.UpdateParticipants(pubKeys)
decider.SetVoters(&shard.Committee{
shard.BeaconChainShardID, slotList,
ShardID: shard.BeaconChainShardID, Slots: slotList,
}, big.NewInt(3))
aggSig := &bls_core.Sign{}

@ -52,7 +52,7 @@ func generateRandomSlot() shard.Slot {
key := bls.SerializedPublicKey{}
key.FromLibBLSPublicKey(secretKey.GetPublicKey())
stake := numeric.NewDecFromBigInt(big.NewInt(int64(stakeGen.Int63n(maxStakeGen))))
return shard.Slot{addr, key, &stake}
return shard.Slot{EcdsaAddress: addr, BLSPublicKey: key, EffectiveStake: &stake}
}
func TestCompute(t *testing.T) {
@ -100,7 +100,7 @@ func TestCompute(t *testing.T) {
expectedRoster.TheirVotingPowerTotalPercentage = theirPercentage
computedRoster, err := Compute(&shard.Committee{
shard.BeaconChainShardID, slotList,
ShardID: shard.BeaconChainShardID, Slots: slotList,
}, big.NewInt(3))
if err != nil {
t.Error("Computed Roster failed on vote summation to one")

@ -61,8 +61,25 @@ import (
)
var (
// blockInsertTimer
blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil)
headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil)
headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)
accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil)
accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil)
accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
storageReadTimer = metrics.NewRegisteredTimer("chain/storage/reads", nil)
storageHashTimer = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
// ErrNoGenesis is the error when there is no genesis.
ErrNoGenesis = errors.New("Genesis not found in chain")
// errExceedMaxPendingSlashes ..
@ -165,8 +182,7 @@ type BlockChain struct {
running int32 // running must be called atomically
blockchainPruner *blockchainPruner // use to prune beacon chain
// procInterrupt must be atomically called
procInterrupt int32 // interrupt signaler for block processing
wg sync.WaitGroup // chain processing wait group for shutting down
procInterrupt int32 // interrupt signaler for block processing
engine consensus_engine.Engine
processor Processor // block processor interface
@ -334,6 +350,7 @@ func (bc *BlockChain) loadLastState() error {
}
// Everything seems to be fine, set as the head block
bc.currentBlock.Store(currentBlock)
headBlockGauge.Update(int64(currentBlock.NumberU64()))
// We don't need the following as we want the current header and block to be consistent
// Restore the last known head header
@ -350,9 +367,11 @@ func (bc *BlockChain) loadLastState() error {
// Restore the last known head fast block
bc.currentFastBlock.Store(currentBlock)
headFastBlockGauge.Update(int64(currentBlock.NumberU64()))
if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
if block := bc.GetBlockByHash(head); block != nil {
bc.currentFastBlock.Store(block)
headFastBlockGauge.Update(int64(block.NumberU64()))
}
}
@ -414,24 +433,31 @@ func (bc *BlockChain) SetHead(head uint64) error {
// Rewind the block chain, ensuring we don't end up with a stateless head block
if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number().Uint64() < currentBlock.NumberU64() {
bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number().Uint64()))
newHeadBlock := bc.GetBlock(currentHeader.Hash(), currentHeader.Number().Uint64())
bc.currentBlock.Store(newHeadBlock)
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
}
if currentBlock := bc.CurrentBlock(); currentBlock != nil {
if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
// Rewound state missing, rolled back to before pivot, reset to genesis
bc.currentBlock.Store(bc.genesisBlock)
headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
}
}
// Rewind the fast block in a simpleton way to the target head
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number().Uint64() < currentFastBlock.NumberU64() {
bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number().Uint64()))
newHeadFastBlock := bc.GetBlock(currentHeader.Hash(), currentHeader.Number().Uint64())
bc.currentFastBlock.Store(newHeadFastBlock)
headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
}
// If either blocks reached nil, reset to the genesis state
if currentBlock := bc.CurrentBlock(); currentBlock == nil {
bc.currentBlock.Store(bc.genesisBlock)
headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
}
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
bc.currentFastBlock.Store(bc.genesisBlock)
headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
}
currentBlock := bc.CurrentBlock()
currentFastBlock := bc.CurrentFastBlock()
@ -536,8 +562,9 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
return err
}
bc.currentBlock.Store(bc.genesisBlock)
headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
bc.currentFastBlock.Store(bc.genesisBlock)
headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
return nil
}
@ -657,6 +684,7 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) error {
}
bc.currentBlock.Store(block)
headBlockGauge.Update(int64(block.NumberU64()))
// If the block is better than our head or is on a different chain, force update heads
if updateHeads {
@ -668,6 +696,7 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) error {
}
bc.currentFastBlock.Store(block)
headFastBlockGauge.Update(int64(block.NumberU64()))
}
return nil
}
@ -847,6 +876,9 @@ func (bc *BlockChain) Stop() {
return
}
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
if err := bc.SavePendingCrossLinks(); err != nil {
utils.Logger().Error().Err(err).Msg("Failed to save pending cross links")
}
@ -856,8 +888,6 @@ func (bc *BlockChain) Stop() {
close(bc.quit)
atomic.StoreInt32(&bc.procInterrupt, 1)
bc.wg.Wait()
// Ensure the state of a recent block is also stored to disk before exiting.
// We're writing three different states to catch different restart scenarios:
// - HEAD: So we don't need to reprocess any blocks in the general case
@ -941,6 +971,7 @@ func (bc *BlockChain) Rollback(chain []common.Hash) error {
newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
if newFastBlock != nil {
bc.currentFastBlock.Store(newFastBlock)
headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
}
}
@ -948,6 +979,7 @@ func (bc *BlockChain) Rollback(chain []common.Hash) error {
newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
if newBlock != nil {
bc.currentBlock.Store(newBlock)
headBlockGauge.Update(int64(newBlock.NumberU64()))
if err := rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash()); err != nil {
return err
}
@ -1030,9 +1062,6 @@ func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts ty
// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
bc.wg.Add(1)
defer bc.wg.Done()
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(blockChain); i++ {
if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
@ -1048,6 +1077,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
}
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
var (
stats = struct{ processed, ignored int32 }{}
start = time.Now()
@ -1112,6 +1144,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
bc.currentFastBlock.Store(head)
headFastBlockGauge.Update(int64(head.NumberU64()))
}
}
bc.mu.Unlock()
@ -1135,8 +1168,8 @@ var lastWrite uint64
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
bc.wg.Add(1)
defer bc.wg.Done()
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
return err
@ -1156,9 +1189,6 @@ func (bc *BlockChain) WriteBlockWithState(
paid reward.Reader,
state *state.DB,
) (status WriteStatus, err error) {
bc.wg.Add(1)
defer bc.wg.Done()
// Make sure no inconsistent state is leaked during insertion
bc.mu.Lock()
defer bc.mu.Unlock()
@ -1336,9 +1366,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifyHeaders bool) (int,
chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
}
}
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
defer bc.wg.Done()
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
@ -1484,6 +1511,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifyHeaders bool) (int,
events = append(events, ev)
}
// Process block using the parent state as reference point.
substart := time.Now()
receipts, cxReceipts, stakeMsgs, logs, usedGas, payout, newState, err := bc.processor.Process(
block, state, vmConfig, true,
)
@ -1493,7 +1521,18 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifyHeaders bool) (int,
return i, events, coalescedLogs, err
}
// Update the metrics touched during block processing
accountReadTimer.Update(state.AccountReads) // Account reads are complete, we can mark them
storageReadTimer.Update(state.StorageReads) // Storage reads are complete, we can mark them
accountUpdateTimer.Update(state.AccountUpdates) // Account updates are complete, we can mark them
storageUpdateTimer.Update(state.StorageUpdates) // Storage updates are complete, we can mark them
triehash := state.AccountHashes + state.StorageHashes // Save to not double count in validation
trieproc := state.AccountReads + state.AccountUpdates
trieproc += state.StorageReads + state.StorageUpdates
blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
// Validate the state using the default validator
substart = time.Now()
if err := bc.Validator().ValidateState(
block, state, receipts, cxReceipts, usedGas,
); err != nil {
@ -1502,7 +1541,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifyHeaders bool) (int,
}
proctime := time.Since(bstart)
// Update the metrics touched during block validation
accountHashTimer.Update(state.AccountHashes) // Account hashes are complete, we can mark them
storageHashTimer.Update(state.StorageHashes) // Storage hashes are complete, we can mark them
blockValidationTimer.Update(time.Since(substart) - (state.AccountHashes + state.StorageHashes - triehash))
// Write the block to the chain and get the status.
substart = time.Now()
status, err := bc.WriteBlockWithState(
block, receipts, cxReceipts, stakeMsgs, payout, state,
)
@ -1519,6 +1564,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifyHeaders bool) (int,
Str("elapsed", common.PrettyDuration(time.Since(bstart)).String()).
Logger()
// Update the metrics touched during block commit
accountCommitTimer.Update(state.AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(state.StorageCommits) // Storage commits are complete, we can mark them
blockWriteTimer.Update(time.Since(substart) - state.AccountCommits - state.StorageCommits)
blockInsertTimer.UpdateSince(bstart)
switch status {
case CanonStatTy:
logger.Info().Msg("Inserted new block")
@ -1733,9 +1785,6 @@ func (bc *BlockChain) InsertHeaderChain(chain []*block.Header, checkFreq int) (i
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
bc.wg.Add(1)
defer bc.wg.Done()
whFunc := func(header *block.Header) error {
bc.mu.Lock()
defer bc.mu.Unlock()
@ -2567,7 +2616,7 @@ func (bc *BlockChain) ComputeAndUpdateAPR(
}
} else {
// only insert if APR for current epoch does not exists
aprEntry := staking.APREntry{now, *aprComputed}
aprEntry := staking.APREntry{Epoch: now, Value: *aprComputed}
l := len(stats.APRs)
// first time inserting apr for validator or
// apr for current epoch does not exists
@ -2605,7 +2654,7 @@ func (bc *BlockChain) UpdateValidatorSnapshots(
return err
}
snapshot := &staking.ValidatorSnapshot{validator, epoch}
snapshot := &staking.ValidatorSnapshot{Validator: validator, Epoch: epoch}
if err := bc.WriteValidatorSnapshot(batch, snapshot); err != nil {
return err
}
@ -2752,13 +2801,13 @@ func (bc *BlockChain) UpdateStakingMetaData(
return newValidators, err
}
if err := bc.WriteValidatorSnapshot(batch, &staking.ValidatorSnapshot{validator, epoch}); err != nil {
if err := bc.WriteValidatorSnapshot(batch, &staking.ValidatorSnapshot{Validator: validator, Epoch: epoch}); err != nil {
return newValidators, err
}
// For validator created at exactly the last block of an epoch, we should create the snapshot
// for next epoch too.
if newEpoch.Cmp(epoch) > 0 {
if err := bc.WriteValidatorSnapshot(batch, &staking.ValidatorSnapshot{validator, newEpoch}); err != nil {
if err := bc.WriteValidatorSnapshot(batch, &staking.ValidatorSnapshot{Validator: validator, Epoch: newEpoch}); err != nil {
return newValidators, err
}
}
@ -2831,9 +2880,9 @@ func (bc *BlockChain) prepareStakingMetaData(
// Add self delegation into the index
selfIndex := staking.DelegationIndex{
createValidator.ValidatorAddress,
uint64(0),
blockNum,
ValidatorAddress: createValidator.ValidatorAddress,
Index: uint64(0),
BlockNum: blockNum,
}
delegations, ok := newDelegations[createValidator.ValidatorAddress]
if !ok {
@ -2951,9 +3000,9 @@ func (bc *BlockChain) addDelegationIndex(
) {
// TODO(audit): change the way of indexing if we allow delegation deletion.
delegations = append(delegations, staking.DelegationIndex{
validatorAddress,
uint64(i),
blockNum,
ValidatorAddress: validatorAddress,
Index: uint64(i),
BlockNum: blockNum,
})
}
}

@ -77,12 +77,12 @@ func TestEVMStaking(t *testing.T) {
// write it to snapshot so that we can use it in edit
// use a copy because we are editing below (wrapper.Delegations)
wrapper, err := db.ValidatorWrapper(createValidator.ValidatorAddress, false, true)
err = chain.WriteValidatorSnapshot(batch, &staking.ValidatorSnapshot{wrapper, header.Epoch()})
err = chain.WriteValidatorSnapshot(batch, &staking.ValidatorSnapshot{Validator: wrapper, Epoch: header.Epoch()})
// also write the delegation so we can use it in CollectRewards
selfIndex := staking.DelegationIndex{
createValidator.ValidatorAddress,
uint64(0),
common.Big0, // block number at which delegation starts
ValidatorAddress: createValidator.ValidatorAddress,
Index: uint64(0),
BlockNum: common.Big0, // block number at which delegation starts
}
err = chain.writeDelegationsByDelegator(batch, createValidator.ValidatorAddress, []staking.DelegationIndex{selfIndex})
@ -98,10 +98,9 @@ func TestEVMStaking(t *testing.T) {
delegate := sampleDelegate(*key)
// add undelegations in epoch0
wrapper.Delegations[0].Undelegations = []staking.Undelegation{
staking.Undelegation{
new(big.Int).Mul(big.NewInt(denominations.One),
big.NewInt(10000)),
common.Big0,
{
Amount: new(big.Int).Mul(big.NewInt(denominations.One), big.NewInt(10000)),
Epoch: common.Big0,
},
}
// redelegate using epoch1, so that we can cover the locked tokens use case as well

@ -5,6 +5,7 @@ package core
import (
"encoding/json"
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"

@ -110,7 +110,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
}
}
hc.currentHeaderHash = hc.CurrentHeader().Hash()
headHeaderGauge.Update(hc.CurrentHeader().Number().Int64())
return hc, nil
}
@ -474,6 +474,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *block.Header) error {
hc.currentHeader.Store(head)
hc.currentHeaderHash = head.Hash()
headHeaderGauge.Update(head.Number().Int64())
return nil
}
@ -530,7 +531,7 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) error {
hc.currentHeader.Store(hc.genesisHeader)
}
hc.currentHeaderHash = hc.CurrentHeader().Hash()
headHeaderGauge.Update(hc.CurrentHeader().Number().Int64())
return nil
}

@ -1,10 +1,12 @@
package rawdb
import (
"encoding/binary"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
@ -157,7 +159,7 @@ func ReadValidatorSnapshot(
Msg("Unable to decode validator snapshot from database")
return nil, err
}
s := staking.ValidatorSnapshot{&v, epoch}
s := staking.ValidatorSnapshot{Validator: &v, Epoch: epoch}
return &s, nil
}
@ -205,6 +207,74 @@ func IteratorValidatorSnapshot(iterator DatabaseIterator, cb func(addr common.Ad
return
}
// IteratorCXReceipt iterates over every stored cross-shard receipt, decoding the
// shard ID, block number and block hash from each key and handing them to cb.
// Iteration stops early as soon as cb returns false.
func IteratorCXReceipt(iterator DatabaseIterator, cb func(it ethdb.Iterator, shardID uint32, number uint64, hash common.Hash) bool) {
	prefixKey := cxReceiptPrefix
	iter := iterator.NewIteratorWithPrefix(prefixKey)
	defer iter.Release()
	shardOffset := len(prefixKey)
	numberOffset := shardOffset + 4
	hashOffset := numberOffset + 8
	for iter.Next() {
		// cxReceiptKey = cxReceiptPrefix + shardID (4 bytes) + number (8 bytes) + block hash (32 bytes)
		key := iter.Key()
		shardID := binary.BigEndian.Uint32(key[shardOffset : shardOffset+4])
		number := binary.BigEndian.Uint64(key[numberOffset : numberOffset+8])
		// A block hash is common.HashLength (32) bytes; slicing only 20 bytes here
		// (as the previous code did) truncates the hash and yields a wrong value.
		hash := common.BytesToHash(key[hashOffset : hashOffset+common.HashLength])
		if !cb(iter, shardID, number, hash) {
			return
		}
	}
}
// IteratorCXReceiptsProofSpent iterates over all spent cross-shard receipt proof
// markers, decoding the shard ID and block number from each key. Iteration stops
// early as soon as cb returns false.
func IteratorCXReceiptsProofSpent(iterator DatabaseIterator, cb func(it ethdb.Iterator, shardID uint32, number uint64) bool) {
	prefixKey := cxReceiptSpentPrefix
	iter := iterator.NewIteratorWithPrefix(prefixKey)
	defer iter.Release()
	shardOffset := len(prefixKey)
	numberOffset := shardOffset + 4
	for iter.Next() {
		// cxReceiptSpentKey = cxReceiptSpentPrefix + shardID (4 bytes) + number (8 bytes)
		key := iter.Key()
		shardID := binary.BigEndian.Uint32(key[shardOffset : shardOffset+4])
		number := binary.BigEndian.Uint64(key[numberOffset : numberOffset+8])
		if !cb(iter, shardID, number) {
			return
		}
	}
}
// IteratorValidatorStats iterates over all stored validator statistics, decoding
// the validator address from each key. Iteration stops early as soon as cb
// returns false.
func IteratorValidatorStats(iterator DatabaseIterator, cb func(it ethdb.Iterator, addr common.Address) bool) {
	prefixKey := validatorStatsPrefix
	iter := iterator.NewIteratorWithPrefix(prefixKey)
	defer iter.Release()
	addrOffset := len(prefixKey)
	for iter.Next() {
		// validatorStatsKey = validatorStatsPrefix + addr bytes (20 bytes)
		key := iter.Key()
		addr := common.BytesToAddress(key[addrOffset : addrOffset+common.AddressLength])
		if !cb(iter, addr) {
			return
		}
	}
}
// IteratorDelegatorDelegations iterates over every delegator that has a stored
// validator list, decoding the delegator address from each key. Iteration stops
// early as soon as cb returns false.
func IteratorDelegatorDelegations(iterator DatabaseIterator, cb func(it ethdb.Iterator, delegator common.Address) bool) {
	prefixKey := delegatorValidatorListPrefix
	iter := iterator.NewIteratorWithPrefix(prefixKey)
	defer iter.Release()
	addrOffset := len(prefixKey)
	for iter.Next() {
		// delegatorValidatorListKey = delegatorValidatorListPrefix + delegator addr bytes (20 bytes)
		key := iter.Key()
		addr := common.BytesToAddress(key[addrOffset : addrOffset+common.AddressLength])
		if !cb(iter, addr) {
			return
		}
	}
}
// DeleteValidatorStats ..
func DeleteValidatorStats(db DatabaseDeleter, addr common.Address) error {
if err := db.Delete(validatorStatsKey(addr)); err != nil {

@ -17,56 +17,118 @@
package state
import (
"bytes"
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/harmony-one/harmony/internal/utils"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
// DumpAccount represents an account in the state
// DumpConfig is a set of options to control what portions of the state will be
// iterated and collected.
type DumpConfig struct {
	SkipCode          bool   // do not collect contract bytecode for each account
	SkipStorage       bool   // do not iterate and collect per-account storage slots
	OnlyWithAddresses bool   // skip accounts whose address preimage is missing from the trie database
	Start             []byte // account trie key at which iteration begins (nil = from the start)
	End               []byte // account trie key at which iteration stops, exclusive (empty = no limit)
	StateStart        []byte // storage trie key at which the first account's storage iteration begins
	StateEnd          []byte // storage trie key at which storage iteration stops, exclusive (empty = no limit)
	Max               uint64 // maximum number of accounts to collect; 0 means unlimited
}
// DumpCollector interface which the state trie calls during iteration
type DumpCollector interface {
	// OnRoot is called once, with the state root, before any account is visited
	OnRoot(common.Hash)
	// OnAccountStart is called once per account, before its storage is iterated
	OnAccountStart(common.Address, DumpAccount)
	// OnAccountState is called once for every storage slot of the current account
	OnAccountState(_ common.Address, StateSecureKey hexutil.Bytes, key, value []byte)
	// OnAccountEnd is called once per account, after its storage has been iterated
	OnAccountEnd(common.Address, DumpAccount)
}
// DumpAccount represents an account in the state.
type DumpAccount struct {
	Balance   string                 `json:"balance"`           // decimal string of the account balance
	Nonce     uint64                 `json:"nonce"`             // account nonce
	Root      hexutil.Bytes          `json:"root"`              // storage trie root of the account
	CodeHash  hexutil.Bytes          `json:"codeHash"`          // hash of the contract bytecode
	Code      hexutil.Bytes          `json:"code,omitempty"`    // contract bytecode; omitted when SkipCode is set
	Storage   map[common.Hash]string `json:"storage,omitempty"` // storage slots; omitted when SkipStorage is set
	Address   *common.Address        `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode
	SecureKey hexutil.Bytes          `json:"key,omitempty"`     // If we don't have address, we can output the key
}
// Dump represents the full dump in a collected format, as one large map
// Dump represents the full dump in a collected format, as one large map.
type Dump struct {
	Root     string                         `json:"root"`     // hex-encoded state root, recorded by OnRoot
	Accounts map[common.Address]DumpAccount `json:"accounts"` // every collected account, keyed by address
}
// iterativeDump is a 'collector'-implementation which dump output line-by-line iteratively
type iterativeDump struct {
*json.Encoder
// OnRoot implements DumpCollector interface: the state root is kept as a
// plain hex string (no 0x prefix).
func (d *Dump) OnRoot(root common.Hash) {
	d.Root = fmt.Sprintf("%x", root)
}
// Collector interface which the state trie calls during iteration
type collector interface {
onRoot(common.Hash)
onAccount(common.Address, DumpAccount)
// OnAccount implements DumpCollector interface
func (d *Dump) OnAccountStart(addr common.Address, account DumpAccount) {
}
func (d *Dump) onRoot(root common.Hash) {
// OnAccount implements DumpCollector interface
func (d *Dump) OnAccountState(_ common.Address, StateSecureKey hexutil.Bytes, key, value []byte) {
}
// OnAccountEnd implements DumpCollector interface: the finished account is
// recorded into the collected map under its address.
func (d *Dump) OnAccountEnd(addr common.Address, account DumpAccount) {
	d.Accounts[addr] = account
}
// IteratorDump is an implementation for iterating over data in batches; Next
// carries the resume key for the following batch.
type IteratorDump struct {
	Root     string                         `json:"root"`
	Accounts map[common.Address]DumpAccount `json:"accounts"`
	Next     []byte                         `json:"next,omitempty"` // nil if no more accounts
}
// OnRoot implements DumpCollector interface
func (d *IteratorDump) OnRoot(root common.Hash) {
d.Root = fmt.Sprintf("%x", root)
}
func (d *Dump) onAccount(addr common.Address, account DumpAccount) {
// OnAccount implements DumpCollector interface
func (d *IteratorDump) OnAccountStart(addr common.Address, account DumpAccount) {
}
// OnAccount implements DumpCollector interface
func (d *IteratorDump) OnAccountState(_ common.Address, StateSecureKey hexutil.Bytes, key, value []byte) {
}
// OnAccount implements DumpCollector interface
func (d *IteratorDump) OnAccountEnd(addr common.Address, account DumpAccount) {
d.Accounts[addr] = account
}
func (d iterativeDump) onAccount(addr common.Address, account DumpAccount) {
// iterativeDump is a DumpCollector-implementation which dumps output line-by-line iteratively.
type iterativeDump struct {
*json.Encoder
}
// OnAccount implements DumpCollector interface
func (d iterativeDump) OnAccountStart(addr common.Address, account DumpAccount) {
}
// OnAccount implements DumpCollector interface
func (d iterativeDump) OnAccountState(_ common.Address, StateSecureKey hexutil.Bytes, key, value []byte) {
}
// OnAccount implements DumpCollector interface
func (d iterativeDump) OnAccountEnd(addr common.Address, account DumpAccount) {
dumpAccount := &DumpAccount{
Balance: account.Balance,
Nonce: account.Nonce,
@ -83,80 +145,134 @@ func (d iterativeDump) onAccount(addr common.Address, account DumpAccount) {
d.Encode(dumpAccount)
}
func (d iterativeDump) onRoot(root common.Hash) {
// OnRoot implements DumpCollector interface: the state root is emitted as its
// own single-field JSON object on the output stream.
func (d iterativeDump) OnRoot(root common.Hash) {
	d.Encode(struct {
		Root common.Hash `json:"root"`
	}{root})
}
func (s *DB) dump(c collector, excludeCode, excludeStorage, excludeMissingPreimages bool) {
emptyAddress := (common.Address{})
missingPreimages := 0
c.onRoot(s.trie.Hash())
it := trie.NewIterator(s.trie.NodeIterator(nil))
// DumpToCollector iterates the state according to the given options and inserts
// the items into a collector for aggregation or serialization.
func (s *DB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []byte) {
// Sanitize the input to allow nil configs
if conf == nil {
conf = new(DumpConfig)
}
var (
missingPreimages int
accounts uint64
start = time.Now()
logged = time.Now()
)
log.Info("Trie dumping started", "root", s.trie.Hash())
c.OnRoot(s.trie.Hash())
hasEnd := len(conf.End) > 0
stateStart := conf.Start
hasStateEnd := len(conf.StateEnd) > 0
it := trie.NewIterator(s.trie.NodeIterator(conf.Start))
for it.Next() {
if hasEnd && bytes.Compare(it.Key, conf.End) >= 0 {
break
}
var data Account
if err := rlp.DecodeBytes(it.Value, &data); err != nil {
panic(err)
}
addr := common.BytesToAddress(s.trie.GetKey(it.Key))
obj := newObject(nil, addr, data)
account := DumpAccount{
Balance: data.Balance.String(),
Nonce: data.Nonce,
Root: common.Bytes2Hex(data.Root[:]),
CodeHash: common.Bytes2Hex(data.CodeHash),
Balance: data.Balance.String(),
Nonce: data.Nonce,
Root: data.Root[:],
CodeHash: data.CodeHash,
SecureKey: it.Key,
}
if emptyAddress == addr {
addrBytes := s.trie.GetKey(it.Key)
if addrBytes == nil {
// Preimage missing
missingPreimages++
if excludeMissingPreimages {
if conf.OnlyWithAddresses {
continue
}
account.SecureKey = it.Key
}
if !excludeCode {
account.Code = common.Bytes2Hex(obj.Code(s.db))
addr := common.BytesToAddress(addrBytes)
obj := newObject(s, addr, data)
if !conf.SkipCode {
account.Code = obj.Code(s.db)
}
if !excludeStorage {
c.OnAccountStart(addr, account)
if !conf.SkipStorage {
account.Storage = make(map[common.Hash]string)
storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(nil))
storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(stateStart))
for storageIt.Next() {
if hasStateEnd && bytes.Compare(storageIt.Key, conf.StateEnd) >= 0 {
break
}
key := s.trie.GetKey(storageIt.Key)
c.OnAccountState(addr, storageIt.Key, key, storageIt.Value)
_, content, _, err := rlp.Split(storageIt.Value)
if err != nil {
utils.Logger().Err(err).Msg("Failed to decode the value returned by iterator")
log.Error("Failed to decode the value returned by iterator", "error", err)
continue
}
account.Storage[common.BytesToHash(s.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(content)
}
stateStart = nil
hasStateEnd = false
}
c.OnAccountEnd(addr, account)
accounts++
if time.Since(logged) > 8*time.Second {
log.Info("Trie dumping in progress", "at", it.Key, "accounts", accounts,
"elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
if conf.Max > 0 && accounts >= conf.Max {
if it.Next() {
nextKey = it.Key
}
break
}
c.onAccount(addr, account)
}
if missingPreimages > 0 {
utils.Logger().Warn().Int("missing", missingPreimages).Msg("Dump incomplete due to missing preimages")
log.Warn("Dump incomplete due to missing preimages", "missing", missingPreimages)
}
log.Info("Trie dumping complete", "accounts", accounts,
"elapsed", common.PrettyDuration(time.Since(start)))
return nextKey
}
// RawDump returns the entire state an a single large object
func (s *DB) RawDump(excludeCode, excludeStorage, excludeMissingPreimages bool) Dump {
func (s *DB) RawDump(opts *DumpConfig) Dump {
dump := &Dump{
Accounts: make(map[common.Address]DumpAccount),
}
s.dump(dump, excludeCode, excludeStorage, excludeMissingPreimages)
s.DumpToCollector(dump, opts)
return *dump
}
// Dump returns a JSON string representing the entire state as a single json-object
func (s *DB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool) []byte {
dump := s.RawDump(excludeCode, excludeStorage, excludeMissingPreimages)
func (s *DB) Dump(opts *DumpConfig) []byte {
dump := s.RawDump(opts)
json, err := json.MarshalIndent(dump, "", " ")
if err != nil {
fmt.Println("dump err", err)
fmt.Println("Dump err", err)
}
return json
}
// IterativeDump dumps out accounts as json-objects, delimited by linebreaks on stdout
func (s *DB) IterativeDump(excludeCode, excludeStorage, excludeMissingPreimages bool, output *json.Encoder) {
s.dump(iterativeDump{output}, excludeCode, excludeStorage, excludeMissingPreimages)
func (s *DB) IterativeDump(opts *DumpConfig, output *json.Encoder) {
s.DumpToCollector(iterativeDump{output}, opts)
}
// IteratorDump dumps out a batch of accounts starts with the given start key
func (s *DB) IteratorDump(opts *DumpConfig) IteratorDump {
	var result IteratorDump
	result.Accounts = make(map[common.Address]DumpAccount)
	result.Next = s.DumpToCollector(&result, opts)
	return result
}

@ -21,15 +21,12 @@ import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
)
var toAddr = common.BytesToAddress
type stateTest struct {
db ethdb.Database
state *DB
@ -45,11 +42,11 @@ func TestDump(t *testing.T) {
s := newStateTest()
// generate a few entries
obj1 := s.state.GetOrNewStateObject(toAddr([]byte{0x01}))
obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01}))
obj1.AddBalance(big.NewInt(22))
obj2 := s.state.GetOrNewStateObject(toAddr([]byte{0x01, 0x02}))
obj2 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
obj3 := s.state.GetOrNewStateObject(toAddr([]byte{0x02}))
obj3 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x02}))
obj3.SetBalance(big.NewInt(44))
// write some of them to the trie
@ -57,34 +54,37 @@ func TestDump(t *testing.T) {
s.state.updateStateObject(obj2)
s.state.Commit(false)
// check that dump contains the state objects that are in trie
got := string(s.state.Dump(false, false, true))
// check that DumpToCollector contains the state objects that are in trie
got := string(s.state.Dump(nil))
want := `{
"root": "71edff0130dd2385947095001c73d9e28d862fc286fca2b922ca6f6f3cddfdd2",
"accounts": {
"0x0000000000000000000000000000000000000001": {
"balance": "22",
"nonce": 0,
"root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
"root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
"key": "0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"
},
"0x0000000000000000000000000000000000000002": {
"balance": "44",
"nonce": 0,
"root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
"root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
"key": "0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"
},
"0x0000000000000000000000000000000000000102": {
"balance": "0",
"nonce": 0,
"root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"codeHash": "87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3",
"code": "03030303030303"
"root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"codeHash": "0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3",
"code": "0x03030303030303",
"key": "0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1"
}
}
}`
if got != want {
t.Errorf("dump mismatch:\ngot: %s\nwant: %s\n", got, want)
t.Errorf("DumpToCollector mismatch:\ngot: %s\nwant: %s\n", got, want)
}
}
@ -107,7 +107,7 @@ func TestNull(t *testing.T) {
}
func TestSnapshot(t *testing.T) {
stateobjaddr := toAddr([]byte("aa"))
stateobjaddr := common.BytesToAddress([]byte("aa"))
var storageaddr common.Hash
data1 := common.BytesToHash([]byte{42})
data2 := common.BytesToHash([]byte{43})
@ -149,8 +149,8 @@ func TestSnapshotEmpty(t *testing.T) {
func TestSnapshot2(t *testing.T) {
state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
stateobjaddr0 := toAddr([]byte("so0"))
stateobjaddr1 := toAddr([]byte("so1"))
stateobjaddr0 := common.BytesToAddress([]byte("so0"))
stateobjaddr1 := common.BytesToAddress([]byte("so1"))
var storageaddr common.Hash
data0 := common.BytesToHash([]byte{17})

@ -897,7 +897,7 @@ func TestDeleteCreateRevert(t *testing.T) {
// Create an initial state with a single contract
state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
addr := toAddr([]byte("so"))
addr := common.BytesToAddress([]byte("so"))
state.SetBalance(addr, big.NewInt(1))
root, _ := state.Commit(false)
@ -926,7 +926,7 @@ func makeValidValidatorWrapper(addr common.Address) stk.ValidatorWrapper {
MaxRate: numeric.ZeroDec(),
MaxChangeRate: numeric.ZeroDec(),
}
c := stk.Commission{cr, big.NewInt(300)}
c := stk.Commission{CommissionRates: cr, UpdateHeight: big.NewInt(300)}
d := stk.Description{
Name: "Wayne",
Identity: "wen",

@ -299,7 +299,7 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
var cxReceipt *types.CXReceipt
// Do not create cxReceipt if EVM call failed
if txType == types.SubtractionOnly && !failedExe {
cxReceipt = &types.CXReceipt{tx.Hash(), msg.From(), msg.To(), tx.ShardID(), tx.ToShardID(), msg.Value()}
cxReceipt = &types.CXReceipt{TxHash: tx.Hash(), From: msg.From(), To: msg.To(), ShardID: tx.ShardID(), ToShardID: tx.ToShardID(), Amount: msg.Value()}
} else {
cxReceipt = nil
}

@ -927,7 +927,7 @@ func TestBlock_EncodeRLP(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
b := &Block{header: &block.Header{tt.header}}
b := &Block{header: &block.Header{Header: tt.header}}
w := &bytes.Buffer{}
err := b.EncodeRLP(w)
if (err != nil) != tt.wantErr {

@ -20,6 +20,7 @@ import (
"io"
"math/big"
"sync/atomic"
"time"
"github.com/harmony-one/harmony/internal/params"
@ -43,6 +44,9 @@ type EthTransaction struct {
hash atomic.Value
size atomic.Value
from atomic.Value
// time at which the node received the tx
// and not the time set by the sender
time time.Time
}
type ethTxdata struct {
@ -113,7 +117,7 @@ func newEthTransaction(nonce uint64, to *common.Address, amount *big.Int, gasLim
d.Price.Set(gasPrice)
}
return &EthTransaction{data: d}
return &EthTransaction{data: d, time: time.Now()}
}
// From returns the sender address of the transaction
@ -121,6 +125,11 @@ func (tx *EthTransaction) From() *atomic.Value {
return &tx.from
}
// Time returns the time at which the transaction was received by the node
func (tx *EthTransaction) Time() time.Time {
return tx.time
}
// V value of the transaction signature
func (tx *EthTransaction) V() *big.Int {
return tx.data.V
@ -180,6 +189,7 @@ func (tx *EthTransaction) Protected() bool {
func (tx *EthTransaction) Copy() *EthTransaction {
var tx2 EthTransaction
tx2.data.CopyFrom(&tx.data)
tx2.time = tx.time
return &tx2
}
@ -205,6 +215,8 @@ func (tx *EthTransaction) ConvertToHmy() *Transaction {
copy := tx2.Hash()
d2.Hash = &copy
tx2.time = tx.time
return &tx2
}
@ -219,6 +231,7 @@ func (tx *EthTransaction) DecodeRLP(s *rlp.Stream) error {
err := s.Decode(&tx.data)
if err == nil {
tx.size.Store(common.StorageSize(rlp.ListSize(size)))
tx.time = time.Now()
}
return err

@ -23,6 +23,7 @@ import (
"io"
"math/big"
"sync/atomic"
"time"
"github.com/harmony-one/harmony/internal/params"
@ -100,6 +101,9 @@ type Transaction struct {
hash atomic.Value
size atomic.Value
from atomic.Value
// time at which the node received the tx
// and not the time set by the sender
time time.Time
}
// String print mode string
@ -224,7 +228,7 @@ func newTransaction(nonce uint64, to *common.Address, shardID uint32, amount *bi
d.Price.Set(gasPrice)
}
return &Transaction{data: d}
return &Transaction{data: d, time: time.Now()}
}
func newCrossShardTransaction(nonce uint64, to *common.Address, shardID uint32, toShardID uint32, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction {
@ -251,7 +255,7 @@ func newCrossShardTransaction(nonce uint64, to *common.Address, shardID uint32,
d.Price.Set(gasPrice)
}
return &Transaction{data: d}
return &Transaction{data: d, time: time.Now()}
}
// From returns the sender address of the transaction
@ -309,6 +313,11 @@ func (tx *Transaction) ToShardID() uint32 {
return tx.data.ToShardID
}
// Time returns the time at which the transaction was received by the node
func (tx *Transaction) Time() time.Time {
return tx.time
}
// Protected returns whether the transaction is protected from replay protection.
func (tx *Transaction) Protected() bool {
return isProtectedV(tx.data.V)
@ -334,6 +343,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
err := s.Decode(&tx.data)
if err == nil {
tx.size.Store(common.StorageSize(rlp.ListSize(size)))
tx.time = time.Now()
}
return err
@ -448,6 +458,8 @@ func (tx *Transaction) ConvertToEth() *EthTransaction {
copy := tx2.Hash()
d2.Hash = &copy
tx2.time = tx.time
return &tx2
}
@ -500,6 +512,7 @@ func (tx *Transaction) RawSignatureValues() (*big.Int, *big.Int, *big.Int) {
func (tx *Transaction) Copy() *Transaction {
var tx2 Transaction
tx2.data.CopyFrom(&tx.data)
tx2.time = tx.time
return &tx2
}
@ -550,12 +563,40 @@ func (s *TxByPrice) Pop() interface{} {
return x
}
// TxByPriceAndTime implements both sort.Interface and heap.Interface, so a
// collection can be sorted wholesale or maintained incrementally as a
// price-ordered heap.
type TxByPriceAndTime Transactions

// Len reports the number of transactions in the collection.
func (s TxByPriceAndTime) Len() int { return len(s) }

// Less orders transactions by descending gas price; ties are broken by the
// time the node first saw each transaction (earlier wins), which keeps the
// ordering deterministic and resistant to same-price spam.
func (s TxByPriceAndTime) Less(i, j int) bool {
	switch s[i].data.Price.Cmp(s[j].data.Price) {
	case 1:
		return true
	case -1:
		return false
	default:
		// Equal prices: the transaction received first sorts first.
		return s[i].time.Before(s[j].time)
	}
}

// Swap exchanges the transactions at positions i and j.
func (s TxByPriceAndTime) Swap(i, j int) { s[j], s[i] = s[i], s[j] }

// Push appends a transaction, satisfying heap.Interface.
func (s *TxByPriceAndTime) Push(x interface{}) {
	*s = append(*s, x.(*Transaction))
}

// Pop removes and returns the last transaction, satisfying heap.Interface.
func (s *TxByPriceAndTime) Pop() interface{} {
	old := *s
	last := len(old) - 1
	tx := old[last]
	*s = old[:last]
	return tx
}
// TransactionsByPriceAndNonce represents a set of transactions that can return
// transactions in a profit-maximizing sorted order, while supporting removing
// entire batches of transactions for non-executable accounts.
type TransactionsByPriceAndNonce struct {
txs map[common.Address]Transactions // Per account nonce-sorted list of transactions
heads TxByPrice // Next transaction for each unique account (price heap)
heads TxByPriceAndTime // Next transaction for each unique account (price heap)
signer Signer // Signer for the set of transactions
ethSigner Signer // Signer for the set of transactions
}
@ -567,7 +608,7 @@ type TransactionsByPriceAndNonce struct {
// if after providing it to the constructor.
func NewTransactionsByPriceAndNonce(hmySigner Signer, ethSigner Signer, txs map[common.Address]Transactions) *TransactionsByPriceAndNonce {
// Initialize a price based heap with the head transactions
heads := make(TxByPrice, 0, len(txs))
heads := make(TxByPriceAndTime, 0, len(txs))
for from, accTxs := range txs {
if accTxs.Len() == 0 {
continue

@ -21,9 +21,11 @@ import (
"encoding/json"
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/harmony-one/harmony/internal/params"
)
func defaultTestKey() (*ecdsa.PrivateKey, common.Address) {
@ -133,3 +135,56 @@ func TestTransactionJSON(t *testing.T) {
}
}
}
// Tests that if multiple transactions have the same price, the ones seen earlier
// are prioritized to avoid network spam attacks aiming for a specific ordering.
func TestTransactionTimeSort(t *testing.T) {
	// Generate a batch of accounts to start with
	keys := make([]*ecdsa.PrivateKey, 5)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
	}
	signer := HomesteadSigner{}

	// Generate a batch of transactions with overlapping prices, but different creation times
	groups := map[common.Address]Transactions{}
	for start, key := range keys {
		addr := crypto.PubkeyToAddress(key.PublicKey)
		// Every tx carries the same gas price (1), so ordering must fall back
		// to the received time, which is set strictly decreasing with `start`.
		tx, _ := SignTx(NewTransaction(0, common.Address{}, 0, big.NewInt(100), 100, big.NewInt(1), nil), signer, key)
		tx.time = time.Unix(0, int64(len(keys)-start))
		groups[addr] = append(groups[addr], tx)
	}
	// Sort the transactions and cross check the nonce ordering
	config := params.TestChainConfig
	txset := NewTransactionsByPriceAndNonce(
		NewEIP155Signer(config.ChainID),
		NewEIP155Signer(config.EthCompatibleChainID),
		groups,
	)

	// Drain the sorted set; Peek/Shift should emit every transaction exactly once.
	txs := Transactions{}
	for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
		txs = append(txs, tx)
		txset.Shift()
	}
	if len(txs) != len(keys) {
		t.Errorf("expected %d transactions, found %d", len(keys), len(txs))
	}
	for i, txi := range txs {
		fromi, _ := Sender(signer, txi)
		if i+1 < len(txs) {
			next := txs[i+1]
			fromNext, _ := Sender(signer, next)

			// Gas prices must be non-increasing across the drained order.
			if txi.GasPrice().Cmp(next.GasPrice()) < 0 {
				t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
			}

			// Make sure time order is ascending if the txs have the same gas price
			if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.time.After(next.time) {
				t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.time, i+1, fromNext[:4], next.time)
			}
		}
	}
}

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build VERIFY_EVM_INTEGER_POOL
// +build VERIFY_EVM_INTEGER_POOL
package vm

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !VERIFY_EVM_INTEGER_POOL
// +build !VERIFY_EVM_INTEGER_POOL
package vm

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build gofuzz
// +build gofuzz
package runtime

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package rpc

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !cgo && !windows
// +build !cgo,!windows
package rpc

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build js
// +build js
package rpc

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package rpc

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build windows
// +build windows
package rpc

@ -5,7 +5,7 @@ go 1.16
require (
github.com/VictoriaMetrics/fastcache v1.5.7 // indirect
github.com/Workiva/go-datastructures v1.0.50
github.com/allegro/bigcache v1.2.1 // indirect
github.com/allegro/bigcache v1.2.1
github.com/aristanetworks/goarista v0.0.0-20190607111240-52c2a7864a08 // indirect
github.com/aws/aws-sdk-go v1.30.1
github.com/beevik/ntp v0.3.0
@ -16,13 +16,11 @@ require (
github.com/deckarep/golang-set v1.7.1
github.com/ethereum/go-ethereum v1.9.25
github.com/fjl/memsize v0.0.0-20180929194037-2a09253e352a // indirect
github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c // indirect
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.2
github.com/golangci/golangci-lint v1.22.2
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.4.2
github.com/harmony-ek/gencodec v0.0.0-20190215044613-e6740dbdd846
github.com/harmony-one/abool v1.0.1
github.com/harmony-one/bls v0.0.6
github.com/harmony-one/taggedrlp v0.1.4
@ -53,13 +51,14 @@ require (
github.com/spf13/viper v1.6.1
github.com/stretchr/testify v1.7.0
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee
go.uber.org/ratelimit v0.1.0
go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
golang.org/x/tools v0.1.7
golang.org/x/tools v0.1.7 // indirect
google.golang.org/grpc v1.33.2
google.golang.org/protobuf v1.26.0
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c

@ -209,8 +209,6 @@ github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c h1:uYNKzPntb8c6DKvP9EfrBjkLkU7pM4lM+uuHSIa8UtU=
github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@ -389,8 +387,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/harmony-ek/gencodec v0.0.0-20190215044613-e6740dbdd846 h1:mYQ5htv2sj7m/mwlonteCTD80ODMlPUQTwbKJk+hi3Q=
github.com/harmony-ek/gencodec v0.0.0-20190215044613-e6740dbdd846/go.mod h1:YZcPnufUw70msUSudLvxcQOSpnZJgaMS9WIU8IGEtBg=
github.com/harmony-one/abool v1.0.1 h1:SjXLmrr3W8h6lY37gRuWtLiRknUOchnUnsXJWK6Gbm4=
github.com/harmony-one/abool v1.0.1/go.mod h1:9sq0PJzb1SqRpKrpEV4Ttvm9WV5uud8sfrsPw3AIBJA=
github.com/harmony-one/bls v0.0.6 h1:KG4q4JwdkPf3DtFvJmAgMRWT6QdY1A/wqN/Qt+S4VaQ=
@ -553,7 +549,6 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ=

@ -203,6 +203,7 @@ func (d *Downloader) loop() {
time.Sleep(5 * time.Second)
trigger()
}()
time.Sleep(1 * time.Second)
continue
}
d.logger.Info().Int("block added", addedBN).

@ -123,7 +123,7 @@ func TestLrSyncIter_FetchAndInsertBlocks(t *testing.T) {
targetBN := uint64(1000)
chain := newTestBlockChain(0, nil)
protocol := newTestSyncProtocol(targetBN, 32, nil)
ctx, _ := context.WithCancel(context.Background())
ctx := context.Background()
lsi := &lrSyncIter{
bc: chain,
@ -155,7 +155,7 @@ func TestLrSyncIter_FetchAndInsertBlocks_ErrRequest(t *testing.T) {
}
chain := newTestBlockChain(0, nil)
protocol := newTestSyncProtocol(targetBN, 32, errHook)
ctx, _ := context.WithCancel(context.Background())
ctx := context.Background()
lsi := &lrSyncIter{
bc: chain,
@ -187,7 +187,7 @@ func TestLrSyncIter_FetchAndInsertBlocks_ErrInsert(t *testing.T) {
}
chain := newTestBlockChain(0, errHook)
protocol := newTestSyncProtocol(targetBN, 32, nil)
ctx, _ := context.WithCancel(context.Background())
ctx := context.Background()
lsi := &lrSyncIter{
bc: chain,
@ -219,7 +219,7 @@ func TestLrSyncIter_FetchAndInsertBlocks_RandomErr(t *testing.T) {
}
chain := newTestBlockChain(0, errHook)
protocol := newTestSyncProtocol(targetBN, 32, errHook)
ctx, _ := context.WithCancel(context.Background())
ctx := context.Background()
lsi := &lrSyncIter{
bc: chain,

@ -24,7 +24,9 @@ import (
func (d *Downloader) doShortRangeSync() (int, error) {
numShortRangeCounterVec.With(d.promLabels()).Inc()
srCtx, _ := context.WithTimeout(d.ctx, shortRangeTimeout)
srCtx, cancel := context.WithTimeout(d.ctx, shortRangeTimeout)
defer cancel()
sh := &srHelper{
syncProtocol: d.syncProtocol,
ctx: srCtx,
@ -120,6 +122,7 @@ func (sh *srHelper) getHashChain(curBN uint64) ([]common.Hash, []sttypes.StreamI
func (sh *srHelper) getBlocksByHashes(hashes []common.Hash, whitelist []sttypes.StreamID) ([]*types.Block, []sttypes.StreamID, error) {
ctx, cancel := context.WithCancel(sh.ctx)
defer cancel()
m := newGetBlocksByHashManager(hashes, whitelist)
var (

@ -34,7 +34,7 @@ import (
const sampleNumber = 3 // Number of transactions sampled in a block
var DefaultMaxPrice = big.NewInt(1 * params.Ether)
var DefaultMaxPrice = big.NewInt(5e11) // 500 gwei is the max suggested limit
type GasPriceConfig struct {
Blocks int

@ -153,9 +153,9 @@ func New(
// Setup gas price oracle
gpoParams := GasPriceConfig{
Blocks: 20,
Percentile: 60,
Default: big.NewInt(3e10),
Blocks: 20, // take all eligible txs past 20 blocks and sort them
Percentile: 40, // get the 40th percentile when sorted in an ascending manner
Default: big.NewInt(3e10), // minimum of 30 gwei
}
gpo := NewOracle(backend, gpoParams)
backend.gpo = gpo

@ -330,6 +330,8 @@ func (hmy *Harmony) GetValidatorInformation(
if defaultReply.CurrentlyInCommittee {
defaultReply.Performance = &staking.CurrentEpochPerformance{
CurrentSigningPercentage: *computed,
Epoch: hmy.BeaconChain.CurrentBlock().Header().Number().Uint64(),
Block: hmy.BeaconChain.CurrentBlock().Header().Epoch().Uint64(),
}
}

@ -44,7 +44,7 @@ func (hmy *Harmony) ResendCx(ctx context.Context, txID common.Hash) (uint64, boo
if tx.ShardID() == tx.ToShardID() || blk.Header().ShardID() != tx.ShardID() {
return 0, false
}
entry := core.CxEntry{blockHash, tx.ToShardID()}
entry := core.CxEntry{BlockHash: blockHash, ToShardID: tx.ToShardID()}
success := hmy.CxPool.Add(entry)
return blockNum, success
}

@ -477,6 +477,9 @@ func applySlashes(
return false
})
// The Leader of the block gets all slashing rewards.
slashRewardBeneficiary := header.Coinbase()
// Do the slashing by groups in the sorted order
for _, key := range sortedKeys {
records := groupedRecords[key]
@ -510,6 +513,7 @@ func applySlashes(
state,
records,
rate,
slashRewardBeneficiary,
); err != nil {
return errors.New("[Finalize] could not apply slash")
}

@ -498,7 +498,7 @@ func distributeRewardBeforeAggregateEpoch(bc engine.ChainReader, state *state.DB
if err != nil {
return network.EmptyPayout, errors.Wrapf(err, "shard 0 block %d reward error with bitmap %x", header.Number(), header.LastCommitBitmap())
}
subComm := shard.Committee{shard.BeaconChainShardID, members}
subComm := shard.Committee{ShardID: shard.BeaconChainShardID, Slots: members}
if err := availability.IncrementValidatorSigningCounts(
beaconChain,

@ -28,6 +28,7 @@ type HarmonyConfig struct {
Legacy *LegacyConfig `toml:",omitempty"`
Prometheus *PrometheusConfig `toml:",omitempty"`
DNSSync DnsSync
ShardData ShardDataConfig
}
type DnsSync struct {
@ -66,6 +67,14 @@ type GeneralConfig struct {
EnablePruneBeaconChain bool
}
type ShardDataConfig struct {
EnableShardData bool
DiskCount int
ShardCount int
CacheTime int
CacheSize int
}
type ConsensusConfig struct {
MinPeers int
AggregateSig bool

@ -3,12 +3,22 @@ package shardchain
import (
"fmt"
"path"
"path/filepath"
"time"
"github.com/harmony-one/harmony/internal/shardchain/leveldb_shard"
"github.com/harmony-one/harmony/internal/shardchain/local_cache"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
)
const (
LDBDirPrefix = "harmony_db"
LDBShardDirPrefix = "harmony_sharddb"
)
// DBFactory is a blockchain database factory.
type DBFactory interface {
// NewChainDB returns a new database for the blockchain for
@ -23,7 +33,7 @@ type LDBFactory struct {
// NewChainDB returns a new LDB for the blockchain for given shard.
func (f *LDBFactory) NewChainDB(shardID uint32) (ethdb.Database, error) {
dir := path.Join(f.RootDir, fmt.Sprintf("harmony_db_%d", shardID))
dir := path.Join(f.RootDir, fmt.Sprintf("%s_%d", LDBDirPrefix, shardID))
return rawdb.NewLevelDBDatabase(dir, 256, 1024, "")
}
@ -34,3 +44,26 @@ type MemDBFactory struct{}
func (f *MemDBFactory) NewChainDB(shardID uint32) (ethdb.Database, error) {
return rawdb.NewMemoryDatabase(), nil
}
// LDBShardFactory is a merged Multi-LDB-backed blockchain database factory.
type LDBShardFactory struct {
RootDir string // directory in which to put shard databases in.
DiskCount int
ShardCount int
CacheTime int
CacheSize int
}
// NewChainDB returns a new memDB for the blockchain for given shard.
func (f *LDBShardFactory) NewChainDB(shardID uint32) (ethdb.Database, error) {
dir := filepath.Join(f.RootDir, fmt.Sprintf("%s_%d", LDBShardDirPrefix, shardID))
shard, err := leveldb_shard.NewLeveldbShard(dir, f.DiskCount, f.ShardCount)
if err != nil {
return nil, err
}
return rawdb.NewDatabase(local_cache.NewLocalCacheDatabase(shard, local_cache.CacheConfig{
CacheTime: time.Duration(f.CacheTime) * time.Minute,
CacheSize: f.CacheSize,
})), nil
}

@ -0,0 +1,37 @@
package leveldb_shard
import (
"hash/crc32"
"sync"
"sync/atomic"
)
// mapDBIndex deterministically assigns a key to one of dbCount shards by
// hashing it with CRC-32 (IEEE polynomial) and reducing modulo the count.
func mapDBIndex(key []byte, dbCount uint32) uint32 {
	sum := crc32.ChecksumIEEE(key)
	return sum % dbCount
}
// parallelRunAndReturnErr runs cb(i) for every i in [0, parallelNum)
// concurrently, one goroutine per index, and waits for all of them to finish.
// If one or more callbacks fail, one of their errors is returned (when several
// fail it is unspecified which one wins); otherwise nil is returned.
func parallelRunAndReturnErr(parallelNum int, cb func(index int) error) error {
	var (
		wg        sync.WaitGroup
		errAtomic atomic.Value // holds the most recently stored non-nil error
	)
	for i := 0; i < parallelNum; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if err := cb(i); err != nil {
				errAtomic.Store(err)
			}
		}(i)
	}
	wg.Wait()

	// Load once (the original loaded twice and used a redundant else branch);
	// anything stored is guaranteed to be a non-nil error.
	if v := errAtomic.Load(); v != nil {
		return v.(error)
	}
	return nil
}

@ -0,0 +1,200 @@
package leveldb_shard
import (
"bytes"
"encoding/binary"
"fmt"
"path/filepath"
"strings"
"sync"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
// LeveldbShard spreads one logical key-value store across multiple leveldb
// instances, routing each key to a shard by CRC-32 hash (see mapDBIndex).
type LeveldbShard struct {
	dbs     []*leveldb.DB // one handle per disk*shard combination
	dbCount uint32        // len(dbs), cached as uint32 for hash routing
}

// shardIdxKey is a reserved key written into every shard DB recording the
// index it was created as, so a reopen with a different disk/shard layout is
// detected instead of silently mixing data.
var shardIdxKey = []byte("__DB_SHARED_INDEX__")
// NewLeveldbShard opens diskCount*diskShards leveldb instances laid out as
// savePath/diskNN/blockNN and returns them wrapped as one sharded store.
// Each DB stores its own shard index under shardIdxKey; a mismatch on reopen
// (layout changed) is reported as an error. On any failure every DB opened so
// far is closed and a nil shard is returned alongside the error.
func NewLeveldbShard(savePath string, diskCount int, diskShards int) (shard *LeveldbShard, err error) {
	shard = &LeveldbShard{
		dbs:     make([]*leveldb.DB, diskCount*diskShards),
		dbCount: uint32(diskCount * diskShards),
	}

	// clean when error
	defer func() {
		if err != nil {
			for _, db := range shard.dbs {
				if db != nil {
					_ = db.Close()
				}
			}
			shard = nil
		}
	}()

	levelDBOptions := &opt.Options{
		OpenFilesCacheCapacity: 128,
		WriteBuffer:            8 << 20,  // 8MB, max memory occupyv = 8*2*diskCount*diskShards
		BlockCacheCapacity:     16 << 20, // 16MB
		Filter:                 filter.NewBloomFilter(8),
		DisableSeeksCompaction: true,
	}

	// Open all databases concurrently. The named return err is guarded by a
	// mutex because several goroutines may fail at once — the original
	// assigned err directly from each goroutine, which is a data race.
	var (
		wg    sync.WaitGroup
		errMu sync.Mutex
	)
	setErr := func(e error) {
		errMu.Lock()
		if err == nil {
			err = e
		}
		errMu.Unlock()
	}
	for i := 0; i < diskCount; i++ {
		for j := 0; j < diskShards; j++ {
			shardPath := filepath.Join(savePath, fmt.Sprintf("disk%02d", i), fmt.Sprintf("block%02d", j))
			dbIndex := i*diskShards + j
			wg.Add(1)
			go func() {
				defer wg.Done()
				ldb, openErr := leveldb.OpenFile(shardPath, levelDBOptions)
				if openErr != nil {
					setErr(openErr)
					return
				}

				// Verify (or initialize) the persisted shard index.
				indexByte := make([]byte, 8)
				binary.BigEndian.PutUint64(indexByte, uint64(dbIndex))
				inDBIndex, getErr := ldb.Get(shardIdxKey, nil)
				if getErr != nil {
					if getErr == leveldb.ErrNotFound {
						if putErr := ldb.Put(shardIdxKey, indexByte, nil); putErr != nil {
							// Close here: the DB never reaches shard.dbs, so
							// the deferred cleanup would not see it.
							_ = ldb.Close()
							setErr(putErr)
							return
						}
					} else {
						_ = ldb.Close()
						setErr(getErr)
						return
					}
				} else if !bytes.Equal(indexByte, inDBIndex) {
					_ = ldb.Close()
					setErr(fmt.Errorf("db shard index error, need %v, got %v", indexByte, inDBIndex))
					return
				}

				shard.dbs[dbIndex] = ldb
			}()
		}
	}
	wg.Wait()

	return shard, err
}
// mapDB returns the shard database responsible for the given key.
func (l *LeveldbShard) mapDB(key []byte) *leveldb.DB {
	idx := mapDBIndex(key, l.dbCount)
	return l.dbs[idx]
}
// Has retrieves if a key is present in the key-value data store.
// The check is delegated to the single shard DB the key hashes to.
func (l *LeveldbShard) Has(key []byte) (bool, error) {
	return l.mapDB(key).Has(key, nil)
}

// Get retrieves the given key if it's present in the key-value data store.
// Only the owning shard DB is consulted.
func (l *LeveldbShard) Get(key []byte) ([]byte, error) {
	return l.mapDB(key).Get(key, nil)
}

// Put inserts the given value into the key-value data store.
// The write lands in the shard DB the key hashes to.
func (l *LeveldbShard) Put(key []byte, value []byte) error {
	return l.mapDB(key).Put(key, value, nil)
}

// Delete removes the key from the key-value data store.
func (l *LeveldbShard) Delete(key []byte) error {
	return l.mapDB(key).Delete(key, nil)
}
// NewBatch creates a write-only database that buffers changes to its host db
// until a final write is called. Writes are routed per shard and flushed in
// parallel (see LeveldbShardBatch).
func (l *LeveldbShard) NewBatch() ethdb.Batch {
	return NewLeveldbShardBatch(l)
}

// NewIterator creates a binary-alphabetical iterator over the entire keyspace
// contained within the key-value database. The per-shard iterators are merged
// into one key-ordered view.
func (l *LeveldbShard) NewIterator() ethdb.Iterator {
	return l.iterator(nil)
}

// NewIteratorWithStart creates a binary-alphabetical iterator over a subset of
// database content starting at a particular initial key (or after, if it does
// not exist).
func (l *LeveldbShard) NewIteratorWithStart(start []byte) ethdb.Iterator {
	return l.iterator(&util.Range{Start: start})
}

// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix.
func (l *LeveldbShard) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator {
	return l.iterator(util.BytesPrefix(prefix))
}
// iterator builds one iterator per underlying DB over the given range and
// merges them into a single key-ordered iterator.
func (l *LeveldbShard) iterator(slice *util.Range) ethdb.Iterator {
	subIters := make([]iterator.Iterator, 0, l.dbCount)
	for _, db := range l.dbs {
		subIters = append(subIters, db.NewIterator(slice, nil))
	}
	return iterator.NewMergedIterator(subIters, comparer.DefaultComparer, true)
}
// Stat collects the requested internal property from every shard DB and
// returns them concatenated, each section prefixed with a shard banner.
func (l *LeveldbShard) Stat(property string) (string, error) {
	var report strings.Builder
	for i, db := range l.dbs {
		value, err := db.GetProperty(property)
		if err != nil {
			return "", err
		}
		fmt.Fprintf(&report, "=== shard %02d ===\n", i)
		report.WriteString(value)
		report.WriteString("\n")
	}
	return report.String(), nil
}
// Compact flattens the underlying data store for the given key range. In essence,
// deleted and overwritten versions are discarded, and the data is rearranged to
// reduce the cost of operations needed to access them.
//
// A nil start is treated as a key before all keys in the data store; a nil limit
// is treated as a key after all keys in the data store. If both is nil then it
// will compact entire data store.
//
// All shard DBs are compacted in parallel; the first error (if any) is returned.
func (l *LeveldbShard) Compact(start []byte, limit []byte) (err error) {
	return parallelRunAndReturnErr(int(l.dbCount), func(i int) error {
		return l.dbs[i].CompactRange(util.Range{Start: start, Limit: limit})
	})
}
// Close closes every underlying shard DB. Unlike the original, which bailed
// out on the first failure and left the remaining databases (and their file
// descriptors) open, all DBs are closed; the first error encountered is
// returned.
func (l *LeveldbShard) Close() error {
	var firstErr error
	for _, db := range l.dbs {
		if err := db.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

@ -0,0 +1,119 @@
package leveldb_shard
import (
"runtime"
"sync"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/syndtr/goleveldb/leveldb"
)
// batchesPool recycles leveldb.Batch values across LeveldbShardBatch
// instances to reduce per-batch allocations.
var batchesPool = sync.Pool{
	New: func() interface{} {
		return &leveldb.Batch{}
	},
}

// LeveldbShardBatch buffers writes per shard so a single logical batch can
// later be flushed to every underlying DB in parallel.
type LeveldbShardBatch struct {
	shard        *LeveldbShard
	batches      []*leveldb.Batch // one pending batch per shard DB
	batchesCount uint32           // equals shard.dbCount
}
// NewLeveldbShardBatch creates a shard batch with one pooled leveldb.Batch
// per shard of the given LeveldbShard.
func NewLeveldbShardBatch(shard *LeveldbShard) *LeveldbShardBatch {
	shardBatch := &LeveldbShardBatch{
		batches:      make([]*leveldb.Batch, shard.dbCount),
		batchesCount: shard.dbCount,
		shard:        shard,
	}
	for i := uint32(0); i < shard.dbCount; i++ {
		shardBatch.batches[i] = batchesPool.Get().(*leveldb.Batch)
	}

	// Return the pooled batches when the shard batch becomes unreachable, so
	// callers that never explicitly reset a batch do not drain the pool.
	runtime.SetFinalizer(shardBatch, func(o *LeveldbShardBatch) {
		for _, batch := range o.batches {
			batch.Reset()
			batchesPool.Put(batch)
		}
		o.batches = nil
	})

	return shardBatch
}
// mapBatch returns the per-shard pending batch that the key routes to.
func (l *LeveldbShardBatch) mapBatch(key []byte) *leveldb.Batch {
	idx := mapDBIndex(key, l.batchesCount)
	return l.batches[idx]
}
// Put inserts the given value into the key-value data store.
// The operation is buffered in the pending batch of the key's shard.
func (l *LeveldbShardBatch) Put(key []byte, value []byte) error {
	l.mapBatch(key).Put(key, value)
	return nil
}

// Delete removes the key from the key-value data store.
// The deletion is buffered in the pending batch of the key's shard.
func (l *LeveldbShardBatch) Delete(key []byte) error {
	l.mapBatch(key).Delete(key)
	return nil
}
// ValueSize retrieves the amount of data queued up for writing.
// NOTE(review): leveldb's Batch.Len() returns the number of queued
// operations, not bytes, so this reports an operation count rather than a
// data size — confirm whether ethdb callers rely on byte-based thresholds.
func (l *LeveldbShardBatch) ValueSize() int {
	size := 0
	for _, batch := range l.batches {
		size += batch.Len()
	}
	return size
}
// Write flushes any accumulated data to disk, writing each per-shard batch to
// its DB in parallel and returning an error if any shard write fails.
// NOTE(review): the flush is not atomic across shards — if one shard's write
// fails, other shards may already have committed their portion.
func (l *LeveldbShardBatch) Write() (err error) {
	return parallelRunAndReturnErr(int(l.batchesCount), func(i int) error {
		return l.shard.dbs[i].Write(l.batches[i], nil)
	})
}
// Reset clears every per-shard pending batch so the shard batch can be
// reused without reallocating.
func (l *LeveldbShardBatch) Reset() {
	for i := range l.batches {
		l.batches[i].Reset()
	}
}
// Replay replays the batch contents against w, shard by shard, stopping at
// the first error.
func (l *LeveldbShardBatch) Replay(w ethdb.KeyValueWriter) error {
	for _, batch := range l.batches {
		r := &replayer{writer: w}
		if err := batch.Replay(r); err != nil {
			return err
		}
		// The original discarded errors recorded inside the replayer wrapper
		// (r.failure was never inspected), silently ignoring failed writes.
		if r.failure != nil {
			return r.failure
		}
	}
	return nil
}
// replayer is a small wrapper to implement the correct replay methods.
// It records the first writer error in failure; callers must inspect that
// field after the replay completes, since leveldb's Replay callbacks cannot
// return errors themselves.
type replayer struct {
	writer  ethdb.KeyValueWriter
	failure error // first error returned by the wrapped writer, if any
}

// Put inserts the given value into the key-value data store.
func (r *replayer) Put(key, value []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Put(key, value)
}

// Delete removes the key from the key-value data store.
func (r *replayer) Delete(key []byte) {
	// If the replay already failed, stop executing ops
	if r.failure != nil {
		return
	}
	r.failure = r.writer.Delete(key)
}

@ -0,0 +1,22 @@
package local_cache
import (
"reflect"
"unsafe"
)
// String converts b to a string without copying, by aliasing the slice's
// backing array via unsafe. The caller must not mutate b afterwards: Go
// assumes strings are immutable, and a later write to b would be visible
// through the returned string.
func String(b []byte) (s string) {
	if len(b) == 0 {
		return ""
	}
	return *(*string)(unsafe.Pointer(&b))
}
// StringBytes converts s to a byte slice without copying, by pointing the
// slice header at the string's backing data via unsafe. The returned slice
// MUST be treated as read-only: writing to it would mutate the (immutable)
// string and can crash if the string data lives in read-only memory.
func StringBytes(s string) []byte {
	var b []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	hdr.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	hdr.Cap = len(s)
	hdr.Len = len(s)
	return b
}

@ -0,0 +1,83 @@
package local_cache
import (
"sync"
"github.com/ethereum/go-ethereum/ethdb"
)
// LocalCacheBatch accumulates writes and deletes in memory; they are applied
// to the owning LocalCacheDatabase (read cache and remote store) on Write.
type LocalCacheBatch struct {
	db *LocalCacheDatabase

	lock            sync.Mutex // guards all fields below
	size            int        // total queued bytes (keys + values)
	batchWriteKey   [][]byte
	batchWriteValue [][]byte
	batchDeleteKey  [][]byte
}

// newLocalCacheBatch returns an empty batch bound to db.
func newLocalCacheBatch(db *LocalCacheDatabase) *LocalCacheBatch {
	return &LocalCacheBatch{db: db}
}
// Put queues a key/value write into the batch.
func (b *LocalCacheBatch) Put(key []byte, value []byte) error {
	b.lock.Lock()
	defer b.lock.Unlock()
	b.batchWriteKey = append(b.batchWriteKey, key)
	b.batchWriteValue = append(b.batchWriteValue, value)
	b.size += len(key) + len(value)
	return nil
}

// Delete queues a key deletion into the batch.
func (b *LocalCacheBatch) Delete(key []byte) error {
	b.lock.Lock()
	defer b.lock.Unlock()
	b.batchDeleteKey = append(b.batchDeleteKey, key)
	b.size += len(key)
	return nil
}
// ValueSize returns the number of bytes currently queued in the batch.
// The lock is taken because size is mutated by concurrent Put/Delete calls;
// the original read it unsynchronized, which is a data race under -race.
func (b *LocalCacheBatch) ValueSize() int {
	b.lock.Lock()
	defer b.lock.Unlock()
	return b.size
}
// Write flushes the queued operations to the owning database (read cache
// first, then the remote store) while holding the batch lock.
func (b *LocalCacheBatch) Write() error {
	b.lock.Lock()
	defer b.lock.Unlock()
	return b.db.batchWrite(b)
}
// Reset drops all queued operations, returning the batch to an empty state
// while keeping the underlying slices' capacity for reuse.
func (b *LocalCacheBatch) Reset() {
	b.lock.Lock()
	defer b.lock.Unlock()
	b.size = 0
	b.batchDeleteKey = b.batchDeleteKey[:0]
	b.batchWriteValue = b.batchWriteValue[:0]
	b.batchWriteKey = b.batchWriteKey[:0]
}
// Replay applies the batch's queued writes, then its deletes, to w in order,
// stopping at the first error. (The original wrapped each loop in a
// redundant len(...) > 0 guard; ranging over an empty slice is a no-op.)
func (b *LocalCacheBatch) Replay(w ethdb.KeyValueWriter) error {
	for i, key := range b.batchWriteKey {
		if err := w.Put(key, b.batchWriteValue[i]); err != nil {
			return err
		}
	}
	for _, key := range b.batchDeleteKey {
		if err := w.Delete(key); err != nil {
			return err
		}
	}
	return nil
}

@ -0,0 +1,121 @@
package local_cache
import (
"bytes"
"time"
"github.com/allegro/bigcache"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/harmony-one/harmony/internal/utils"
)
// cacheWrapper adapts bigcache's string-keyed API to byte-slice keys so it
// can act as a replay target alongside the remote store.
type cacheWrapper struct {
	*bigcache.BigCache
}

// CacheConfig configures the local read cache.
type CacheConfig struct {
	CacheTime time.Duration // per-entry lifetime handed to bigcache
	CacheSize int           // hard max cache size (bigcache's HardMaxCacheSize unit — presumably MB; confirm against bigcache docs)
}

// Put stores value under key in the cache.
func (c *cacheWrapper) Put(key []byte, value []byte) error {
	return c.BigCache.Set(String(key), value)
}

// Delete removes key from the cache.
func (c *cacheWrapper) Delete(key []byte) error {
	return c.BigCache.Delete(String(key))
}

// LocalCacheDatabase layers an in-memory read cache over a remote
// ethdb.KeyValueStore; reads are served from the cache when possible, writes
// go through to both.
type LocalCacheDatabase struct {
	ethdb.KeyValueStore

	enableReadCache bool
	// deleteMap is initialized by the constructor but not referenced by any
	// code visible in this file — NOTE(review): possibly dead; verify usage.
	deleteMap map[string]bool
	readCache *cacheWrapper
}
// NewLocalCacheDatabase wraps remoteDB with an in-memory bigcache-backed read
// cache sized/aged per cacheConfig, and starts a goroutine that logs cache
// statistics once a minute.
// NOTE(review): the stats ticker goroutine runs for the life of the process,
// one per database instance.
func NewLocalCacheDatabase(remoteDB ethdb.KeyValueStore, cacheConfig CacheConfig) *LocalCacheDatabase {
	config := bigcache.DefaultConfig(cacheConfig.CacheTime)
	config.HardMaxCacheSize = cacheConfig.CacheSize
	config.MaxEntriesInWindow = cacheConfig.CacheSize * 4 * int(cacheConfig.CacheTime.Seconds())
	cache, err := bigcache.NewBigCache(config)
	if err != nil {
		// The original discarded this error (`cache, _ := ...`) and would
		// nil-dereference on the first cached read. Degrade to an uncached
		// pass-through instead of crashing later.
		utils.Logger().Error().Err(err).
			Msg("local-cache init failed; read cache disabled")
		return &LocalCacheDatabase{
			KeyValueStore:   remoteDB,
			enableReadCache: false,
			deleteMap:       make(map[string]bool),
		}
	}

	db := &LocalCacheDatabase{
		KeyValueStore:   remoteDB,
		enableReadCache: true,
		deleteMap:       make(map[string]bool),
		readCache:       &cacheWrapper{cache},
	}

	// Periodically report cache statistics for observability.
	go func() {
		for range time.Tick(time.Minute) {
			utils.Logger().Info().
				Interface("stats", cache.Stats()).
				Int("count", cache.Len()).
				Int("size", cache.Capacity()).
				Msg("local-cache stats")
		}
	}()

	return db
}
// Has reports key presence directly from the underlying store; the read
// cache is intentionally not consulted.
func (c *LocalCacheDatabase) Has(key []byte) (bool, error) {
	return c.KeyValueStore.Has(key)
}
// Get returns the value for key, serving from the in-memory read cache when
// possible and falling back to the underlying store on a miss. A successful
// remote read back-fills the cache via the deferred closure.
func (c *LocalCacheDatabase) Get(key []byte) (ret []byte, err error) {
	if c.enableReadCache {
		// The "LastBlock" key is never cached — presumably because it changes
		// on every block and a stale value would be harmful; confirm with the
		// rawdb accessor that writes it.
		// (bytes.Equal replaces the original `bytes.Compare(...) != 0`,
		// per staticcheck S1004; behavior is identical.)
		if !bytes.Equal(key, []byte("LastBlock")) {
			strKey := String(key)
			if ret, err = c.readCache.Get(strKey); err == nil {
				return ret, nil
			}
			// Back-fill the cache only if the remote read succeeds.
			defer func() {
				if err == nil {
					_ = c.readCache.Set(strKey, ret)
				}
			}()
		}
	}
	return c.KeyValueStore.Get(key)
}
// Put writes through: the read cache is updated best-effort (errors ignored),
// then the value is written to the underlying store.
func (c *LocalCacheDatabase) Put(key []byte, value []byte) error {
	if c.enableReadCache {
		_ = c.readCache.Put(key, value)
	}
	return c.KeyValueStore.Put(key, value)
}

// Delete removes the key from the read cache (best-effort) and then from the
// underlying store.
func (c *LocalCacheDatabase) Delete(key []byte) error {
	if c.enableReadCache {
		_ = c.readCache.Delete(key)
	}
	return c.KeyValueStore.Delete(key)
}
// NewBatch returns a batch whose operations are applied to both the read
// cache and the underlying store when it is written.
func (c *LocalCacheDatabase) NewBatch() ethdb.Batch {
	return newLocalCacheBatch(c)
}
// batchWrite applies a LocalCacheBatch: it is replayed into the read cache
// (best-effort, errors ignored), then into a fresh batch for the underlying
// store, which is flushed last.
// NOTE(review): the cache is updated before the remote write is known to
// succeed — if the remote Write fails, the cache is left ahead of the store.
func (c *LocalCacheDatabase) batchWrite(b *LocalCacheBatch) error {
	if c.enableReadCache {
		_ = b.Replay(c.readCache)
	}

	batch := c.KeyValueStore.NewBatch()
	err := b.Replay(batch)
	if err != nil {
		return err
	}

	return batch.Write()
}

@ -649,7 +649,7 @@ func (node *Node) StartPubSub() error {
}
allTopics = append(
allTopics, u{
NamedTopic: p2p.NamedTopic{string(key), topicHandle},
NamedTopic: p2p.NamedTopic{Name: string(key), Topic: topicHandle},
consensusBound: isCon,
},
)
@ -787,19 +787,6 @@ func (node *Node) StartPubSub() error {
nodeP2PMessageCounterVec.With(prometheus.Labels{"type": "ignored"}).Inc()
return libp2p_pubsub.ValidationReject
}
select {
case <-ctx.Done():
if errors.Is(ctx.Err(), context.DeadlineExceeded) ||
errors.Is(ctx.Err(), context.Canceled) {
utils.Logger().Warn().
Str("topic", topicNamed).Msg("[context] exceeded validation deadline")
}
errChan <- withError{errors.WithStack(ctx.Err()), nil}
default:
return libp2p_pubsub.ValidationAccept
}
return libp2p_pubsub.ValidationReject
},
// WithValidatorTimeout is an option that sets a timeout for an (asynchronous) topic validator. By default there is no timeout in asynchronous validators.
// TODO: Currently this timeout is useless. Verify me.

@ -30,7 +30,7 @@ func (gi *genesisInitializer) InitChainDB(db ethdb.Database, shardID uint32) err
if err != nil {
return errors.New("cannot find local shard in genesis")
}
shardState = &shard.State{nil, []shard.Committee{*subComm}}
shardState = &shard.State{Shards: []shard.Committee{*subComm}}
}
gi.node.SetupGenesisBlock(db, shardID, shardState)
return nil

@ -155,15 +155,14 @@ func TestVerifyVRF(t *testing.T) {
spKey := bls.SerializedPublicKey{}
spKey.FromLibBLSPublicKey(pubKey)
curNodeID := shard.Slot{
ecdsaAddr,
spKey,
nil,
EcdsaAddress: ecdsaAddr,
BLSPublicKey: spKey,
}
com.Slots = append(com.Slots, curNodeID)
shardState.Epoch = big.NewInt(1)
shardState.Shards = append(shardState.Shards, com)
node.Consensus.LeaderPubKey = &bls.PublicKeyWrapper{spKey, pubKey}
node.Consensus.LeaderPubKey = &bls.PublicKeyWrapper{Bytes: spKey, Object: pubKey}
node.Worker.GetCurrentHeader().SetEpoch(big.NewInt(1))
node.Consensus.GenerateVrfAndProof(node.Worker.GetCurrentHeader())
block, _ := node.Worker.FinalizeNewBlock(

@ -578,6 +578,28 @@ func (node *Node) CalculateResponse(request *downloader_pb.DownloaderRequest, in
Msg("[SYNC] extra node registered")
}
case downloader_pb.DownloaderRequest_BLOCKBYHEIGHT:
if len(request.Heights) == 0 {
return response, errors.New("empty heights list provided")
}
if len(request.Heights) > int(legacysync.SyncLoopBatchSize) {
return response, errors.New("exceed size limit")
}
out := make([][]byte, 0, len(request.Heights))
for _, v := range request.Heights {
block := node.Blockchain().GetBlockByNumber(v)
if block == nil {
return response, errors.Errorf("no block with height %d found", v)
}
blockBytes, err := node.getEncodedBlockWithSigByHeight(v)
if err != nil {
return response, errors.Errorf("failed to get block")
}
out = append(out, blockBytes)
}
response.Payload = out
}
return response, nil
@ -679,6 +701,26 @@ func (node *Node) getEncodedBlockWithSigByHash(hash common.Hash) ([]byte, error)
return b, nil
}
// getEncodedBlockWithSigByHeight returns the RLP encoding of the block at
// the given height bundled with its commit signature and bitmap as a
// legacysync.BlockWithSig. It returns errBlockNotExist when the chain has
// no block at that height.
func (node *Node) getEncodedBlockWithSigByHeight(height uint64) ([]byte, error) {
	block := node.Blockchain().GetBlockByNumber(height)
	if block == nil {
		return nil, errBlockNotExist
	}
	commitSig, err := node.getCommitSigAndBitmapFromChildOrDB(block)
	if err != nil {
		return nil, err
	}
	encoded, err := rlp.EncodeToBytes(legacysync.BlockWithSig{
		Block:              block,
		CommitSigAndBitmap: commitSig,
	})
	if err != nil {
		return nil, err
	}
	return encoded, nil
}
func (node *Node) getEncodedBlockWithSigFromBlock(block *types.Block) ([]byte, error) {
bwh := legacysync.BlockWithSig{
Block: block,

@ -69,6 +69,12 @@ func (w *Worker) CommitSortedTransactions(
coinbase common.Address,
) {
for {
if w.current.gasPool.Gas() < 30000000 {
// Temporary solution to reduce the fullness of the block. Break here when the available gas left hit 30M.
// Effectively making the gas limit 50M (since 80M is the default gas limit)
utils.Logger().Info().Uint64("have", w.current.gasPool.Gas()).Uint64("want", params.TxGas).Msg("[Temp Gas Limit] Not enough gas for further transactions")
break
}
// If we don't have enough gas for any further transactions then we're done
if w.current.gasPool.Gas() < params.TxGas {
utils.Logger().Info().Uint64("have", w.current.gasPool.Gas()).Uint64("want", params.TxGas).Msg("Not enough gas for further transactions")

@ -45,6 +45,7 @@ func NewDHTDiscovery(host libp2p_host.Host, opt DHTConfig) (Discovery, error) {
ctx, cancel := context.WithCancel(context.Background())
dht, err := libp2p_dht.New(ctx, host, opts...)
if err != nil {
cancel()
return nil, err
}
d := libp2p_dis.NewRoutingDiscovery(dht)

@ -106,8 +106,10 @@ func NewHost(cfg HostConfig) (Host, error) {
libp2p.Identity(key),
libp2p.EnableNATService(),
libp2p.ForceReachabilityPublic(),
libp2p.BandwidthReporter(newCounter()),
)
if err != nil {
cancel()
return nil, errors.Wrapf(err, "cannot initialize libp2p host")
}
@ -117,6 +119,7 @@ func NewHost(cfg HostConfig) (Host, error) {
DiscConcurrency: cfg.DiscConcurrency,
})
if err != nil {
cancel()
return nil, errors.Wrap(err, "cannot create DHT discovery")
}
@ -168,6 +171,7 @@ func NewHost(cfg HostConfig) (Host, error) {
pubsub, err := libp2p_pubsub.NewGossipSub(ctx, p2pHost, options...)
if err != nil {
cancel()
return nil, errors.Wrapf(err, "cannot initialize libp2p pub-sub")
}

@ -0,0 +1,38 @@
// Package p2p provides the Harmony node's peer-to-peer networking layer.
package p2p
import (
eth_metrics "github.com/ethereum/go-ethereum/metrics"
"github.com/libp2p/go-libp2p-core/metrics"
)
const (
	// ingressMeterName is the prefix of the per-packet inbound metrics.
	ingressMeterName = "p2p/ingress"
	// egressMeterName is the prefix of the per-packet outbound metrics.
	egressMeterName = "p2p/egress"
)

var (
	// ingressTrafficMeter accumulates inbound traffic reported via LogRecvMessage.
	ingressTrafficMeter = eth_metrics.NewRegisteredMeter(ingressMeterName, nil)
	// egressTrafficMeter accumulates outbound traffic reported via LogSentMessage.
	egressTrafficMeter = eth_metrics.NewRegisteredMeter(egressMeterName, nil)
)

// Counter is a wrapper around a metrics.BandwidthCounter that meters both
// the inbound and outbound network traffic through the go-ethereum meters
// above.
type Counter struct {
	*metrics.BandwidthCounter
}

// newCounter builds a Counter backed by a fresh BandwidthCounter.
func newCounter() *Counter {
	return &Counter{BandwidthCounter: metrics.NewBandwidthCounter()}
}

// LogRecvMessage marks size bytes on the global ingress meter.
// NOTE(review): this shadows the embedded BandwidthCounter's own
// LogRecvMessage, so its aggregate (non-stream) accounting is skipped —
// confirm that is intended.
func (c *Counter) LogRecvMessage(size int64) {
	ingressTrafficMeter.Mark(size)
}

// LogSentMessage marks size bytes on the global egress meter.
// NOTE(review): shadows the embedded BandwidthCounter's LogSentMessage —
// confirm that is intended.
func (c *Counter) LogSentMessage(size int64) {
	egressTrafficMeter.Mark(size)
}

@ -25,7 +25,8 @@ func TestRequestManager_Request_Normal(t *testing.T) {
defer ts.Close()
req := makeTestRequest(100)
ctx, _ := context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
res := <-ts.rm.doRequestAsync(ctx, req)
if res.err != nil {
@ -92,7 +93,8 @@ func TestRequestManager_RemoveStream(t *testing.T) {
defer ts.Close()
req := makeTestRequest(100)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
resC := ts.rm.doRequestAsync(ctx, req)
time.Sleep(defTestSleep)
@ -153,7 +155,8 @@ func TestRequestManager_StaleDelivery(t *testing.T) {
defer ts.Close()
req := makeTestRequest(100)
ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
resC := ts.rm.doRequestAsync(ctx, req)
time.Sleep(2 * time.Second)
@ -192,7 +195,8 @@ func TestRequestManager_cancelWaitings(t *testing.T) {
ts.Start()
defer ts.Close()
ctx1, _ := context.WithTimeout(context.Background(), 1*time.Second)
ctx1, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
ctx2, cancel2 := context.WithTimeout(context.Background(), 1*time.Second)
resC1 := ts.rm.doRequestAsync(ctx1, req1)
resC2 := ts.rm.doRequestAsync(ctx2, req2)
@ -245,7 +249,8 @@ func TestRequestManager_Close(t *testing.T) {
ts := newTestSuite(delayF, respF, 3)
ts.Start()
ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
resC := ts.rm.doRequestAsync(ctx, makeTestRequest(0))
time.Sleep(100 * time.Millisecond)
ts.Close()
@ -266,7 +271,8 @@ func TestRequestManager_Request_Blacklist(t *testing.T) {
defer ts.Close()
req := makeTestRequest(100)
ctx, _ := context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
res := <-ts.rm.doRequestAsync(ctx, req, WithBlacklist([]sttypes.StreamID{
makeStreamID(0),
makeStreamID(1),
@ -293,7 +299,8 @@ func TestRequestManager_Request_Whitelist(t *testing.T) {
defer ts.Close()
req := makeTestRequest(100)
ctx, _ := context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
res := <-ts.rm.doRequestAsync(ctx, req, WithWhitelist([]sttypes.StreamID{
makeStreamID(3),
}))

@ -0,0 +1,40 @@
package streammanager
import (
"container/list"
"time"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/whyrusleeping/timecache"
)
const (
	// coolDownPeriod is how long a peer stays in the cool-down cache after
	// it has last been seen.
	coolDownPeriod = 1 * time.Minute
)

// coolDownCache remembers recently seen peer IDs for coolDownPeriod so
// that repeated work against the same peer can be skipped while it cools
// down.
type coolDownCache struct {
	timeCache *timecache.TimeCache
}

// newCoolDownCache creates an empty cool-down cache using coolDownPeriod.
func newCoolDownCache() *coolDownCache {
	return &coolDownCache{
		timeCache: timecache.NewTimeCache(coolDownPeriod),
	}
}

// Has reports whether the peer is currently cooling down. A peer that is
// not yet present is added as a side effect, starting its cool-down window.
func (cache *coolDownCache) Has(id peer.ID) bool {
	key := string(id)
	if cache.timeCache.Has(key) {
		return true
	}
	cache.timeCache.Add(key)
	return false
}

// Reset empties the cool-down cache.
func (cache *coolDownCache) Reset() {
	// timecache exposes no clear operation, so swap in fresh internals.
	cache.timeCache.Q = list.New()
	cache.timeCache.M = make(map[string]time.Time)
}

@ -6,15 +6,15 @@ import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/ethereum/go-ethereum/event"
"github.com/harmony-one/abool"
"github.com/harmony-one/harmony/internal/utils"
sttypes "github.com/harmony-one/harmony/p2p/stream/types"
"github.com/libp2p/go-libp2p-core/network"
libp2p_peer "github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/rs/zerolog"
)
@ -49,7 +49,9 @@ type streamManager struct {
stopCh chan stopTask
discCh chan discTask
curTask interface{}
coolDown *abool.AtomicBool
// utils
coolDownCache *coolDownCache
addStreamFeed event.Feed
removeStreamFeed event.Feed
logger zerolog.Logger
@ -72,21 +74,22 @@ func newStreamManager(pid sttypes.ProtoID, host host, pf peerFinder, handleStrea
protoSpec, _ := sttypes.ProtoIDToProtoSpec(pid)
return &streamManager{
myProtoID: pid,
myProtoSpec: protoSpec,
config: c,
streams: newStreamSet(),
host: host,
pf: pf,
handleStream: handleStream,
addStreamCh: make(chan addStreamTask),
rmStreamCh: make(chan rmStreamTask),
stopCh: make(chan stopTask),
discCh: make(chan discTask, 1), // discCh is a buffered channel to avoid overuse of goroutine
logger: logger,
ctx: ctx,
cancel: cancel,
myProtoID: pid,
myProtoSpec: protoSpec,
config: c,
streams: newStreamSet(),
host: host,
pf: pf,
handleStream: handleStream,
addStreamCh: make(chan addStreamTask),
rmStreamCh: make(chan rmStreamTask),
stopCh: make(chan stopTask),
discCh: make(chan discTask, 1), // discCh is a buffered channel to avoid overuse of goroutine
coolDown: abool.New(),
coolDownCache: newCoolDownCache(),
logger: logger,
ctx: ctx,
cancel: cancel,
}
}
@ -121,16 +124,27 @@ func (sm *streamManager) loop() {
}
case <-sm.discCh:
// cancel last discovery
if sm.coolDown.IsSet() {
sm.logger.Info().Msg("skipping discover for cool down")
continue
}
if discCancel != nil {
discCancel()
discCancel() // cancel last discovery
}
discCtx, discCancel = context.WithCancel(sm.ctx)
go func() {
err := sm.discoverAndSetupStream(discCtx)
discovered, err := sm.discoverAndSetupStream(discCtx)
if err != nil {
sm.logger.Err(err)
}
if discovered == 0 {
// start discover cool down
sm.coolDown.Set()
go func() {
time.Sleep(coolDownPeriod)
sm.coolDown.UnSet()
}()
}
}()
case addStream := <-sm.addStreamCh:
@ -142,6 +156,9 @@ func (sm *streamManager) loop() {
rmStream.errC <- err
case stop := <-sm.stopCh:
if discCancel != nil {
discCancel()
}
sm.cancel()
sm.removeAllStreamOnClose()
stop.done <- struct{}{}
@ -278,18 +295,21 @@ func (sm *streamManager) removeAllStreamOnClose() {
sm.streams = newStreamSet()
}
func (sm *streamManager) discoverAndSetupStream(discCtx context.Context) error {
func (sm *streamManager) discoverAndSetupStream(discCtx context.Context) (int, error) {
peers, err := sm.discover(discCtx)
if err != nil {
return errors.Wrap(err, "failed to discover")
return 0, errors.Wrap(err, "failed to discover")
}
discoverCounterVec.With(prometheus.Labels{"topic": string(sm.myProtoID)}).Inc()
connecting := 0
for peer := range peers {
if peer.ID == sm.host.ID() {
if peer.ID == sm.host.ID() || sm.coolDownCache.Has(peer.ID) {
// If the peer has the same ID and was just connected, skip.
continue
}
discoveredPeersCounterVec.With(prometheus.Labels{"topic": string(sm.myProtoID)}).Inc()
connecting += 1
go func(pid libp2p_peer.ID) {
// The ctx here is using the module context instead of discover context
err := sm.setupStreamWithPeer(sm.ctx, pid)
@ -299,7 +319,7 @@ func (sm *streamManager) discoverAndSetupStream(discCtx context.Context) error {
}
}(peer.ID)
}
return nil
return connecting, nil
}
func (sm *streamManager) discover(ctx context.Context) (<-chan libp2p_peer.AddrInfo, error) {
@ -312,8 +332,12 @@ func (sm *streamManager) discover(ctx context.Context) (<-chan libp2p_peer.AddrI
return nil, nil
}
ctx, _ = context.WithTimeout(ctx, discTimeout)
return sm.pf.FindPeers(ctx, protoID, discBatch)
ctx2, cancel := context.WithTimeout(ctx, discTimeout)
go func() { // avoid context leak
<-time.After(discTimeout)
cancel()
}()
return sm.pf.FindPeers(ctx2, protoID, discBatch)
}
func (sm *streamManager) setupStreamWithPeer(ctx context.Context, pid libp2p_peer.ID) error {

@ -44,12 +44,12 @@ func NewBaseStream(st libp2p_network.Stream) *BaseStream {
}
// StreamID is the unique identifier for the stream. It has the value of
// the remote libp2p peer ID (so streams are identified per-peer, not
// per-connection). The merge left two contradictory doc lines here; this
// keeps the current (peer-ID) semantics.
type StreamID string
// ID returns the StreamID of the stream, derived from the remote peer's ID.
// The merge render left two consecutive return statements (the stale
// Conn().ID() variant and the new RemotePeer() variant); only the new one
// is kept — two returns in a row would not compile.
func (st *BaseStream) ID() StreamID {
	return StreamID(st.raw.Conn().RemotePeer().String())
}
// ProtoID return the remote protocol ID of the stream

@ -32,10 +32,10 @@ const (
var (
// ReadTimeout ..
ReadTimeout = 30 * time.Second
ReadTimeout = 60 * time.Second
// WriteTimeout ..
WriteTimeout = 30 * time.Second
WriteTimeout = 60 * time.Second
// IdleTimeout ..
IdleTimeout = 120 * time.Second

RUN go mod tidy
# Build the static binary and stage the runtime assets for the final image.
RUN make linux_static && \
    cp ./bin/harmony /root/harmony && \
    cp ./rosetta/infra/run.sh /root/run.sh && \
    cp ./rosetta/infra/rclone.conf /root/rclone.conf
RUN cp ./rosetta/infra/harmony-pstn.conf /root/harmony-pstn.conf && \
    cp ./rosetta/infra/harmony-mainnet.conf /root/harmony-mainnet.conf && \
    cp ./.hmy/rosetta_local_fix.csv /root/rosetta_local_fix.csv

# Execution stage: pinned base image (was ubuntu:latest) plus the runtime
# dependencies; rclone and ca-certificates are needed for snapshot sync.
FROM ubuntu:20.04
RUN apt update -y && \
    apt install libgmp-dev libssl-dev ca-certificates rclone -y && \
    apt -y clean all
# Note: the merged version ended the line above with a stray "\", which
# would have swallowed the next instruction; removed here. Stale duplicate
# lines from the pre-merge version are dropped as well.
WORKDIR /root
COPY --from=build /root/harmony /root/harmony
COPY --from=build /root/run.sh /root/run.sh
COPY --from=build /root/rclone.conf /root/.config/rclone/rclone.conf
COPY --from=build /root/harmony-pstn.conf /root/harmony-pstn.conf
COPY --from=build /root/harmony-mainnet.conf /root/harmony-mainnet.conf
COPY --from=build /root/rosetta_local_fix.csv /root/rosetta_local_fix.csv

@ -0,0 +1,29 @@
# Compose file running a pair of Harmony rosetta nodes against testnet:
# an "online" node that syncs the chain and an "offline" node that serves
# requests without network access.
version: "2"
services:
  online-node:
    build:
      context: .
      dockerfile: Dockerfile
    # NOTE(review): privileged looks required by the node container's
    # runtime setup — confirm it cannot be dropped.
    privileged: true
    ports:
      - "8080:9700"
    environment:
      - "MODE=online"
      - "NETWORK=testnet"
    volumes:
      # Persist chain data on the host across container restarts.
      - "./data:/root/data/"
    labels:
      service_group: rosetta
  offline-node:
    build:
      context: .
      dockerfile: Dockerfile
    privileged: true
    ports:
      - "8081:9700"
    environment:
      - "MODE=offline"
      - "NETWORK=testnet"
    labels:
      service_group: rosetta

@ -0,0 +1,29 @@
# Compose file running a pair of Harmony rosetta nodes against mainnet.
# The online node uses NETWORK=mainnet-22816573, which (per run.sh) first
# rclone-syncs a published DB snapshot before starting.
version: "2"
services:
  online-node:
    build:
      context: .
      dockerfile: Dockerfile
    # NOTE(review): privileged looks required by the node container's
    # runtime setup — confirm it cannot be dropped.
    privileged: true
    ports:
      - "8080:9700"
    environment:
      - "MODE=online"
      - "NETWORK=mainnet-22816573"
    volumes:
      # Persist chain data on the host across container restarts.
      - "./data:/root/data/"
    labels:
      service_group: rosetta
  offline-node:
    build:
      context: .
      dockerfile: Dockerfile
    privileged: true
    ports:
      - "8081:9700"
    environment:
      - "MODE=offline"
      - "NETWORK=mainnet"
    labels:
      service_group: rosetta

@ -92,6 +92,13 @@ Version = "2.5.1"
RosettaFixFile = "./rosetta_local_fix.csv"
AccountSlots = 16
[ShardData]
EnableShardData = true
DiskCount = 8
ShardCount = 4
CacheTime = 10
CacheSize = 512
[WS]
AuthPort = 9801
Enabled = true

@ -91,6 +91,13 @@ Version = "2.5.1"
BlacklistFile = "./.hmy/blacklist.txt"
AccountSlots = 16
[ShardData]
EnableShardData = false
DiskCount = 8
ShardCount = 4
CacheTime = 10
CacheSize = 512
[WS]
AuthPort = 9801
Enabled = true

@ -0,0 +1,8 @@
[release]
type = s3
provider = AWS
env_auth = false
region = us-west-1
acl = public-read
server_side_encryption = AES256
storage_class = REDUCED_REDUNDANCY

@ -1,12 +1,38 @@
#!/usr/bin/env bash
# Entrypoint for the rosetta Docker image: pick a node config from $NETWORK,
# optionally bootstrap the data dir from a published snapshot, then launch
# the harmony binary in online or offline mode ($MODE).
#
# The merge render contained stale pre-merge duplicates (old DIR assignment,
# old BASE_ARGS with HTTP flags, and a second echo/echo/exec trio that would
# have launched the node twice); this keeps only the post-merge version.
set -e

DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
DATA="$DIR/data"
LOGS="$DATA/logs"
DATA_NAME="${DATA_NAME:=harmony_sharddb_0}"
MAINNET_22816573_SNAPSHOT="release:pub.harmony.one/mainnet.min.22816573/harmony_sharddb_0"

case "$NETWORK" in
mainnet)
  CONFIG_PATH="-c /root/harmony-mainnet.conf"
  ;;
mainnet-22816573)
  CONFIG_PATH="-c /root/harmony-mainnet.conf"
  # Bootstrap the data dir from the published snapshot before starting.
  rclone -P -L sync $MAINNET_22816573_SNAPSHOT $DATA/$DATA_NAME --transfers=64
  ;;
testnet)
  CONFIG_PATH="-c /root/harmony-pstn.conf"
  ;;
*)
  echo "unknown network"
  exit 1
  ;;
esac

if [ "$MODE" = "offline" ]; then
  BASE_ARGS=(--datadir "$DATA" --log.dir "$LOGS" --run.offline)
else
  BASE_ARGS=(--datadir "$DATA" --log.dir "$LOGS")
fi

mkdir -p "$LOGS"
echo -e NODE ARGS: \" $CONFIG_PATH "$@" "${BASE_ARGS[@]}" \"
echo "NODE VERSION: $($DIR/harmony --version)"
"$DIR/harmony" $CONFIG_PATH "$@" "${BASE_ARGS[@]}"

@ -164,10 +164,10 @@ func (s *BlockAPI) BlockTransaction(
}
return response, rosettaError2
}
state, _, err := s.hmy.StateAndHeaderByNumber(ctx, rpc.BlockNumber(request.BlockIdentifier.Index))
state, _, err := s.hmy.StateAndHeaderByNumber(ctx, rpc.BlockNumber(blk.NumberU64()))
if state == nil || err != nil {
return nil, common.NewError(common.BlockNotFoundError, map[string]interface{}{
"message": fmt.Sprintf("block state not found for block %v", request.BlockIdentifier.Index),
"message": fmt.Sprintf("block state not found for block %v", blk.NumberU64()),
})
}

@ -19,7 +19,7 @@ import (
const (
// DefaultGasPrice ..
DefaultGasPrice = denominations.Nano
DefaultGasPrice = 30 * denominations.Nano
)
// ConstructAPI implements the server.ConstructAPIServicer interface.

@ -112,9 +112,9 @@ func unpackWrappedTransactionFromString(
Details: createValidatorMsg.Details,
},
CommissionRates: stakingTypes.CommissionRates{
Rate: numeric.Dec{createValidatorMsg.CommissionRate},
MaxRate: numeric.Dec{createValidatorMsg.MaxCommissionRate},
MaxChangeRate: numeric.Dec{createValidatorMsg.MaxChangeRate},
Rate: numeric.Dec{Int: createValidatorMsg.CommissionRate},
MaxRate: numeric.Dec{Int: createValidatorMsg.MaxCommissionRate},
MaxChangeRate: numeric.Dec{Int: createValidatorMsg.MaxChangeRate},
},
MinSelfDelegation: createValidatorMsg.MinSelfDelegation,
MaxTotalDelegation: createValidatorMsg.MaxTotalDelegation,
@ -156,7 +156,7 @@ func unpackWrappedTransactionFromString(
SecurityContact: editValidatorMsg.SecurityContact,
Details: editValidatorMsg.Details,
},
CommissionRate: &numeric.Dec{editValidatorMsg.CommissionRate},
CommissionRate: &numeric.Dec{Int: editValidatorMsg.CommissionRate},
MinSelfDelegation: editValidatorMsg.MinSelfDelegation,
MaxTotalDelegation: editValidatorMsg.MaxTotalDelegation,
SlotKeyToAdd: slotKeyToAdd.(*bls.SerializedPublicKey),

@ -480,9 +480,9 @@ func stakingCreateValidatorTransaction(key *ecdsa.PrivateKey) (*stakingTypes.Sta
Details: "Don't mess with me!!!",
},
CommissionRates: stakingTypes.CommissionRates{
Rate: numeric.Dec{new(big.Int).SetUint64(100000000000000000)},
MaxRate: numeric.Dec{new(big.Int).SetUint64(900000000000000000)},
MaxChangeRate: numeric.Dec{new(big.Int).SetUint64(50000000000000000)},
Rate: numeric.Dec{Int: new(big.Int).SetUint64(100000000000000000)},
MaxRate: numeric.Dec{Int: new(big.Int).SetUint64(900000000000000000)},
MaxChangeRate: numeric.Dec{Int: new(big.Int).SetUint64(50000000000000000)},
},
MinSelfDelegation: new(big.Int).Mul(new(big.Int).SetInt64(10), big.NewInt(1e18)),
MaxTotalDelegation: new(big.Int).Mul(new(big.Int).SetInt64(3000), big.NewInt(1e18)),
@ -531,7 +531,7 @@ func stakingEditValidatorTransaction(key *ecdsa.PrivateKey) (*stakingTypes.Staki
SecurityContact: "Bob",
Details: "Don't mess with me!!!",
},
CommissionRate: &numeric.Dec{new(big.Int).SetUint64(100000000000000000)},
CommissionRate: &numeric.Dec{Int: new(big.Int).SetUint64(100000000000000000)},
MinSelfDelegation: new(big.Int).Mul(new(big.Int).SetInt64(10), big.NewInt(1e18)),
MaxTotalDelegation: new(big.Int).Mul(new(big.Int).SetInt64(3000), big.NewInt(1e18)),
SlotKeyToRemove: &slotKeyToRemove,

@ -215,9 +215,9 @@ func constructCreateValidatorTransaction(
Details: createValidatorMsg.Details,
},
CommissionRates: types2.CommissionRates{
Rate: numeric.Dec{createValidatorMsg.CommissionRate},
MaxRate: numeric.Dec{createValidatorMsg.MaxCommissionRate},
MaxChangeRate: numeric.Dec{createValidatorMsg.MaxChangeRate},
Rate: numeric.Dec{Int: createValidatorMsg.CommissionRate},
MaxRate: numeric.Dec{Int: createValidatorMsg.MaxCommissionRate},
MaxChangeRate: numeric.Dec{Int: createValidatorMsg.MaxChangeRate},
},
MinSelfDelegation: new(big.Int).Mul(createValidatorMsg.MinSelfDelegation, big.NewInt(1e18)),
MaxTotalDelegation: new(big.Int).Mul(createValidatorMsg.MaxTotalDelegation, big.NewInt(1e18)),
@ -286,7 +286,7 @@ func constructEditValidatorTransaction(
SecurityContact: editValidatorMsg.SecurityContact,
Details: editValidatorMsg.Details,
},
CommissionRate: &numeric.Dec{editValidatorMsg.CommissionRate},
CommissionRate: &numeric.Dec{Int: editValidatorMsg.CommissionRate},
MinSelfDelegation: new(big.Int).Mul(editValidatorMsg.MinSelfDelegation, big.NewInt(1e18)),
MaxTotalDelegation: new(big.Int).Mul(editValidatorMsg.MaxTotalDelegation, big.NewInt(1e18)),
SlotKeyToAdd: &slotKeyToAdd,

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save