Suggestion 4: Add Fault Tests (#58)

* Drop tests
Branch: pull/61/head
Author: Vuk Gavrilovic, 2 years ago (committed by GitHub)
Parent: d6431af3ac
Commit: 503b703b40
Changed files:
1. core/drop_test.go (217 changed lines)
2. core/helpers_test.go (29 changed lines)
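At a glance, the new tests drive a small mock cluster through different fault patterns (all nodes stopped and restarted, MaxFaulty nodes randomly dropping messages, a gradual restart of every node) and assert that valid blocks keep being finalized. A condensed sketch of the pattern the tests share, using helper names from the diff below; configureNodes is a hypothetical stand-in for the per-node wiring shown in full further down:

    cluster := newCluster(6, configureNodes)      // wire up mock backends and transports per node
    cluster.makeNFaulty(int(cluster.maxFaulty())) // or stopN/startN for hard drops

    // The cluster should still finalize blocks despite the injected faults
    assert.NoError(t, cluster.progressToHeight(40*time.Second, 5))
    assert.Equal(t, uint64(5), cluster.latestHeight)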

core/drop_test.go

@@ -1,19 +1,216 @@
package core

import (
    "bytes"
    "math/rand"
    "testing"
    "time"

    "github.com/0xPolygon/go-ibft/messages"
    "github.com/0xPolygon/go-ibft/messages/proto"

    "github.com/stretchr/testify/assert"
)
func TestDropAllAndRecover(t *testing.T) {
    t.Parallel()

    var (
        numNodes       = uint64(6)
        insertedBlocks = make([][]byte, numNodes)
    )

    cluster := newCluster(
        numNodes,
        func(c *cluster) {
            for nodeIndex, node := range c.nodes {
                i := nodeIndex
                currentNode := node
                node.core = NewIBFT(
                    mockLogger{},
                    &mockBackend{
                        isValidBlockFn: isValidProposal,
                        isValidProposalHashFn: isValidProposalHash,
                        isValidSenderFn: nil,
                        isValidCommittedSealFn: nil,
                        isProposerFn: c.isProposer,
                        idFn: node.addr,
                        buildProposalFn: buildValidProposal,
                        buildPrePrepareMessageFn: node.buildPrePrepare,
                        buildPrepareMessageFn: node.buildPrepare,
                        buildCommitMessageFn: node.buildCommit,
                        buildRoundChangeMessageFn: node.buildRoundChange,
                        insertBlockFn: func(proposal []byte, _ []*messages.CommittedSeal) {
                            insertedBlocks[i] = proposal
                        },
                        hasQuorumFn: c.hasQuorumFn,
                    },
                    &mockTransport{multicastFn: func(message *proto.Message) {
                        // Offline nodes do not gossip their messages
                        if currentNode.offline {
                            return
                        }

                        c.gossip(message)
                    }},
                )
            }
        },
    )

    // Progress the chain to confirm it works correctly by default
    err := cluster.progressToHeight(5*time.Second, 1)
    assert.NoError(t, err, "unable to reach height: %v", err)
    assert.Equal(t, uint64(1), cluster.latestHeight)
    assertValidInsertedBlocks(t, insertedBlocks) // Make sure the inserted blocks are valid

    insertedBlocks = make([][]byte, numNodes) // Purge

    // Stop all nodes and make sure no blocks are written
    cluster.stopN(len(cluster.nodes))
    cluster.progressToHeight(5*time.Second, 2)
    assertNInsertedBlocks(t, 0, insertedBlocks)

    // Start all nodes again and expect valid blocks to be written
    cluster.startN(len(cluster.nodes))
    cluster.progressToHeight(5*time.Second, 10)
    assertValidInsertedBlocks(t, insertedBlocks) // Make sure the inserted blocks are valid
}

func assertNInsertedBlocks(t *testing.T, n int, blocks [][]byte) {
    t.Helper()

    writtenBlocks := 0

    for _, block := range blocks {
        if !bytes.Equal(block, nil) {
            writtenBlocks++
        }
    }

    assert.True(t, n == writtenBlocks)
}

func assertValidInsertedBlocks(t *testing.T, blocks [][]byte) {
    t.Helper()

    for _, block := range blocks {
        assert.True(t, bytes.Equal(block, validProposal))
    }
}

func TestMaxFaultyDroppingMessages(t *testing.T) {
    t.Parallel()

    cluster := newCluster(
        6,
        func(c *cluster) {
            for _, node := range c.nodes {
                currentNode := node
                node.core = NewIBFT(
                    mockLogger{},
                    &mockBackend{
                        isValidBlockFn: isValidProposal,
                        isValidProposalHashFn: isValidProposalHash,
                        isValidSenderFn: nil,
                        isValidCommittedSealFn: nil,
                        isProposerFn: c.isProposer,
                        idFn: node.addr,
                        buildProposalFn: buildValidProposal,
                        buildPrePrepareMessageFn: node.buildPrePrepare,
                        buildPrepareMessageFn: node.buildPrepare,
                        buildCommitMessageFn: node.buildCommit,
                        buildRoundChangeMessageFn: node.buildRoundChange,
                        insertBlockFn: nil,
                        hasQuorumFn: c.hasQuorumFn,
                    },
                    &mockTransport{multicastFn: func(message *proto.Message) {
                        // Faulty nodes randomly drop roughly half of their outgoing messages
                        if currentNode.faulty && rand.Intn(100) < 50 {
                            return
                        }

                        c.gossip(message)
                    }},
                )
            }
        },
    )

    cluster.makeNFaulty(int(cluster.maxFaulty()))
    assert.NoError(t, cluster.progressToHeight(40*time.Second, 5))
    assert.Equal(t, uint64(5), cluster.latestHeight)
}

func TestAllFailAndGraduallyRecover(t *testing.T) {
    t.Parallel()

    var (
        numNodes       = uint64(6)
        insertedBlocks = make([][]byte, numNodes)
    )

    cluster := newCluster(
        numNodes,
        func(c *cluster) {
            for nodeIndex, node := range c.nodes {
                nodeIndex := nodeIndex
                currentNode := node
                node.core = NewIBFT(
                    mockLogger{},
                    &mockBackend{
                        isValidBlockFn: isValidProposal,
                        isValidProposalHashFn: isValidProposalHash,
                        isValidSenderFn: nil,
                        isValidCommittedSealFn: nil,
                        isProposerFn: c.isProposer,
                        idFn: node.addr,
                        buildProposalFn: buildValidProposal,
                        buildPrePrepareMessageFn: node.buildPrePrepare,
                        buildPrepareMessageFn: node.buildPrepare,
                        buildCommitMessageFn: node.buildCommit,
                        buildRoundChangeMessageFn: node.buildRoundChange,
                        insertBlockFn: func(proposal []byte, _ []*messages.CommittedSeal) {
                            insertedBlocks[nodeIndex] = proposal
                        },
                        hasQuorumFn: c.hasQuorumFn,
                    },
                    &mockTransport{multicastFn: func(msg *proto.Message) {
                        // Only online nodes deliver their messages to the rest of the cluster
                        if !currentNode.offline {
                            for _, node := range c.nodes {
                                node.core.AddMessage(msg)
                            }
                        }
                    }},
                )
            }
        },
    )

    // Start the main run loops
    cluster.runGradualSequence(1, 10*time.Second)

    // Wait until the main run loops finish
    cluster.wg.Wait()

    // Make sure the inserted blocks match what node 0 proposed
    for _, block := range insertedBlocks {
        assert.True(t, bytes.Equal(block, validProposal))
    }
}

/*
Scenario:
1. Cluster can reach height 5
2. Stop MaxFaulty+1 nodes
3. Cluster cannot reach height 10
4. Start MaxFaulty+1 nodes
5. Cluster can reach height 10
*/
func TestDropMaxFaultyPlusOne(t *testing.T) {
    t.Parallel()

@@ -68,10 +265,10 @@ func TestDropMaxFaultyPlusOne(t *testing.T) {
}

/*
Scenario:
1. Cluster can reach height 5
2. Stop MaxFaulty nodes
3. Cluster can still reach height 10
*/
func TestDropMaxFaulty(t *testing.T) {
    t.Parallel()
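The bodies of TestDropMaxFaultyPlusOne and TestDropMaxFaulty are collapsed in this diff view; only their scenario comments and a few context lines are shown. For orientation, the second scenario could be exercised roughly along the lines of the sketch below, reusing cluster helpers that appear elsewhere in this commit (the timeouts are placeholders; this is not the committed test body):

    // Sketch only: assumes a cluster wired the same way as in TestDropAllAndRecover
    assert.NoError(t, cluster.progressToHeight(5*time.Second, 5))
    assert.Equal(t, uint64(5), cluster.latestHeight)

    // Stop MaxFaulty nodes; the remaining nodes should still be able to reach quorum
    cluster.stopN(int(cluster.maxFaulty()))

    assert.NoError(t, cluster.progressToHeight(20*time.Second, 10))
    assert.Equal(t, uint64(10), cluster.latestHeight)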

core/helpers_test.go

@@ -4,6 +4,7 @@ import (
    "bytes"
    "context"
    "errors"
    "math/rand"
    "sync"
    "time"

@@ -40,6 +41,7 @@ type node struct {
    core *IBFT
    address []byte
    offline bool
    faulty bool
    byzantine bool
}

@@ -131,6 +133,27 @@ func newCluster(num uint64, init func(*cluster)) *cluster {
    return c
}

func (c *cluster) runGradualSequence(height uint64, timeout time.Duration) {
    ctx, _ := context.WithTimeout(context.Background(), timeout)

    for nodeIndex, n := range c.nodes {
        c.wg.Add(1)

        go func(ctx context.Context, ordinal int, node *node) {
            // Start the main run loop for the node
            runDelay := ordinal * rand.Intn(1000)

            select {
            case <-ctx.Done():
            case <-time.After(time.Duration(runDelay) * time.Millisecond):
                node.core.RunSequence(ctx, height)
            }

            c.wg.Done()
        }(ctx, nodeIndex+1, n)
    }
}

func (c *cluster) runSequence(ctx context.Context, height uint64) <-chan struct{} {
    done := make(chan struct{})

@@ -224,6 +247,12 @@ func (c *cluster) makeNByzantine(num int) {
    }
}

func (c *cluster) makeNFaulty(num int) {
    for i := 0; i < num; i++ {
        c.nodes[i].faulty = true
    }
}

func (c *cluster) stopN(num int) {
    for i := 0; i < num; i++ {
        c.nodes[i].offline = true
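One nit on runGradualSequence above: the cancel function returned by context.WithTimeout is discarded (ctx, _ := ...), which the go vet lostcancel check flags. A possible variant, sketched here and not part of this commit, keeps the same staggered start while releasing the timeout once every sequence has returned:

func (c *cluster) runGradualSequence(height uint64, timeout time.Duration) {
    ctx, cancel := context.WithTimeout(context.Background(), timeout)

    for nodeIndex, n := range c.nodes {
        c.wg.Add(1)

        go func(ctx context.Context, ordinal int, node *node) {
            defer c.wg.Done()

            // Stagger each node's start by a random, ordinal-scaled delay
            runDelay := ordinal * rand.Intn(1000)

            select {
            case <-ctx.Done():
            case <-time.After(time.Duration(runDelay) * time.Millisecond):
                node.core.RunSequence(ctx, height)
            }
        }(ctx, nodeIndex+1, n)
    }

    // Release the timeout's resources once all sequences have finished
    go func() {
        c.wg.Wait()
        cancel()
    }()
}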
