diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go
index e9b6fbe482..d5f0f63c37 100644
--- a/abci/example/kvstore/kvstore.go
+++ b/abci/example/kvstore/kvstore.go
@@ -493,7 +493,7 @@ func (app *Application) LoadSnapshotChunk(_ context.Context, req *abci.RequestLo
 	app.mu.Lock()
 	defer app.mu.Unlock()
 
-	chunk, err := app.snapshots.LoadChunk(req.Height, req.Format, req.Chunk)
+	chunk, err := app.snapshots.LoadChunk(req.Height, req.Version, req.ChunkId)
 	if err != nil {
 		return &abci.ResponseLoadSnapshotChunk{}, err
 	}
@@ -523,7 +523,11 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA
 	if app.offerSnapshot == nil {
 		return &abci.ResponseApplySnapshotChunk{}, fmt.Errorf("no restore in progress")
 	}
-	app.offerSnapshot.addChunk(int(req.Index), req.Chunk)
+
+	resp := &abci.ResponseApplySnapshotChunk{
+		Result:     abci.ResponseApplySnapshotChunk_ACCEPT,
+		NextChunks: app.offerSnapshot.addChunk(req.ChunkId, req.Chunk),
+	}
 
 	if app.offerSnapshot.isFull() {
 		chunks := app.offerSnapshot.bytes()
@@ -538,11 +542,10 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA
 			"snapshot_height", app.offerSnapshot.snapshot.Height,
 			"snapshot_apphash", app.offerSnapshot.appHash,
 		)
+		resp.Result = abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT
 		app.offerSnapshot = nil
 	}
 
-	resp := &abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}
-
 	app.logger.Debug("ApplySnapshotChunk", "resp", resp)
 	return resp, nil
 }
@@ -556,7 +559,9 @@ func (app *Application) createSnapshot() error {
 	if err != nil {
 		return fmt.Errorf("create snapshot: %w", err)
 	}
-	app.logger.Info("created state sync snapshot", "height", height, "apphash", app.LastCommittedState.GetAppHash())
+	app.logger.Info("created state sync snapshot",
+		"height", height,
+		"apphash", app.LastCommittedState.GetAppHash())
 	err = app.snapshots.Prune(maxSnapshotCount)
 	if err != nil {
 		return fmt.Errorf("prune snapshots: %w", err)
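The handler above turns chunk application into a pull-driven chain: the syncer requests the first chunk using the snapshot hash as its ID, and each ApplySnapshotChunk response lists the IDs to fetch next in NextChunks, until the reassembled bytes match the snapshot hash and the application answers COMPLETE_SNAPSHOT. A minimal consumer-side sketch of that loop (the chunkSource interface and the restore helper are illustrative only, not part of this change):

```go
package sketch // illustrative only; not part of the diff

import (
	"context"
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// chunkSource abstracts "fetch chunk bytes from a peer"; hypothetical.
type chunkSource interface {
	LoadChunk(height uint64, version uint32, chunkID []byte) ([]byte, error)
}

// applier matches the ApplySnapshotChunk signature used by the kvstore Application.
type applier interface {
	ApplySnapshotChunk(context.Context, *abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error)
}

// restore walks the chunk chain: the first chunk ID is the snapshot hash,
// and every response tells us which chunk IDs to request next.
func restore(ctx context.Context, app applier, src chunkSource, snap *abci.Snapshot) error {
	queue := [][]byte{snap.Hash}
	for len(queue) > 0 {
		id := queue[0]
		queue = queue[1:]
		chunk, err := src.LoadChunk(snap.Height, snap.Version, id)
		if err != nil {
			return err
		}
		resp, err := app.ApplySnapshotChunk(ctx, &abci.RequestApplySnapshotChunk{
			ChunkId: id,
			Chunk:   chunk,
		})
		if err != nil {
			return err
		}
		if resp.Result == abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT {
			return nil // reassembled bytes matched snap.Hash
		}
		queue = append(queue, resp.NextChunks...)
	}
	return fmt.Errorf("chunk chain ended before COMPLETE_SNAPSHOT")
}
```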
diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go
index 14a36d4a3c..caee1a4a4c 100644
--- a/abci/example/kvstore/kvstore_test.go
+++ b/abci/example/kvstore/kvstore_test.go
@@ -493,24 +493,20 @@ func TestSnapshots(t *testing.T) {
 	})
 	require.NoError(t, err)
 	assert.Equal(t, types.ResponseOfferSnapshot_ACCEPT, respOffer.Result)
+	loaded, err := app.LoadSnapshotChunk(ctx, &types.RequestLoadSnapshotChunk{
+		Height:  recentSnapshot.Height,
+		ChunkId: recentSnapshot.Hash,
+		Version: recentSnapshot.Version,
+	})
+	require.NoError(t, err)
 
-	for chunk := uint32(0); chunk < recentSnapshot.Chunks; chunk++ {
-		loaded, err := app.LoadSnapshotChunk(ctx, &types.RequestLoadSnapshotChunk{
-			Height: recentSnapshot.Height,
-			Chunk:  chunk,
-			Format: recentSnapshot.Format,
-		})
-		require.NoError(t, err)
-
-		applied, err := dstApp.ApplySnapshotChunk(ctx, &types.RequestApplySnapshotChunk{
-			Index:  chunk,
-			Chunk:  loaded.Chunk,
-			Sender: "app",
-		})
-		require.NoError(t, err)
-		assert.Equal(t, types.ResponseApplySnapshotChunk_ACCEPT, applied.Result)
-	}
-
+	applied, err := dstApp.ApplySnapshotChunk(ctx, &types.RequestApplySnapshotChunk{
+		ChunkId: recentSnapshot.Hash,
+		Chunk:   loaded.Chunk,
+		Sender:  "app",
+	})
+	require.NoError(t, err)
+	assert.Equal(t, types.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT, applied.Result)
 	infoResp, err := dstApp.Info(ctx, &types.RequestInfo{})
 	require.NoError(t, err)
 	assertRespInfo(t, int64(recentSnapshot.Height), appHashes[snapshotHeight], *infoResp)
diff --git a/abci/example/kvstore/snapshots.go b/abci/example/kvstore/snapshots.go
index 919f640d1d..375ef89771 100644
--- a/abci/example/kvstore/snapshots.go
+++ b/abci/example/kvstore/snapshots.go
@@ -3,10 +3,10 @@ package kvstore
 
 import (
 	"bytes"
+	"encoding/hex"
 	"encoding/json"
 	"errors"
 	"fmt"
-	"math"
 	"os"
 	"path/filepath"
 
@@ -15,6 +15,7 @@ import (
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/crypto"
 	tmbytes "github.com/tendermint/tendermint/libs/bytes"
+	"github.com/tendermint/tendermint/libs/ds"
 )
 
 const (
@@ -27,11 +28,17 @@ const (
 // SnapshotStore stores state sync snapshots. Snapshots are stored simply as
 // JSON files, and chunks are generated on-the-fly by splitting the JSON data
 // into fixed-size chunks.
-type SnapshotStore struct {
-	sync.RWMutex
-	dir      string
-	metadata []abci.Snapshot
-}
+type (
+	SnapshotStore struct {
+		sync.RWMutex
+		dir      string
+		metadata []abci.Snapshot
+	}
+	chunkItem struct {
+		Data         []byte   `json:"data"`
+		NextChunkIDs [][]byte `json:"nextChunkIDs"`
+	}
+)
 
 // NewSnapshotStore creates a new snapshot store.
 func NewSnapshotStore(dir string) (*SnapshotStore, error) {
@@ -49,7 +56,7 @@ func NewSnapshotStore(dir string) (*SnapshotStore, error) {
 // called internally on construction.
 func (s *SnapshotStore) loadMetadata() error {
 	file := filepath.Join(s.dir, "metadata.json")
-	metadata := []abci.Snapshot{}
+	var metadata []abci.Snapshot
 
 	bz, err := os.ReadFile(file)
 	switch {
@@ -96,10 +103,9 @@ func (s *SnapshotStore) Create(state State) (abci.Snapshot, error) {
 	}
 	height := state.GetHeight()
 	snapshot := abci.Snapshot{
-		Height: uint64(height),
-		Format: 1,
-		Hash:   crypto.Checksum(bz),
-		Chunks: byteChunks(bz),
+		Height:  uint64(height),
+		Version: 1,
+		Hash:    crypto.Checksum(bz),
 	}
 	err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height)), bz, 0644)
 	if err != nil {
@@ -152,16 +158,18 @@ func (s *SnapshotStore) List() ([]*abci.Snapshot, error) {
 }
 
 // LoadChunk loads a snapshot chunk.
-func (s *SnapshotStore) LoadChunk(height uint64, format uint32, chunk uint32) ([]byte, error) {
+func (s *SnapshotStore) LoadChunk(height uint64, version uint32, chunkID []byte) ([]byte, error) {
 	s.RLock()
 	defer s.RUnlock()
 	for _, snapshot := range s.metadata {
-		if snapshot.Height == height && snapshot.Format == format {
-			bz, err := os.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height)))
+		if snapshot.Height == height && snapshot.Version == version {
+			bz, err := os.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%d.json", height)))
 			if err != nil {
 				return nil, err
 			}
-			return byteChunk(bz, chunk), nil
+			chunks := makeChunks(bz, snapshotChunkSize)
+			item := makeChunkItem(chunks, chunkID)
+			return json.Marshal(item)
 		}
 	}
 	return nil, nil
@@ -170,54 +178,79 @@ func (s *SnapshotStore) LoadChunk(height uint64, format uint32, chunk uint32) ([
 type offerSnapshot struct {
 	snapshot *abci.Snapshot
 	appHash  tmbytes.HexBytes
-	chunks   [][]byte
-	chunkCnt int
+	chunks   *ds.OrderedMap[string, []byte]
 }
 
 func newOfferSnapshot(snapshot *abci.Snapshot, appHash tmbytes.HexBytes) *offerSnapshot {
 	return &offerSnapshot{
 		snapshot: snapshot,
 		appHash:  appHash,
-		chunks:   make([][]byte, snapshot.Chunks),
-		chunkCnt: 0,
+		chunks:   ds.NewOrderedMap[string, []byte](),
	}
 }
 
-func (s *offerSnapshot) addChunk(index int, chunk []byte) {
-	if s.chunks[index] != nil {
-		return
+func (s *offerSnapshot) addChunk(chunkID tmbytes.HexBytes, data []byte) [][]byte {
+	chunkIDStr := chunkID.String()
+	if s.chunks.Has(chunkIDStr) {
+		return nil
 	}
-	s.chunks[index] = chunk
-	s.chunkCnt++
+	var item chunkItem
+	err := json.Unmarshal(data, &item)
+	if err != nil {
+		panic("failed to decode chunk data: " + err.Error())
+	}
+	s.chunks.Put(chunkIDStr, item.Data)
+	return item.NextChunkIDs
 }
 
 func (s *offerSnapshot) isFull() bool {
-	return s.chunkCnt == int(s.snapshot.Chunks)
+	return bytes.Equal(crypto.Checksum(s.bytes()), s.snapshot.Hash)
 }
 
 func (s *offerSnapshot) bytes() []byte {
+	chunks := s.chunks.Values()
 	buf := bytes.NewBuffer(nil)
-	for _, chunk := range s.chunks {
+	for _, chunk := range chunks {
 		buf.Write(chunk)
 	}
 	return buf.Bytes()
 }
 
-// byteChunk returns the chunk at a given index from the full byte slice.
-func byteChunk(bz []byte, index uint32) []byte {
-	start := int(index * snapshotChunkSize)
-	end := int((index + 1) * snapshotChunkSize)
-	switch {
-	case start >= len(bz):
-		return nil
-	case end >= len(bz):
-		return bz[start:]
-	default:
-		return bz[start:end]
+// makeChunkItem returns the chunk with the given ID together with the IDs of
+// the chunks that should be requested next.
+func makeChunkItem(chunks *ds.OrderedMap[string, []byte], chunkID []byte) chunkItem {
+	chunkIDStr := hex.EncodeToString(chunkID)
+	val, ok := chunks.Get(chunkIDStr)
+	if !ok {
+		panic("chunk not found")
 	}
+	chunkIDs := chunks.Keys()
+	ci := chunkItem{Data: val}
+	i := 0
+	for ; i < len(chunkIDs) && chunkIDs[i] != chunkIDStr; i++ {
+	}
+	if i+1 < len(chunkIDs) {
+		data, err := hex.DecodeString(chunkIDs[i+1])
+		if err != nil {
+			panic(err)
+		}
+		ci.NextChunkIDs = [][]byte{data}
+	}
+	return ci
 }
 
-// byteChunks calculates the number of chunks in the byte slice.
-func byteChunks(bz []byte) uint32 {
-	return uint32(math.Ceil(float64(len(bz)) / snapshotChunkSize))
+func makeChunks(bz []byte, chunkSize int) *ds.OrderedMap[string, []byte] {
+	chunks := ds.NewOrderedMap[string, []byte]()
+	totalHash := hex.EncodeToString(crypto.Checksum(bz))
+	key := totalHash
+	for i := 0; i < len(bz); i += chunkSize {
+		j := i + chunkSize
+		if j > len(bz) {
+			j = len(bz)
+		}
+		if i > 1 {
+			key = hex.EncodeToString(crypto.Checksum(bz[i:j]))
+		}
+		chunks.Put(key, append([]byte(nil), bz[i:j]...))
+	}
+	return chunks
 }
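The chain is keyed as follows: makeChunks stores the first chunk under the checksum of the entire snapshot, so the snapshot hash doubles as the first chunk ID, and every later chunk under the checksum of its own bytes (the `i > 1` test behaves the same as `i > 0` here, since snapshotChunkSize is far greater than one). makeChunkItem then pairs a chunk's data with the ID of its single successor, which is what flows back as NextChunks. Note that identical chunk contents hash to the same key, so duplicates collapse into a single entry. A self-contained sketch of the same keying scheme, with sha256 standing in for crypto.Checksum and a plain map for ds.OrderedMap:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// chunkChain mirrors makeChunks: the first chunk is keyed by the hash of the
// whole payload, every subsequent chunk by the hash of its own bytes.
func chunkChain(bz []byte, chunkSize int) (ids []string, data map[string][]byte) {
	data = make(map[string][]byte)
	sum := sha256.Sum256(bz)
	key := hex.EncodeToString(sum[:])
	for i := 0; i < len(bz); i += chunkSize {
		j := i + chunkSize
		if j > len(bz) {
			j = len(bz)
		}
		if i > 0 { // all chunks after the first are keyed by their own hash
			sum = sha256.Sum256(bz[i:j])
			key = hex.EncodeToString(sum[:])
		}
		ids = append(ids, key)
		data[key] = append([]byte(nil), bz[i:j]...)
	}
	return ids, data
}

func main() {
	payload := make([]byte, 1032) // same payload size as TestChunkItem below
	for i := range payload {
		payload[i] = byte(i % 251) // 251 is prime, so no two 64-byte chunks repeat
	}
	ids, data := chunkChain(payload, 64)
	// 1032 bytes in 64-byte chunks -> 17 chunks; the final one holds 8 bytes.
	fmt.Println(len(ids), len(data[ids[len(ids)-1]]))
}
```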
diff --git a/abci/example/kvstore/snapshots_test.go b/abci/example/kvstore/snapshots_test.go
new file mode 100644
index 0000000000..2b75b9b391
--- /dev/null
+++ b/abci/example/kvstore/snapshots_test.go
@@ -0,0 +1,37 @@
+package kvstore
+
+import (
+	"encoding/hex"
+	"math/rand"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestChunkItem(t *testing.T) {
+	const size = 64
+	chunks := makeChunks(makeBytes(1032), size)
+	keys := chunks.Keys()
+	values := chunks.Values()
+	for i, key := range keys {
+		chunkID, err := hex.DecodeString(key)
+		require.NoError(t, err)
+		item := makeChunkItem(chunks, chunkID)
+		require.Equal(t, values[i], item.Data)
+		if i+1 < len(keys) {
+			nextChunkID, err := hex.DecodeString(keys[i+1])
+			require.NoError(t, err)
+			require.Equal(t, [][]byte{nextChunkID}, item.NextChunkIDs)
+		} else {
+			require.Nil(t, item.NextChunkIDs)
+		}
+	}
+}
+
+func makeBytes(size int) []byte {
+	bz := make([]byte, size)
+	for i := 0; i < size; i++ {
+		bz[i] = byte(rand.Int63n(256))
+	}
+	return bz
+}
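The rest of the diff is regenerated gogoproto output. The hand-readable part of it is the wire-format consequence of the schema change: format/chunk (varint fields) become version/chunk_id, and chunk_id, refetch_chunks, and next_chunks are length-delimited bytes, so the key bytes in the hand-rolled marshal code change accordingly, because a protobuf field key byte is `field_number<<3 | wire_type`. A quick check (the keyByte helper is ours, not from the codebase):

```go
package main

import "fmt"

// keyByte builds a protobuf field key: the field number shifted left by
// three bits, OR'd with the wire type (0 = varint, 2 = length-delimited).
func keyByte(field, wire uint8) byte {
	return field<<3 | wire
}

func main() {
	fmt.Printf("%#x\n", keyByte(3, 0)) // 0x18: old RequestLoadSnapshotChunk.chunk (varint)
	fmt.Printf("%#x\n", keyByte(3, 2)) // 0x1a: new RequestLoadSnapshotChunk.chunk_id (bytes)
	fmt.Printf("%#x\n", keyByte(1, 2)) // 0xa:  new RequestApplySnapshotChunk.chunk_id
	fmt.Printf("%#x\n", keyByte(4, 2)) // 0x22: new ResponseApplySnapshotChunk.next_chunks
}
```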
diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go
index e6b831ead6..024e6774f7 100644
--- a/abci/types/types.pb.go
+++ b/abci/types/types.pb.go
@@ -130,12 +130,13 @@ func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) {
 type ResponseApplySnapshotChunk_Result int32
 
 const (
-	ResponseApplySnapshotChunk_UNKNOWN         ResponseApplySnapshotChunk_Result = 0
-	ResponseApplySnapshotChunk_ACCEPT          ResponseApplySnapshotChunk_Result = 1
-	ResponseApplySnapshotChunk_ABORT           ResponseApplySnapshotChunk_Result = 2
-	ResponseApplySnapshotChunk_RETRY           ResponseApplySnapshotChunk_Result = 3
-	ResponseApplySnapshotChunk_RETRY_SNAPSHOT  ResponseApplySnapshotChunk_Result = 4
-	ResponseApplySnapshotChunk_REJECT_SNAPSHOT ResponseApplySnapshotChunk_Result = 5
+	ResponseApplySnapshotChunk_UNKNOWN           ResponseApplySnapshotChunk_Result = 0
+	ResponseApplySnapshotChunk_ACCEPT            ResponseApplySnapshotChunk_Result = 1
+	ResponseApplySnapshotChunk_ABORT             ResponseApplySnapshotChunk_Result = 2
+	ResponseApplySnapshotChunk_RETRY             ResponseApplySnapshotChunk_Result = 3
+	ResponseApplySnapshotChunk_RETRY_SNAPSHOT    ResponseApplySnapshotChunk_Result = 4
+	ResponseApplySnapshotChunk_REJECT_SNAPSHOT   ResponseApplySnapshotChunk_Result = 5
+	ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT ResponseApplySnapshotChunk_Result = 6
 )
 
 var ResponseApplySnapshotChunk_Result_name = map[int32]string{
@@ -145,15 +146,17 @@ var ResponseApplySnapshotChunk_Result_name = map[int32]string{
 	3: "RETRY",
 	4: "RETRY_SNAPSHOT",
 	5: "REJECT_SNAPSHOT",
+	6: "COMPLETE_SNAPSHOT",
 }
 
 var ResponseApplySnapshotChunk_Result_value = map[string]int32{
-	"UNKNOWN":         0,
-	"ACCEPT":          1,
-	"ABORT":           2,
-	"RETRY":           3,
-	"RETRY_SNAPSHOT":  4,
-	"REJECT_SNAPSHOT": 5,
+	"UNKNOWN":           0,
+	"ACCEPT":            1,
+	"ABORT":             2,
+	"RETRY":             3,
+	"RETRY_SNAPSHOT":    4,
+	"REJECT_SNAPSHOT":   5,
+	"COMPLETE_SNAPSHOT": 6,
 }
 
 func (x ResponseApplySnapshotChunk_Result) String() string {
@@ -1024,9 +1027,9 @@ func (m *RequestOfferSnapshot) GetAppHash() []byte {
 
 // Used during state sync to retrieve snapshot chunks from peers.
 type RequestLoadSnapshotChunk struct {
-	Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
-	Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"`
-	Chunk  uint32 `protobuf:"varint,3,opt,name=chunk,proto3" json:"chunk,omitempty"`
+	Height  uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
+	Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+	ChunkId []byte `protobuf:"bytes,3,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"`
 }
 
 func (m *RequestLoadSnapshotChunk) Reset() { *m = RequestLoadSnapshotChunk{} }
@@ -1069,18 +1072,18 @@ func (m *RequestLoadSnapshotChunk) GetHeight() uint64 {
 	return 0
 }
 
-func (m *RequestLoadSnapshotChunk) GetFormat() uint32 {
+func (m *RequestLoadSnapshotChunk) GetVersion() uint32 {
 	if m != nil {
-		return m.Format
+		return m.Version
 	}
 	return 0
 }
 
-func (m *RequestLoadSnapshotChunk) GetChunk() uint32 {
+func (m *RequestLoadSnapshotChunk) GetChunkId() []byte {
 	if m != nil {
-		return m.Chunk
+		return m.ChunkId
 	}
-	return 0
+	return nil
 }
 
 // Applies a snapshot chunk.
@@ -1096,9 +1099,9 @@ func (m *RequestLoadSnapshotChunk) GetChunk() uint32 {
 // it will reject the snapshot and try a different one via OfferSnapshot. The application should be prepared to reset
 // and accept it or abort as appropriate.
 type RequestApplySnapshotChunk struct {
-	Index  uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
-	Chunk  []byte `protobuf:"bytes,2,opt,name=chunk,proto3" json:"chunk,omitempty"`
-	Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"`
+	ChunkId []byte `protobuf:"bytes,1,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"`
+	Chunk   []byte `protobuf:"bytes,2,opt,name=chunk,proto3" json:"chunk,omitempty"`
+	Sender  string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"`
 }
 
 func (m *RequestApplySnapshotChunk) Reset() { *m = RequestApplySnapshotChunk{} }
@@ -1134,11 +1137,11 @@ func (m *RequestApplySnapshotChunk) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_RequestApplySnapshotChunk proto.InternalMessageInfo
 
-func (m *RequestApplySnapshotChunk) GetIndex() uint32 {
+func (m *RequestApplySnapshotChunk) GetChunkId() []byte {
 	if m != nil {
-		return m.Index
+		return m.ChunkId
 	}
-	return 0
+	return nil
 }
 
 func (m *RequestApplySnapshotChunk) GetChunk() []byte {
@@ -2820,11 +2823,13 @@ type ResponseApplySnapshotChunk struct {
 	Result ResponseApplySnapshotChunk_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseApplySnapshotChunk_Result" json:"result,omitempty"`
 	// Refetch and reapply the given chunks, regardless of `result`.
 	// Only the listed chunks will be refetched, and reapplied in sequential order.
-	RefetchChunks []uint32 `protobuf:"varint,2,rep,packed,name=refetch_chunks,json=refetchChunks,proto3" json:"refetch_chunks,omitempty"`
+	RefetchChunks [][]byte `protobuf:"bytes,2,rep,name=refetch_chunks,json=refetchChunks,proto3" json:"refetch_chunks,omitempty"`
 	// Reject the given P2P senders, regardless of `Result`. Any chunks already applied will not be refetched
 	// unless explicitly requested, but queued chunks from these senders will be discarded, and new chunks
 	// or other snapshots rejected.
 	RejectSenders []string `protobuf:"bytes,3,rep,name=reject_senders,json=rejectSenders,proto3" json:"reject_senders,omitempty"`
+	// Next chunks provides the list of chunks that should be requested next, if any.
+	NextChunks [][]byte `protobuf:"bytes,4,rep,name=next_chunks,json=nextChunks,proto3" json:"next_chunks,omitempty"`
 }
 
 func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapshotChunk{} }
@@ -2867,7 +2872,7 @@ func (m *ResponseApplySnapshotChunk) GetResult() ResponseApplySnapshotChunk_Resu
 	return ResponseApplySnapshotChunk_UNKNOWN
 }
 
-func (m *ResponseApplySnapshotChunk) GetRefetchChunks() []uint32 {
+func (m *ResponseApplySnapshotChunk) GetRefetchChunks() [][]byte {
 	if m != nil {
 		return m.RefetchChunks
 	}
@@ -2881,6 +2886,13 @@ func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string {
 	return nil
 }
 
+func (m *ResponseApplySnapshotChunk) GetNextChunks() [][]byte {
+	if m != nil {
+		return m.NextChunks
+	}
+	return nil
+}
+
 type ResponsePrepareProposal struct {
 	// Possibly modified list of transactions that have been picked as part of the proposed block.
 	TxRecords []*TxRecord `protobuf:"bytes,1,rep,name=tx_records,json=txRecords,proto3" json:"tx_records,omitempty"`
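With refetch_chunks moving from packed varints to repeated bytes and next_chunks added as field 4, a round trip through the generated code is a reasonable smoke test. A sketch that assumes the usual gogoproto-generated Marshal/Unmarshal methods (the sized-buffer variants appear later in this diff, but these wrappers are not shown):

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

func main() {
	resp := &abci.ResponseApplySnapshotChunk{
		Result:     abci.ResponseApplySnapshotChunk_ACCEPT,
		NextChunks: [][]byte{{0xde, 0xad, 0xbe, 0xef}}, // chunk IDs the syncer should request next
	}
	bz, err := resp.Marshal() // gogoproto-generated
	if err != nil {
		panic(err)
	}
	var got abci.ResponseApplySnapshotChunk
	if err := got.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Println(got.Result, got.GetNextChunks())
}
```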
@@ -4135,8 +4147,7 @@ func (m *Misbehavior) GetTotalVotingPower() int64 {
 
 type Snapshot struct {
 	Height   uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
-	Format   uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"`
-	Chunks   uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"`
+	Version  uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
 	Hash     []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"`
 	Metadata []byte `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"`
 }
@@ -4181,16 +4192,9 @@ func (m *Snapshot) GetHeight() uint64 {
 	return 0
 }
 
-func (m *Snapshot) GetFormat() uint32 {
-	if m != nil {
-		return m.Format
-	}
-	return 0
-}
-
-func (m *Snapshot) GetChunks() uint32 {
+func (m *Snapshot) GetVersion() uint32 {
 	if m != nil {
-		return m.Chunks
+		return m.Version
 	}
 	return 0
 }
@@ -4271,233 +4275,233 @@ func init() {
 
 func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) }
 
 var fileDescriptor_252557cfdd89a31a = []byte{
-	// 3604 bytes of a gzipped FileDescriptorProto
-	[... 3604 bytes of compressed descriptor data elided ...]
+	// 3616 bytes of a gzipped FileDescriptorProto
+	[... 3616 bytes of compressed descriptor data elided ...]
0xe7, 0x7b, 0xf3, 0x5b, 0x9b, 0xb3, 0x46, 0xb6, 0xd5, 0xfc, + 0xee, 0xf9, 0xaa, 0xf0, 0xfd, 0xf3, 0x55, 0xe1, 0x87, 0xe7, 0xab, 0xc2, 0x37, 0x3f, 0xae, 0xc6, + 0xbe, 0xff, 0x71, 0x35, 0xf6, 0xeb, 0x1f, 0x57, 0x63, 0xff, 0xfc, 0xda, 0x99, 0x6a, 0xf5, 0x46, + 0x27, 0x1b, 0x1d, 0x6d, 0xb0, 0xe9, 0xfd, 0x33, 0x44, 0xd8, 0x7f, 0x3b, 0x4e, 0xd2, 0x34, 0xdc, + 0xdd, 0xfb, 0x73, 0x00, 0x00, 0x00, 0xff, 0xff, 0x25, 0x24, 0x2d, 0xe9, 0xfb, 0x31, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -5792,13 +5796,15 @@ func (m *RequestLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l - if m.Chunk != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Chunk)) + if len(m.ChunkId) > 0 { + i -= len(m.ChunkId) + copy(dAtA[i:], m.ChunkId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChunkId))) i-- - dAtA[i] = 0x18 + dAtA[i] = 0x1a } - if m.Format != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + if m.Version != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Version)) i-- dAtA[i] = 0x10 } @@ -5844,10 +5850,12 @@ func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, erro i-- dAtA[i] = 0x12 } - if m.Index != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + if len(m.ChunkId) > 0 { + i -= len(m.ChunkId) + copy(dAtA[i:], m.ChunkId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChunkId))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -7141,6 +7149,15 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + if len(m.NextChunks) > 0 { + for iNdEx := len(m.NextChunks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NextChunks[iNdEx]) + copy(dAtA[i:], m.NextChunks[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextChunks[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } if len(m.RejectSenders) > 0 { for iNdEx := len(m.RejectSenders) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.RejectSenders[iNdEx]) @@ -7151,22 +7168,13 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } } if len(m.RefetchChunks) > 0 { - dAtA51 := make([]byte, len(m.RefetchChunks)*10) - var j50 int - for _, num := range m.RefetchChunks { - for num >= 1<<7 { - dAtA51[j50] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j50++ - } - dAtA51[j50] = uint8(num) - j50++ - } - i -= j50 - copy(dAtA[i:], dAtA51[:j50]) - i = encodeVarintTypes(dAtA, i, uint64(j50)) - i-- - dAtA[i] = 0x12 + for iNdEx := len(m.RefetchChunks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RefetchChunks[iNdEx]) + copy(dAtA[i:], m.RefetchChunks[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RefetchChunks[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } if m.Result != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Result)) @@ -8121,12 +8129,12 @@ func (m *Misbehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n63, err63 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err63 != nil { - return 0, err63 + n61, err61 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err61 != nil { + return 0, err61 } - i -= n63 - i = encodeVarintTypes(dAtA, i, uint64(n63)) + i -= n61 + i = encodeVarintTypes(dAtA, i, uint64(n61)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -8186,13 +8194,8 @@ func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if m.Chunks != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Chunks)) 
- i-- - dAtA[i] = 0x18 - } - if m.Format != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + if m.Version != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Version)) i-- dAtA[i] = 0x10 } @@ -8559,11 +8562,12 @@ func (m *RequestLoadSnapshotChunk) Size() (n int) { if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } - if m.Format != 0 { - n += 1 + sovTypes(uint64(m.Format)) + if m.Version != 0 { + n += 1 + sovTypes(uint64(m.Version)) } - if m.Chunk != 0 { - n += 1 + sovTypes(uint64(m.Chunk)) + l = len(m.ChunkId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } return n } @@ -8574,8 +8578,9 @@ func (m *RequestApplySnapshotChunk) Size() (n int) { } var l int _ = l - if m.Index != 0 { - n += 1 + sovTypes(uint64(m.Index)) + l = len(m.ChunkId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } l = len(m.Chunk) if l > 0 { @@ -9206,11 +9211,10 @@ func (m *ResponseApplySnapshotChunk) Size() (n int) { n += 1 + sovTypes(uint64(m.Result)) } if len(m.RefetchChunks) > 0 { - l = 0 - for _, e := range m.RefetchChunks { - l += sovTypes(uint64(e)) + for _, b := range m.RefetchChunks { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) } - n += 1 + sovTypes(uint64(l)) + l } if len(m.RejectSenders) > 0 { for _, s := range m.RejectSenders { @@ -9218,6 +9222,12 @@ func (m *ResponseApplySnapshotChunk) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if len(m.NextChunks) > 0 { + for _, b := range m.NextChunks { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } return n } @@ -9639,11 +9649,8 @@ func (m *Snapshot) Size() (n int) { if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } - if m.Format != 0 { - n += 1 + sovTypes(uint64(m.Format)) - } - if m.Chunks != 0 { - n += 1 + sovTypes(uint64(m.Chunks)) + if m.Version != 0 { + n += 1 + sovTypes(uint64(m.Version)) } l = len(m.Hash) if l > 0 { @@ -11258,9 +11265,9 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - m.Format = 0 + m.Version = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11270,16 +11277,16 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Format |= uint32(b&0x7F) << shift + m.Version |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkId", wireType) } - m.Chunk = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11289,11 +11296,26 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Chunk |= uint32(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkId = append(m.ChunkId[:0], dAtA[iNdEx:postIndex]...) 
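The generated-code churn above reduces to a single proto change: in RequestLoadSnapshotChunk and its siblings, field 3 stops being a varint chunk index (chunk) and becomes a length-delimited chunk ID (chunk_id), so the field key byte moves from 0x18 to 0x1a. A minimal standalone sketch of the two wire encodings; the four-byte ID below is made up for brevity, real IDs are 32-byte hashes:

package main

import "fmt"

func main() {
	// Old wire format: field 3, wire type 0 (varint) -> key byte (3<<3)|0 = 0x18.
	oldField := []byte{0x18, 0x07} // chunk = 7

	// New wire format: field 3, wire type 2 (length-delimited) -> key byte (3<<3)|2 = 0x1a,
	// followed by a varint length and the raw chunk ID bytes.
	chunkID := []byte{0xde, 0xad, 0xbe, 0xef} // hypothetical ID
	newField := append([]byte{0x1a, byte(len(chunkID))}, chunkID...)

	fmt.Printf("old: % x\nnew: % x\n", oldField, newField)
}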
+ if m.ChunkId == nil { + m.ChunkId = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11345,10 +11367,10 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkId", wireType) } - m.Index = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11358,11 +11380,26 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint32(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkId = append(m.ChunkId[:0], dAtA[iNdEx:postIndex]...) + if m.ChunkId == nil { + m.ChunkId = []byte{} + } + iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) @@ -14952,81 +14989,37 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { } } case 2: - if wireType == 0 { - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RefetchChunks = append(m.RefetchChunks, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefetchChunks", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.RefetchChunks) == 0 { - m.RefetchChunks = make([]uint32, 0, elementCount) - } - for iNdEx < postIndex { - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RefetchChunks = append(m.RefetchChunks, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field RefetchChunks", wireType) + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.RefetchChunks = append(m.RefetchChunks, make([]byte, postIndex-iNdEx)) + copy(m.RefetchChunks[len(m.RefetchChunks)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong 
wireType = %d for field RejectSenders", wireType) @@ -15059,6 +15052,38 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { } m.RejectSenders = append(m.RejectSenders, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextChunks", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextChunks = append(m.NextChunks, make([]byte, postIndex-iNdEx)) + copy(m.NextChunks[len(m.NextChunks)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -17913,28 +17938,9 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - m.Format = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Format |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - m.Chunks = 0 + m.Version = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -17944,7 +17950,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Chunks |= uint32(b&0x7F) << shift + m.Version |= uint32(b&0x7F) << shift if b < 0x80 { break } diff --git a/config/config.go b/config/config.go index 103f50bfcc..cc7265f2f9 100644 --- a/config/config.go +++ b/config/config.go @@ -911,7 +911,7 @@ type StateSyncConfig struct { ChunkRequestTimeout time.Duration `mapstructure:"chunk-request-timeout"` // The number of concurrent chunk and block fetchers to run (default: 4). 
- Fetchers int32 `mapstructure:"fetchers"` + Fetchers int `mapstructure:"fetchers"` } func (cfg *StateSyncConfig) TrustHashBytes() []byte { diff --git a/internal/rpc/core/status.go b/internal/rpc/core/status.go index 85a788094d..9086c71feb 100644 --- a/internal/rpc/core/status.go +++ b/internal/rpc/core/status.go @@ -93,7 +93,6 @@ func (env *Environment) Status(ctx context.Context) (*coretypes.ResultStatus, er result.SyncInfo.ChunkProcessAvgTime = env.StateSyncMetricer.ChunkProcessAvgTime() result.SyncInfo.SnapshotHeight = env.StateSyncMetricer.SnapshotHeight() result.SyncInfo.SnapshotChunksCount = env.StateSyncMetricer.SnapshotChunksCount() - result.SyncInfo.SnapshotChunksTotal = env.StateSyncMetricer.SnapshotChunksTotal() result.SyncInfo.BackFilledBlocks = env.StateSyncMetricer.BackFilledBlocks() result.SyncInfo.BackFillBlocksTotal = env.StateSyncMetricer.BackFillBlocksTotal() } diff --git a/internal/statesync/chunks.go b/internal/statesync/chunks.go index 0b6753118e..2a49bceb88 100644 --- a/internal/statesync/chunks.go +++ b/internal/statesync/chunks.go @@ -5,374 +5,395 @@ import ( "fmt" "os" "path/filepath" - "strconv" "time" sync "github.com/sasha-s/go-deadlock" + "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/types" ) // errDone is returned by chunkQueue.Next() when all chunks have been returned. -var errDone = errors.New("chunk queue has completed") +var ( + errDone = errors.New("chunk queue has completed") + errQueueEmpty = errors.New("requestQueue is empty") + errChunkNil = errors.New("cannot add nil chunk") + errNoChunkItem = errors.New("no chunk item found") + errNilSnapshot = errors.New("snapshot is nil") +) -// chunk contains data for a chunk. -type chunk struct { - Height uint64 - Format uint32 - Index uint32 - Chunk []byte - Sender types.NodeID -} +const ( + initStatus chunkStatus = iota + inProgressStatus + discardedStatus + receivedStatus + doneStatus +) -// chunkQueue manages chunks for a state sync process, ordering them if requested. It acts as an -// iterator over all chunks, but callers can request chunks to be retried, optionally after -// refetching. -type chunkQueue struct { - sync.Mutex - snapshot *snapshot // if this is nil, the queue has been closed - dir string // temp dir for on-disk chunk storage - chunkFiles map[uint32]string // path to temporary chunk file - chunkSenders map[uint32]types.NodeID // the peer who sent the given chunk - chunkAllocated map[uint32]bool // chunks that have been allocated via Allocate() - chunkReturned map[uint32]bool // chunks returned via Next() - waiters map[uint32][]chan<- uint32 // signals WaitFor() waiters about chunk arrival -} +// chunk contains data for a chunk. +type ( + chunk struct { + Height uint64 + Version uint32 + ID bytes.HexBytes + Chunk []byte + Sender types.NodeID + } + chunkStatus int + chunkItem struct { + chunkID bytes.HexBytes + file string // path to temporary chunk file + sender types.NodeID // the peer who sent the given chunk + waitChs []chan<- bytes.HexBytes // signals WaitFor() waiters about chunk arrival + status chunkStatus // status of the chunk + } + // chunkQueue manages chunks for a state sync process, ordering them if requested. It acts as an + // iterator over all chunks, but callers can request chunks to be retried, optionally after + // refetching. 
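The five statuses above form a small per-chunk state machine that the chunkQueue declared next drives. A rough, non-normative summary of the transitions, matching the method behavior shown in the rest of this file:

package main

import "fmt"

func main() {
	// Which queue operation moves a chunk into which status. The method names
	// are the real ones below; the notes are editorial shorthand.
	transitions := []struct{ op, to, note string }{
		{"Enqueue", "initStatus", "chunk ID learned (e.g. from NextChunks), awaiting fetch"},
		{"Dequeue", "inProgressStatus", "a fetcher has taken responsibility for requesting it"},
		{"Add", "receivedStatus", "chunk body arrived and was written to the temp dir"},
		{"Next", "doneStatus", "chunk handed over to the snapshot applier"},
		{"Discard", "discardedStatus", "chunk dropped; it may be fetched again"},
	}
	for _, t := range transitions {
		fmt.Printf("%-8s -> %-16s %s\n", t.op, t.to, t.note)
	}
}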
+ chunkQueue struct { + mtx sync.Mutex + snapshot *snapshot // if this is nil, the queue has been closed + dir string // temp dir for on-disk chunk storage + items map[string]*chunkItem + requestQueue []bytes.HexBytes + applyCh chan bytes.HexBytes + // doneCount counts the number of chunks that have been processed to the done status + // if for some reason some chunks have been processed more than once, this number should take them into account + doneCount int + } +) // newChunkQueue creates a new chunk queue for a snapshot, using a temp dir for storage. // Callers must call Close() when done. -func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { +func newChunkQueue(snapshot *snapshot, tempDir string, bufLen int) (*chunkQueue, error) { dir, err := os.MkdirTemp(tempDir, "tm-statesync") if err != nil { return nil, fmt.Errorf("unable to create temp dir for state sync chunks: %w", err) } - if snapshot.Chunks == 0 { + if snapshot.Hash.IsZero() { return nil, errors.New("snapshot has no chunks") } - return &chunkQueue{ - snapshot: snapshot, - dir: dir, - chunkFiles: make(map[uint32]string, snapshot.Chunks), - chunkSenders: make(map[uint32]types.NodeID, snapshot.Chunks), - chunkAllocated: make(map[uint32]bool, snapshot.Chunks), - chunkReturned: make(map[uint32]bool, snapshot.Chunks), - waiters: make(map[uint32][]chan<- uint32), + return &chunkQueue{ + snapshot: snapshot, + dir: dir, + items: make(map[string]*chunkItem), + applyCh: make(chan bytes.HexBytes, bufLen), }, nil } -// Add adds a chunk to the queue. It ignores chunks that already exist, returning false. -func (q *chunkQueue) Add(chunk *chunk) (bool, error) { - if chunk == nil || chunk.Chunk == nil { - return false, errors.New("cannot add nil chunk") - } +// IsRequestQueueEmpty returns true if the request queue is empty +func (q *chunkQueue) IsRequestQueueEmpty() bool { + return q.RequestQueueLen() == 0 +} - q.Lock() - defer q.Unlock() +// RequestQueueLen returns the length of the request queue +func (q *chunkQueue) RequestQueueLen() int { + q.mtx.Lock() + defer q.mtx.Unlock() + return len(q.requestQueue) +} - if q.snapshot == nil { - return false, nil // queue is closed - } - if chunk.Height != q.snapshot.Height { - return false, fmt.Errorf( - "invalid chunk height %v, expected %v", - chunk.Height, - q.snapshot.Height, - ) - } - if chunk.Format != q.snapshot.Format { - return false, fmt.Errorf( - "invalid chunk format %v, expected %v", - chunk.Format, - q.snapshot.Format, - ) - } - if chunk.Index >= q.snapshot.Chunks { - return false, fmt.Errorf("received unexpected chunk %v", chunk.Index) - } - if q.chunkFiles[chunk.Index] != "" { - return false, nil +// Enqueue adds a chunk ID to the end of the requestQueue +func (q *chunkQueue) Enqueue(chunkIDs ...[]byte) { + q.mtx.Lock() + defer q.mtx.Unlock() + for _, chunkID := range chunkIDs { + q.enqueue(chunkID) } +} - path := filepath.Join(q.dir, strconv.FormatUint(uint64(chunk.Index), 10)) - err := os.WriteFile(path, chunk.Chunk, 0600) - if err != nil { - return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err) +func (q *chunkQueue) enqueue(chunkID bytes.HexBytes) { + q.requestQueue = append(q.requestQueue, chunkID) + _, ok := q.items[chunkID.String()] + if ok { + return } - - q.chunkFiles[chunk.Index] = path - q.chunkSenders[chunk.Index] = chunk.Sender - - // Signal any waiters that the chunk has arrived.
- for _, waiter := range q.waiters[chunk.Index] { - waiter <- chunk.Index - close(waiter) + q.items[chunkID.String()] = &chunkItem{ + chunkID: chunkID, + status: initStatus, } +} - delete(q.waiters, chunk.Index) - - return true, nil +// Dequeue returns the next chunk ID in the requestQueue, or an error if the queue is empty +func (q *chunkQueue) Dequeue() (bytes.HexBytes, error) { + q.mtx.Lock() + defer q.mtx.Unlock() + return q.dequeue() } -// Allocate allocates a chunk to the caller, making it responsible for fetching it. Returns -// errDone once no chunks are left or the queue is closed. -func (q *chunkQueue) Allocate() (uint32, error) { - q.Lock() - defer q.Unlock() +func (q *chunkQueue) dequeue() (bytes.HexBytes, error) { + if len(q.requestQueue) == 0 { + return nil, errQueueEmpty + } + chunkID := q.requestQueue[0] + q.requestQueue = q.requestQueue[1:] + q.items[chunkID.String()].status = inProgressStatus + return chunkID, nil +} +// Add adds a chunk to the queue. It ignores chunks that already exist, returning false. +func (q *chunkQueue) Add(chunk *chunk) (bool, error) { + if chunk == nil || chunk.Chunk == nil { + return false, errChunkNil + } + q.mtx.Lock() + defer q.mtx.Unlock() if q.snapshot == nil { - return 0, errDone + return false, errNilSnapshot } - - if uint32(len(q.chunkAllocated)) >= q.snapshot.Chunks { - return 0, errDone + chunkIDKey := chunk.ID.String() + item, ok := q.items[chunkIDKey] + if !ok { + return false, fmt.Errorf("failed to add the chunk %x, it was never requested", chunk.ID) } - - for i := uint32(0); i < q.snapshot.Chunks; i++ { - if !q.chunkAllocated[i] { - q.chunkAllocated[i] = true - return i, nil - } + if item.status != inProgressStatus && item.status != discardedStatus { + return false, nil } - - return 0, errDone + err := q.validateChunk(chunk) + if err != nil { + return false, err + } + item.file = filepath.Join(q.dir, chunkIDKey) + err = item.write(chunk.Chunk) + if err != nil { + return false, err + } + item.sender = chunk.Sender + item.status = receivedStatus + q.applyCh <- chunk.ID + // Signal any waiters that the chunk has arrived. + item.closeWaitChs(true) + return true, nil } // Close closes the chunk queue, cleaning up all temporary files. func (q *chunkQueue) Close() error { - q.Lock() - defer q.Unlock() - + q.mtx.Lock() + defer q.mtx.Unlock() if q.snapshot == nil { return nil } - - for _, waiters := range q.waiters { - for _, waiter := range waiters { - close(waiter) - } - } - - q.waiters = nil q.snapshot = nil - + close(q.applyCh) + for len(q.applyCh) > 0 { + <-q.applyCh + } + for _, item := range q.items { + item.closeWaitChs(false) + } if err := os.RemoveAll(q.dir); err != nil { - return fmt.Errorf("failed to clean up state sync tempdir %v: %w", q.dir, err) + return fmt.Errorf("failed to clean up state sync tempdir %s: %w", q.dir, err) } - return nil } // Discard discards a chunk. It will be removed from the queue, available for allocation, and can // be added and returned via Next() again. If the chunk is not already in the queue this does // nothing, to avoid it being allocated to multiple fetchers. -func (q *chunkQueue) Discard(index uint32) error { - q.Lock() - defer q.Unlock() - return q.discard(index) +func (q *chunkQueue) Discard(chunkID bytes.HexBytes) error { + q.mtx.Lock() + defer q.mtx.Unlock() + return q.discard(chunkID) } // discard discards a chunk, scheduling it for refetching. The caller must hold the mutex lock. 
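Enqueue, Dequeue, and Add together replace the old Allocate loop: a fetcher no longer claims the next integer index but pops a concrete chunk ID, requests it from a peer, and hands the body back. A hedged sketch of that driving loop; the chunkSource interface and the request callback are illustrative stand-ins, not the reactor's real API (the real Add takes a *chunk):

package main

import "errors"

var errQueueEmpty = errors.New("requestQueue is empty")

// chunkSource is the subset of queue behavior a fetcher needs.
type chunkSource interface {
	Dequeue() ([]byte, error)          // pop the next chunk ID to request
	Add(id, body []byte) (bool, error) // store a fetched chunk body
}

// fetchOne pops one chunk ID, requests it from a peer, and feeds the body
// back into the queue; duplicates are reported via the bool and are harmless.
func fetchOne(q chunkSource, request func(id []byte) ([]byte, error)) error {
	id, err := q.Dequeue()
	if err != nil {
		return err // typically errQueueEmpty: nothing to fetch right now
	}
	body, err := request(id)
	if err != nil {
		return err // the reactor would discard or re-enqueue here
	}
	_, err = q.Add(id, body)
	return err
}

func main() {}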
-func (q *chunkQueue) discard(index uint32) error { +func (q *chunkQueue) discard(chunkID bytes.HexBytes) error { if q.snapshot == nil { return nil } - - path := q.chunkFiles[index] - if path == "" { + chunkIDKey := chunkID.String() + item, ok := q.items[chunkIDKey] + if !ok { return nil } - - if err := os.Remove(path); err != nil { - return fmt.Errorf("failed to remove chunk %v: %w", index, err) - } - - delete(q.chunkFiles, index) - delete(q.chunkReturned, index) - delete(q.chunkAllocated, index) - - return nil + item.status = discardedStatus + return item.remove() } // DiscardSender discards all *unreturned* chunks from a given sender. If the caller wants to // discard already returned chunks, this can be done via Discard(). func (q *chunkQueue) DiscardSender(peerID types.NodeID) error { - q.Lock() - defer q.Unlock() - - for index, sender := range q.chunkSenders { - if sender == peerID && !q.chunkReturned[index] { - err := q.discard(index) + q.mtx.Lock() + defer q.mtx.Unlock() + for _, item := range q.items { + if item.sender == peerID && item.isDiscardable() { + err := q.discard(item.chunkID) if err != nil { return err } - - delete(q.chunkSenders, index) } } - return nil } // GetSender returns the sender of the chunk with the given index, or empty if // not found. -func (q *chunkQueue) GetSender(index uint32) types.NodeID { - q.Lock() - defer q.Unlock() - return q.chunkSenders[index] -} - -// Has checks whether a chunk exists in the queue. -func (q *chunkQueue) Has(index uint32) bool { - q.Lock() - defer q.Unlock() - return q.chunkFiles[index] != "" +func (q *chunkQueue) GetSender(chunkID bytes.HexBytes) types.NodeID { + q.mtx.Lock() + defer q.mtx.Unlock() + item, ok := q.items[chunkID.String()] + if ok { + return item.sender + } + return "" } // load loads a chunk from disk, or nil if the chunk is not in the queue. The caller must hold the // mutex lock. -func (q *chunkQueue) load(index uint32) (*chunk, error) { - path, ok := q.chunkFiles[index] +func (q *chunkQueue) load(chunkID bytes.HexBytes) (*chunk, error) { + chunkIDKey := chunkID.String() + item, ok := q.items[chunkIDKey] if !ok { + return nil, errNoChunkItem + } + if item.status != receivedStatus { return nil, nil } - - body, err := os.ReadFile(path) + data, err := item.loadData() if err != nil { - return nil, fmt.Errorf("failed to load chunk %v: %w", index, err) + return nil, err } - return &chunk{ - Height: q.snapshot.Height, - Format: q.snapshot.Format, - Index: index, - Chunk: body, - Sender: q.chunkSenders[index], + Height: q.snapshot.Height, + Version: q.snapshot.Version, + ID: chunkID, + Chunk: data, + Sender: item.sender, }, nil } // Next returns the next chunk from the queue, or errDone if all chunks have been returned. It // blocks until the chunk is available. Concurrent Next() calls may return the same chunk. 
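On the consuming side, Next (just below) is now channel-driven: it blocks on applyCh and yields chunks in arrival order rather than by index, and every applied chunk can advertise further IDs to fetch via the NextChunks field of ResponseApplySnapshotChunk. A sketch of the resulting apply loop, with the callbacks as illustrative stand-ins:

package main

import "errors"

var errDone = errors.New("chunk queue has completed")

type chunk struct {
	ID    []byte
	Chunk []byte
}

// applyLoop drains the queue until the application reports that the snapshot
// is complete or the queue is closed; enqueue schedules newly advertised IDs.
func applyLoop(
	next func() (*chunk, error),
	enqueue func(ids ...[]byte),
	apply func(*chunk) (nextIDs [][]byte, complete bool, err error),
) error {
	for {
		c, err := next()
		if errors.Is(err, errDone) {
			return nil // queue closed
		}
		if err != nil {
			return err
		}
		nextIDs, complete, err := apply(c)
		if err != nil {
			return err
		}
		if complete {
			return nil
		}
		enqueue(nextIDs...)
	}
}

func main() {}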
func (q *chunkQueue) Next() (*chunk, error) { - q.Lock() - - var chunk *chunk - index, err := q.nextUp() - if err == nil { - chunk, err = q.load(index) - if err == nil { - q.chunkReturned[index] = true - } - } - - q.Unlock() - - if chunk != nil || err != nil { - return chunk, err - } - select { - case _, ok := <-q.WaitFor(index): + case chunkID, ok := <-q.applyCh: if !ok { return nil, errDone // queue closed } + q.mtx.Lock() + defer q.mtx.Unlock() + loadedChunk, err := q.load(chunkID) + if err != nil { + return nil, err + } + item, ok := q.items[chunkID.String()] + if !ok { + return nil, errNoChunkItem + } + item.status = doneStatus + q.doneCount++ + return loadedChunk, nil case <-time.After(chunkTimeout): return nil, errTimeout } - - q.Lock() - defer q.Unlock() - - chunk, err = q.load(index) - if err != nil { - return nil, err - } - - q.chunkReturned[index] = true - return chunk, nil } -// nextUp returns the next chunk to be returned, or errDone if all chunks have been returned. The -// caller must hold the mutex lock. -func (q *chunkQueue) nextUp() (uint32, error) { - if q.snapshot == nil { - return 0, errDone - } - - for i := uint32(0); i < q.snapshot.Chunks; i++ { - if !q.chunkReturned[i] { - return i, nil - } - } - - return 0, errDone +// Retry schedules a chunk to be retried, without refetching it. +func (q *chunkQueue) Retry(chunkID bytes.HexBytes) { + q.mtx.Lock() + defer q.mtx.Unlock() + q.retry(chunkID) } -// Retry schedules a chunk to be retried, without refetching it. -func (q *chunkQueue) Retry(index uint32) { - q.Lock() - defer q.Unlock() - delete(q.chunkReturned, index) +func (q *chunkQueue) retry(chunkID bytes.HexBytes) { + item, ok := q.items[chunkID.String()] + if !ok || (item.status != receivedStatus && item.status != doneStatus) { + return + } + q.requestQueue = append(q.requestQueue, chunkID) + q.items[chunkID.String()].status = initStatus } // RetryAll schedules all chunks to be retried, without refetching them. func (q *chunkQueue) RetryAll() { - q.Lock() - defer q.Unlock() - q.chunkReturned = make(map[uint32]bool) -} - -// Size returns the total number of chunks for the snapshot and queue, or 0 when closed. -func (q *chunkQueue) Size() uint32 { - q.Lock() - defer q.Unlock() - - if q.snapshot == nil { - return 0 + q.mtx.Lock() + defer q.mtx.Unlock() + q.requestQueue = make([]bytes.HexBytes, 0, len(q.items)) + for _, item := range q.items { + q.retry(item.chunkID) } - - return q.snapshot.Chunks } -// WaitFor returns a channel that receives a chunk index when it arrives in the queue, or -// immediately if it has already arrived. The channel is closed without a value if the queue is -// closed or if the chunk index is not valid. -func (q *chunkQueue) WaitFor(index uint32) <-chan uint32 { - q.Lock() - defer q.Unlock() +// WaitFor returns a channel that receives a chunk ID when it arrives in the queue, or +// immediately if it has already arrived. 
The channel is closed without a value if the queue is closed +func (q *chunkQueue) WaitFor(chunkID bytes.HexBytes) <-chan bytes.HexBytes { + q.mtx.Lock() + defer q.mtx.Unlock() + return q.waitFor(chunkID) +} - ch := make(chan uint32, 1) - switch { - case q.snapshot == nil: +func (q *chunkQueue) waitFor(chunkID bytes.HexBytes) <-chan bytes.HexBytes { + ch := make(chan bytes.HexBytes, 1) + if q.snapshot == nil { close(ch) - - case index >= q.snapshot.Chunks: + return ch + } + item, ok := q.items[chunkID.String()] + if !ok { + ch <- chunkID close(ch) + return ch + } + item.waitChs = append(item.waitChs, ch) + return ch +} - case q.chunkFiles[index] != "": - ch <- index - close(ch) +// DoneChunksCount returns the number of chunks that have been returned +func (q *chunkQueue) DoneChunksCount() int { + q.mtx.Lock() + defer q.mtx.Unlock() + return q.doneCount +} - default: - if q.waiters[index] == nil { - q.waiters[index] = make([]chan<- uint32, 0) - } +func (q *chunkQueue) validateChunk(chunk *chunk) error { + if chunk.Height != q.snapshot.Height { + return fmt.Errorf("invalid chunk height %v, expected %v", + chunk.Height, + q.snapshot.Height) + } + if chunk.Version != q.snapshot.Version { + return fmt.Errorf("invalid chunk version %v, expected %v", + chunk.Version, + q.snapshot.Version) + } + return nil +} - q.waiters[index] = append(q.waiters[index], ch) +func (c *chunkItem) remove() error { + if err := os.Remove(c.file); err != nil { + return fmt.Errorf("failed to remove chunk %s: %w", c.chunkID, err) } + c.file = "" + return nil +} - return ch +func (c *chunkItem) write(data []byte) error { + err := os.WriteFile(c.file, data, 0600) + if err != nil { + return fmt.Errorf("failed to save chunk %v to file %v: %w", c.chunkID, c.file, err) + } + return nil } -func (q *chunkQueue) numChunksReturned() int { - q.Lock() - defer q.Unlock() +func (c *chunkItem) loadData() ([]byte, error) { + body, err := os.ReadFile(c.file) + if err != nil { + return nil, fmt.Errorf("failed to load chunk %s: %w", c.chunkID, err) + } + return body, nil +} - cnt := 0 - for _, b := range q.chunkReturned { - if b { - cnt++ +func (c *chunkItem) closeWaitChs(send bool) { + for _, ch := range c.waitChs { + if send { + ch <- c.chunkID } + close(ch) } - return cnt + c.waitChs = nil +} + +// isDiscardable returns true if a status is suitable for transition to discarded, otherwise false +func (c *chunkItem) isDiscardable() bool { + return c.status == initStatus } diff --git a/internal/statesync/chunks_test.go b/internal/statesync/chunks_test.go index 85cc23a806..1ba6af6612 100644 --- a/internal/statesync/chunks_test.go +++ b/internal/statesync/chunks_test.go @@ -4,562 +4,363 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/internal/test/factory" + "github.com/tendermint/tendermint/libs/bytes" ) -func setupChunkQueue(t *testing.T) (*chunkQueue, func()) { - snapshot := &snapshot{ - Height: 3, - Format: 1, - Chunks: 5, - Hash: []byte{7}, - Metadata: nil, - } - queue, err := newChunkQueue(snapshot, t.TempDir()) - require.NoError(t, err) - teardown := func() { - err := queue.Close() - require.NoError(t, err) - } - return queue, teardown +type ChunkQueueTestSuite struct { + suite.Suite + + snapshot *snapshot + queue *chunkQueue + tempDir string + chunks []*chunk } -func TestNewChunkQueue_TempDir(t *testing.T) { - snapshot := &snapshot{ +func TestChunkQueue(t 
*testing.T) { + suite.Run(t, new(ChunkQueueTestSuite)) +} + +func (suite *ChunkQueueTestSuite) SetupSuite() { + suite.snapshot = &snapshot{ Height: 3, - Format: 1, - Chunks: 5, - Hash: []byte{7}, + Version: 1, + Hash: []byte{0}, Metadata: nil, } - dir := t.TempDir() - queue, err := newChunkQueue(snapshot, dir) - require.NoError(t, err) - - files, err := os.ReadDir(dir) - require.NoError(t, err) - assert.Len(t, files, 1) - - err = queue.Close() - require.NoError(t, err) - - files, err = os.ReadDir(dir) - require.NoError(t, err) - assert.Len(t, files, 0) + suite.chunks = []*chunk{ + { + Height: 3, + Version: 1, + ID: []byte{0}, + Chunk: []byte{3, 1, 0}, + Sender: "a", + }, + { + Height: 3, + Version: 1, + ID: []byte{1}, + Chunk: []byte{3, 1, 1}, + Sender: "b", + }, + { + Height: 3, + Version: 1, + ID: []byte{2}, + Chunk: []byte{3, 1, 2}, + Sender: "c", + }, + { + Height: 3, + Version: 1, + ID: []byte{3}, + Chunk: []byte{3, 1, 3}, + Sender: "d", + }, + } } -func TestChunkQueue(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - - // Adding the first chunk should be fine - added, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) - require.NoError(t, err) - assert.True(t, added) - - // Adding the last chunk should also be fine - added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}}) - require.NoError(t, err) - assert.True(t, added) +func (suite *ChunkQueueTestSuite) SetupTest() { + var err error + suite.tempDir = suite.T().TempDir() + suite.queue, err = newChunkQueue(suite.snapshot, suite.tempDir, 100) + suite.Require().NoError(err) +} - // Adding the first or last chunks again should return false - added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) - require.NoError(t, err) - assert.False(t, added) +func (suite *ChunkQueueTestSuite) TearDownTest() { + err := suite.queue.Close() + suite.Require().NoError(err) +} - added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}}) - require.NoError(t, err) - assert.False(t, added) +func (suite *ChunkQueueTestSuite) TestTempDir() { + files, err := os.ReadDir(suite.tempDir) + suite.Require().NoError(err) + suite.Require().Len(files, 1) - // Adding the remaining chunks in reverse should be fine - added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}}) - require.NoError(t, err) - assert.True(t, added) + err = suite.queue.Close() + suite.Require().NoError(err) - added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}}) - require.NoError(t, err) - assert.True(t, added) + files, err = os.ReadDir(suite.tempDir) + suite.Require().NoError(err) + suite.Require().Len(files, 0) +} - added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}}) - require.NoError(t, err) - assert.True(t, added) +func (suite *ChunkQueueTestSuite) TestChunkQueue() { + suite.initChunks() + testCases := []struct { + chunk *chunk + want bool + }{ + {chunk: suite.chunks[0], want: true}, + {chunk: suite.chunks[2], want: true}, + {chunk: suite.chunks[0], want: false}, + {chunk: suite.chunks[2], want: false}, + {chunk: suite.chunks[1], want: true}, + } + require := suite.Require() + for _, tc := range testCases { + added, err := suite.queue.Add(tc.chunk) + require.NoError(err) + require.Equal(tc.want, added) + } // At this point, we should be able to retrieve them all via Next - for i := 0; i < 5; i++ { - c, err := queue.Next() - require.NoError(t, err) - 
assert.Equal( - t, - &chunk{Height: 3, Format: 1, Index: uint32(i), Chunk: []byte{3, 1, byte(i)}}, - c, - ) + for _, i := range []int{0, 2, 1} { + c, err := suite.queue.Next() + require.NoError(err) + require.Equal(suite.chunks[i], c) } - _, err = queue.Next() - require.Error(t, err) - assert.Equal(t, errDone, err) // It should still be possible to try to add chunks (which will be ignored) - added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) - require.NoError(t, err) - assert.False(t, added) + added, err := suite.queue.Add(suite.chunks[0]) + require.NoError(err) + require.False(added) - // After closing the queue it will also return false - err = queue.Close() - require.NoError(t, err) - added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) - require.NoError(t, err) - assert.False(t, added) + // After closing the requestQueue it will also return false + err = suite.queue.Close() + require.NoError(err) + added, err = suite.queue.Add(suite.chunks[0]) + require.Error(err, errNilSnapshot) + require.False(added) // Closing the queue again should also be fine - err = queue.Close() - require.NoError(t, err) + err = suite.queue.Close() + require.NoError(err) } -func TestChunkQueue_Add_ChunkErrors(t *testing.T) { - testcases := map[string]struct { +func (suite *ChunkQueueTestSuite) TestAddChunkErrors() { + testCases := map[string]struct { chunk *chunk }{ "nil chunk": {nil}, - "nil body": {&chunk{Height: 3, Format: 1, Index: 0, Chunk: nil}}, - "wrong height": {&chunk{Height: 9, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}}, - "wrong format": {&chunk{Height: 3, Format: 9, Index: 0, Chunk: []byte{3, 1, 0}}}, - "invalid index": {&chunk{Height: 3, Format: 1, Index: 5, Chunk: []byte{3, 1, 0}}}, + "nil body": {&chunk{Height: 3, Version: 1, ID: []byte{1}, Chunk: nil}}, + "wrong height": {&chunk{Height: 9, Version: 1, ID: []byte{2}, Chunk: []byte{2}}}, + "wrong format": {&chunk{Height: 3, Version: 9, ID: []byte{3}, Chunk: []byte{3}}}, + "invalid index": {&chunk{Height: 3, Version: 1, ID: []byte{4}, Chunk: []byte{4}}}, } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - _, err := queue.Add(tc.chunk) - require.Error(t, err) + for name, tc := range testCases { + suite.Run(name, func() { + _, err := suite.queue.Add(tc.chunk) + suite.Require().Error(err) }) } } -func TestChunkQueue_Allocate(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - - for i := uint32(0); i < queue.Size(); i++ { - index, err := queue.Allocate() - require.NoError(t, err) - assert.EqualValues(t, i, index) - } - - _, err := queue.Allocate() - require.Error(t, err) - assert.Equal(t, errDone, err) - - for i := uint32(0); i < queue.Size(); i++ { - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) - require.NoError(t, err) - } - - // After all chunks have been allocated and retrieved, discarding a chunk will reallocate it. - err = queue.Discard(2) - require.NoError(t, err) - - index, err := queue.Allocate() - require.NoError(t, err) - assert.EqualValues(t, 2, index) - _, err = queue.Allocate() - require.Error(t, err) - assert.Equal(t, errDone, err) - - // Discarding a chunk the closing the queue will return errDone. 
- err = queue.Discard(2) - require.NoError(t, err) - err = queue.Close() - require.NoError(t, err) - _, err = queue.Allocate() - require.Error(t, err) - assert.Equal(t, errDone, err) -} - -func TestChunkQueue_Discard(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - +func (suite *ChunkQueueTestSuite) TestDiscard() { + suite.initChunks() + require := suite.Require() // Add a few chunks to the queue and fetch a couple - _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{byte(0)}}) - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{byte(1)}}) - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{byte(2)}}) - require.NoError(t, err) - - c, err := queue.Next() - require.NoError(t, err) - assert.EqualValues(t, 0, c.Index) - c, err = queue.Next() - require.NoError(t, err) - assert.EqualValues(t, 1, c.Index) - - // Discarding the first chunk and re-adding it should cause it to be returned - // immediately by Next(), before procceeding with chunk 2 - err = queue.Discard(0) - require.NoError(t, err) - added, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{byte(0)}}) - require.NoError(t, err) - assert.True(t, added) - c, err = queue.Next() - require.NoError(t, err) - assert.EqualValues(t, 0, c.Index) - c, err = queue.Next() - require.NoError(t, err) - assert.EqualValues(t, 2, c.Index) - - // Discard then allocate, add and fetch all chunks - for i := uint32(0); i < queue.Size(); i++ { - err := queue.Discard(i) - require.NoError(t, err) + for _, c := range suite.chunks { + _, err := suite.queue.Add(c) + require.NoError(err) } - for i := uint32(0); i < queue.Size(); i++ { - _, err := queue.Allocate() - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) - require.NoError(t, err) - c, err = queue.Next() - require.NoError(t, err) - assert.EqualValues(t, i, c.Index) + for _, i := range []int{0, 1} { + c, err := suite.queue.Next() + require.NoError(err) + require.EqualValues(suite.chunks[i].ID, c.ID) } + // Discarding the first chunk and re-adding it should cause it to be returned + // immediately by Next(), before proceeding with chunk 2 + err := suite.queue.Discard(suite.chunks[0].ID) + require.NoError(err) + added, err := suite.queue.Add(suite.chunks[0]) + require.NoError(err) + require.True(added) + nextChunk, err := suite.queue.Next() + require.NoError(err) + require.EqualValues(suite.chunks[2].ID, nextChunk.ID) // Discarding a non-existent chunk does nothing. - err = queue.Discard(99) - require.NoError(t, err) + err = suite.queue.Discard(factory.RandomHash()) + require.NoError(err) // When discard a couple of chunks, we should be able to allocate, add, and fetch them again. 
- err = queue.Discard(3) - require.NoError(t, err) - err = queue.Discard(1) - require.NoError(t, err) - - index, err := queue.Allocate() - require.NoError(t, err) - assert.EqualValues(t, 1, index) - index, err = queue.Allocate() - require.NoError(t, err) - assert.EqualValues(t, 3, index) - - added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3}}) - require.NoError(t, err) - assert.True(t, added) - added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{1}}) - require.NoError(t, err) - assert.True(t, added) - - chunk, err := queue.Next() - require.NoError(t, err) - assert.EqualValues(t, 1, chunk.Index) - - chunk, err = queue.Next() - require.NoError(t, err) - assert.EqualValues(t, 3, chunk.Index) - - _, err = queue.Next() - require.Error(t, err) - assert.Equal(t, errDone, err) - - // After closing the queue, discarding does nothing - err = queue.Close() - require.NoError(t, err) - err = queue.Discard(2) - require.NoError(t, err) -} + for _, i := range []int{1, 2} { + err = suite.queue.Discard(suite.chunks[i].ID) + require.NoError(err) + } -func TestChunkQueue_DiscardSender(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - - // Allocate and add all chunks to the queue - senders := []types.NodeID{types.NodeID("a"), types.NodeID("b"), types.NodeID("c")} - for i := uint32(0); i < queue.Size(); i++ { - _, err := queue.Allocate() - require.NoError(t, err) - _, err = queue.Add(&chunk{ - Height: 3, - Format: 1, - Index: i, - Chunk: []byte{byte(i)}, - Sender: senders[int(i)%len(senders)], - }) - require.NoError(t, err) + for _, i := range []int{2, 1} { + added, err = suite.queue.Add(suite.chunks[i]) + require.NoError(err) + require.True(added) } - // Fetch the first three chunks - for i := uint32(0); i < 3; i++ { - _, err := queue.Next() - require.NoError(t, err) + for _, i := range []int{3, 0, 2, 1} { + nextChunk, err = suite.queue.Next() + require.NoError(err) + require.EqualValues(suite.chunks[i].ID, nextChunk.ID) } + // After closing the requestQueue, discarding does nothing + err = suite.queue.Close() + require.NoError(err) + err = suite.queue.Discard(suite.chunks[2].ID) + require.NoError(err) +} + +func (suite *ChunkQueueTestSuite) TestDiscardSender() { + suite.initChunks() + suite.processChunks() + // Discarding an unknown sender should do nothing - err := queue.DiscardSender(types.NodeID("x")) - require.NoError(t, err) - _, err = queue.Allocate() - assert.Equal(t, errDone, err) + err := suite.queue.DiscardSender("unknown") + suite.Require().NoError(err) // Discarding sender b should discard chunk 4, but not chunk 1 which has already been // returned. 
- err = queue.DiscardSender(types.NodeID("b")) - require.NoError(t, err) - index, err := queue.Allocate() - require.NoError(t, err) - assert.EqualValues(t, 4, index) - _, err = queue.Allocate() - assert.Equal(t, errDone, err) + err = suite.queue.DiscardSender(suite.chunks[1].Sender) + suite.Require().NoError(err) + suite.Require().True(suite.queue.IsRequestQueueEmpty()) } -func TestChunkQueue_GetSender(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() +func (suite *ChunkQueueTestSuite) TestGetSender() { + suite.initChunks() + require := suite.Require() + _, err := suite.queue.Add(suite.chunks[0]) + require.NoError(err) + _, err = suite.queue.Add(suite.chunks[1]) + require.NoError(err) - peerAID := types.NodeID("aa") - peerBID := types.NodeID("bb") - - _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{1}, Sender: peerAID}) - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{2}, Sender: peerBID}) - require.NoError(t, err) - - assert.EqualValues(t, "aa", queue.GetSender(0)) - assert.EqualValues(t, "bb", queue.GetSender(1)) - assert.EqualValues(t, "", queue.GetSender(2)) + require.EqualValues(suite.chunks[0].Sender, suite.queue.GetSender(suite.chunks[0].ID)) + require.EqualValues(suite.chunks[1].Sender, suite.queue.GetSender(suite.chunks[1].ID)) + require.EqualValues("", suite.queue.GetSender(suite.chunks[2].ID)) // After the chunk has been processed, we should still know who the sender was - chunk, err := queue.Next() - require.NoError(t, err) - require.NotNil(t, chunk) - require.EqualValues(t, 0, chunk.Index) - assert.EqualValues(t, "aa", queue.GetSender(0)) + nextChunk, err := suite.queue.Next() + require.NoError(err) + require.NotNil(nextChunk) + require.EqualValues(suite.chunks[0].ID, nextChunk.ID) + require.EqualValues(suite.chunks[0].Sender, suite.queue.GetSender(suite.chunks[0].ID)) } -func TestChunkQueue_Next(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - +func (suite *ChunkQueueTestSuite) TestNext() { + suite.initChunks() + require := suite.Require() // Next should block waiting for the next chunks, even when given out of order. 
- chNext := make(chan *chunk, 10) + chNext := make(chan *chunk) go func() { for { - c, err := queue.Next() + c, err := suite.queue.Next() if err == errDone { close(chNext) break } - require.NoError(t, err) + require.NoError(err) chNext <- c } }() - assert.Empty(t, chNext) - _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: types.NodeID("b")}) - require.NoError(t, err) + require.Empty(chNext) + _, err := suite.queue.Add(suite.chunks[1]) + require.NoError(err) select { case <-chNext: - assert.Fail(t, "channel should be empty") + suite.Fail("channel should be empty") default: } - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: types.NodeID("a")}) - require.NoError(t, err) - - assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: types.NodeID("a")}, - <-chNext) - assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: types.NodeID("b")}, - <-chNext) - - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: types.NodeID("e")}) - require.NoError(t, err) - select { - case <-chNext: - assert.Fail(t, "channel should be empty") - default: - } + _, err = suite.queue.Add(suite.chunks[0]) + require.NoError(err) + require.Equal(suite.chunks[1], <-chNext) + require.Equal(suite.chunks[0], <-chNext) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: types.NodeID("c")}) - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: types.NodeID("d")}) - require.NoError(t, err) - - assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: types.NodeID("c")}, - <-chNext) - assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: types.NodeID("d")}, - <-chNext) - assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: types.NodeID("e")}, - <-chNext) + err = suite.queue.Close() + require.NoError(err) _, ok := <-chNext - assert.False(t, ok, "channel should be closed") - - // Calling next on a finished queue should return done - _, err = queue.Next() - assert.Equal(t, errDone, err) + require.False(ok) } -func TestChunkQueue_Next_Closed(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - +func (suite *ChunkQueueTestSuite) TestNextClosed() { + suite.initChunks() + require := suite.Require() // Calling Next on a closed queue should return done - _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}}) - require.NoError(t, err) - err = queue.Close() - require.NoError(t, err) - - _, err = queue.Next() - assert.Equal(t, errDone, err) -} - -func TestChunkQueue_Retry(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - - allocateAddChunksToQueue(t, queue) + _, err := suite.queue.Add(suite.chunks[1]) + require.NoError(err) + err = suite.queue.Close() + require.NoError(err) - // Retrying a couple of chunks makes Next() return them, but they are not allocatable - queue.Retry(3) - queue.Retry(1) - - _, err := queue.Allocate() - assert.Equal(t, errDone, err) - - chunk, err := queue.Next() - require.NoError(t, err) - assert.EqualValues(t, 1, chunk.Index) - - chunk, err = queue.Next() - require.NoError(t, err) - assert.EqualValues(t, 3, chunk.Index) - - _, err = queue.Next() - assert.Equal(t, errDone, err) + _, err = suite.queue.Next() + require.Equal(errDone, err) } -func 
TestChunkQueue_RetryAll(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - - allocateAddChunksToQueue(t, queue) +func (suite *ChunkQueueTestSuite) TestRetry() { + suite.initChunks() + suite.processChunks() + require := suite.Require() - _, err := queue.Next() - assert.Equal(t, errDone, err) - - queue.RetryAll() - - _, err = queue.Allocate() - assert.Equal(t, errDone, err) - - for i := uint32(0); i < queue.Size(); i++ { - chunk, err := queue.Next() - require.NoError(t, err) - assert.EqualValues(t, i, chunk.Index) + for i := range []int{2, 0} { + suite.queue.Retry(suite.chunks[i].ID) + chunkID, err := suite.queue.Dequeue() + require.NoError(err) + require.Equal(chunkID, suite.chunks[i].ID) } - - _, err = queue.Next() - assert.Equal(t, errDone, err) } -func TestChunkQueue_Size(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - - assert.EqualValues(t, 5, queue.Size()) - - err := queue.Close() - require.NoError(t, err) - assert.EqualValues(t, 0, queue.Size()) +func (suite *ChunkQueueTestSuite) TestRetryAll() { + suite.initChunks() + suite.processChunks() + require := suite.Require() + require.True(suite.queue.IsRequestQueueEmpty()) + suite.queue.RetryAll() + require.Equal(len(suite.chunks), suite.queue.RequestQueueLen()) } -func TestChunkQueue_WaitFor(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - - waitFor1 := queue.WaitFor(1) - waitFor4 := queue.WaitFor(4) - - // Adding 0 and 2 should not trigger waiters - _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}}) - require.NoError(t, err) - select { - case <-waitFor1: - require.Fail(t, "WaitFor(1) should not trigger on 0 or 2") - case <-waitFor4: - require.Fail(t, "WaitFor(4) should not trigger on 0 or 2") - default: +func (suite *ChunkQueueTestSuite) TestWaitFor() { + suite.initChunks() + require := suite.Require() + waitForChs := make([]<-chan bytes.HexBytes, len(suite.chunks)) + for i, c := range suite.chunks { + waitForChs[i] = suite.queue.WaitFor(c.ID) } - // Adding 1 should trigger WaitFor(1), but not WaitFor(4). The channel should be closed. - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}}) - require.NoError(t, err) - assert.EqualValues(t, 1, <-waitFor1) - _, ok := <-waitFor1 - assert.False(t, ok) - select { - case <-waitFor4: - require.Fail(t, "WaitFor(4) should not trigger on 0 or 2") - default: + for _, ch := range waitForChs { + select { + case <-ch: + require.Fail("WaitFor should not trigger") + default: + } } + _, err := suite.queue.Add(suite.chunks[0]) + require.NoError(err) + require.EqualValues(suite.chunks[0].ID, <-waitForChs[0]) + _, ok := <-waitForChs[0] + require.False(ok) + // Fetch the first chunk. At this point, waiting for either 0 (retrieved from pool) or 1 // (queued in pool) should immediately return true. - c, err := queue.Next() - require.NoError(t, err) - assert.EqualValues(t, 0, c.Index) - - w := queue.WaitFor(0) - assert.EqualValues(t, 0, <-w) - _, ok = <-w - assert.False(t, ok) - - w = queue.WaitFor(1) - assert.EqualValues(t, 1, <-w) - _, ok = <-w - assert.False(t, ok) + c, err := suite.queue.Next() + require.NoError(err) + require.EqualValues(suite.chunks[0].ID, c.ID) // Close the queue. This should cause the waiter for 4 to close, and also cause any future // waiters to get closed channels. 
- err = queue.Close() - require.NoError(t, err) - _, ok = <-waitFor4 - assert.False(t, ok) - - w = queue.WaitFor(3) - _, ok = <-w - assert.False(t, ok) + err = suite.queue.Close() + require.NoError(err) + _, ok = <-waitForChs[2] + require.False(ok) } -func TestNumChunkReturned(t *testing.T) { - queue, teardown := setupChunkQueue(t) - defer teardown() - - assert.EqualValues(t, 5, queue.Size()) - - allocateAddChunksToQueue(t, queue) - assert.EqualValues(t, 5, queue.numChunksReturned()) - - err := queue.Close() - require.NoError(t, err) +func (suite *ChunkQueueTestSuite) initChunks() { + for _, c0 := range suite.chunks { + suite.queue.Enqueue(c0.ID) + c1, err := suite.queue.Dequeue() + suite.Require().NoError(err) + suite.Require().Equal(c0.ID, c1) + } } -// Allocate and add all chunks to the queue -func allocateAddChunksToQueue(t *testing.T, q *chunkQueue) { - t.Helper() - for i := uint32(0); i < q.Size(); i++ { - _, err := q.Allocate() - require.NoError(t, err) - _, err = q.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) - require.NoError(t, err) - _, err = q.Next() - require.NoError(t, err) +func (suite *ChunkQueueTestSuite) processChunks() { + for _, c := range suite.chunks { + added, err := suite.queue.Add(c) + suite.Require().NoError(err) + suite.Require().True(added) + c1, err := suite.queue.Next() + suite.Require().NoError(err) + suite.Require().Equal(c, c1) } } diff --git a/internal/statesync/metrics.gen.go b/internal/statesync/metrics.gen.go index b4d5caa12c..91c83fb657 100644 --- a/internal/statesync/metrics.gen.go +++ b/internal/statesync/metrics.gen.go @@ -38,12 +38,6 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "snapshot_chunk", Help: "The current number of chunks that have been processed.", }, labels).With(labelsAndValues...), - SnapshotChunkTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "snapshot_chunk_total", - Help: "The total number of chunks in the current snapshot.", - }, labels).With(labelsAndValues...), BackFilledBlocks: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -65,7 +59,6 @@ func NopMetrics() *Metrics { ChunkProcessAvgTime: discard.NewGauge(), SnapshotHeight: discard.NewGauge(), SnapshotChunk: discard.NewCounter(), - SnapshotChunkTotal: discard.NewGauge(), BackFilledBlocks: discard.NewCounter(), BackFillBlocksTotal: discard.NewGauge(), } diff --git a/internal/statesync/metrics.go b/internal/statesync/metrics.go index a8a3af9152..b1aea963c3 100644 --- a/internal/statesync/metrics.go +++ b/internal/statesync/metrics.go @@ -21,8 +21,6 @@ type Metrics struct { SnapshotHeight metrics.Gauge // The current number of chunks that have been processed. SnapshotChunk metrics.Counter - // The total number of chunks in the current snapshot. - SnapshotChunkTotal metrics.Gauge // The current number of blocks that have been back-filled. BackFilledBlocks metrics.Counter // The total number of blocks that need to be back-filled. 
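The chunk-queue tests above pin down two WaitFor behaviours: the returned channel delivers the chunk ID at most once and is then closed, and Close() closes every outstanding waiter so pending receives observe ok == false. The diff does not show the queue's internals, so the following is only a minimal sketch of such waiter bookkeeping; waiterSet, newWaiterSet and deliver are illustrative names, and IDs are assumed to be keyed by their hex encoding:

package chunkqueue

import (
	"encoding/hex"
	"sync"
)

// waiterSet sketches the WaitFor bookkeeping the tests above rely on.
type waiterSet struct {
	mtx     sync.Mutex
	waiters map[string][]chan []byte
}

func newWaiterSet() *waiterSet {
	return &waiterSet{waiters: make(map[string][]chan []byte)}
}

// WaitFor returns a channel that fires once when the chunk with this ID arrives.
func (w *waiterSet) WaitFor(id []byte) <-chan []byte {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	ch := make(chan []byte, 1) // buffered so deliver never blocks
	key := hex.EncodeToString(id)
	w.waiters[key] = append(w.waiters[key], ch)
	return ch
}

// deliver wakes every waiter registered for id, then closes their channels.
func (w *waiterSet) deliver(id []byte) {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	key := hex.EncodeToString(id)
	for _, ch := range w.waiters[key] {
		ch <- id
		close(ch)
	}
	delete(w.waiters, key)
}

// Close closes all outstanding waiters, so pending receives see ok == false.
func (w *waiterSet) Close() {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	for key, chs := range w.waiters {
		for _, ch := range chs {
			close(ch)
		}
		delete(w.waiters, key)
	}
}

Buffering each waiter channel by one element lets deliver complete without blocking even when the receiver has not yet reached its select.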
diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 0943586ec2..961a8f9591 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -3,6 +3,7 @@ package statesync import ( "bytes" "context" + "encoding/hex" "errors" "fmt" "runtime/debug" @@ -324,7 +325,7 @@ func (r *Reactor) OnStart(ctx context.Context) error { if r.needsStateSync { r.logger.Info("starting state sync") if _, err := r.Sync(ctx); err != nil { - r.logger.Error("state sync failed; shutting down this node", "err", err) + r.logger.Error("state sync failed; shutting down this node", "error", err) return err } } @@ -393,7 +394,7 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { } if err := r.Backfill(ctx, state); err != nil { - r.logger.Error("backfill failed. Proceeding optimistically...", "err", err) + r.logger.Error("backfill failed. Proceeding optimistically...", "error", err) } if r.eventBus != nil { @@ -496,8 +497,11 @@ func (r *Reactor) backfill( sleepTime time.Duration, lightBlockResponseTimeout time.Duration, ) error { - r.logger.Info("starting backfill process...", "startHeight", startHeight, - "stopHeight", stopHeight, "stopTime", stopTime, "trustedBlockID", trustedBlockID) + r.logger.Info("starting backfill process...", + "startHeight", startHeight, + "stopHeight", stopHeight, + "stopTime", stopTime, + "trustedBlockID", trustedBlockID) r.backfillBlockTotal = startHeight - stopHeight + 1 r.metrics.BackFillBlocksTotal.Set(float64(r.backfillBlockTotal)) @@ -517,7 +521,7 @@ func (r *Reactor) backfill( // time. Ideally we want the verification process to never have to be // waiting on blocks. If it takes 4s to retrieve a block and 1s to verify // it, then steady state involves four workers. - for i := 0; i < int(r.cfg.Fetchers); i++ { + for i := 0; i < r.cfg.Fetchers; i++ { go func() { for { select { @@ -558,7 +562,8 @@ func (r *Reactor) backfill( } else { // we don't punish the peer as it might just have not responded in time r.logger.Info("backfill: error with fetching light block", - "height", height, "err", err) + "height", height, + "error", err) } continue } @@ -568,7 +573,8 @@ func (r *Reactor) backfill( err = lb.ValidateBasic(chainID) if err != nil || lb.Height != height { r.logger.Info("backfill: fetched light block failed validate basic, removing peer...", - "err", err, "height", height) + "height", height, + "error", err) queue.retry(height) if serr := r.sendBlockError(ctx, p2p.PeerError{ NodeID: peer, @@ -675,7 +681,7 @@ func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envel case *ssproto.SnapshotsRequest: snapshots, err := r.recentSnapshots(ctx, recentSnapshots) if err != nil { - logger.Error("failed to fetch snapshots", "err", err) + logger.Error("failed to fetch snapshots", "error", err) return nil } @@ -683,7 +689,7 @@ func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envel logger.Info( "advertising snapshot", "height", snapshot.Height, - "format", snapshot.Format, + "version", snapshot.Version, "peer", envelope.From, ) @@ -691,8 +697,7 @@ func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envel To: envelope.From, Message: &ssproto.SnapshotsResponse{ Height: snapshot.Height, - Format: snapshot.Format, - Chunks: snapshot.Chunks, + Version: snapshot.Version, Hash: snapshot.Hash, Metadata: snapshot.Metadata, }, @@ -708,11 +713,12 @@ func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envel return nil } - logger.Info("received snapshot", 
"height", msg.Height, "format", msg.Format) + logger.Info("received snapshot", + "height", msg.Height, + "format", msg.Version) _, err := syncer.AddSnapshot(envelope.From, &snapshot{ Height: msg.Height, - Format: msg.Format, - Chunks: msg.Chunks, + Version: msg.Version, Hash: msg.Hash, Metadata: msg.Metadata, }) @@ -720,13 +726,15 @@ func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envel logger.Error( "failed to add snapshot", "height", msg.Height, - "format", msg.Format, + "version", msg.Version, "channel", envelope.ChannelID, - "err", err, + "error", err, ) return nil } - logger.Info("added snapshot", "height", msg.Height, "format", msg.Format) + logger.Info("added snapshot", + "height", msg.Height, + "version", msg.Version) default: return fmt.Errorf("received unknown message: %T", msg) @@ -743,35 +751,35 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope case *ssproto.ChunkRequest: r.logger.Debug("received chunk request", "height", msg.Height, - "format", msg.Format, - "chunk", msg.Index, + "version", msg.Version, + "chunkID", hex.EncodeToString(msg.ChunkId), "peer", envelope.From) resp, err := r.conn.LoadSnapshotChunk(ctx, &abci.RequestLoadSnapshotChunk{ - Height: msg.Height, - Format: msg.Format, - Chunk: msg.Index, + Height: msg.Height, + Version: msg.Version, + ChunkId: msg.ChunkId, }) if err != nil { r.logger.Error("failed to load chunk", "height", msg.Height, - "format", msg.Format, - "chunk", msg.Index, - "err", err, - "peer", envelope.From) + "version", msg.Version, + "chunkID", hex.EncodeToString(msg.ChunkId), + "peer", envelope.From, + "error", err) return nil } r.logger.Debug("sending chunk", "height", msg.Height, - "format", msg.Format, - "chunk", msg.Index, + "version", msg.Version, + "chunkID", hex.EncodeToString(msg.ChunkId), "peer", envelope.From) if err := chunkCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.ChunkResponse{ Height: msg.Height, - Format: msg.Format, - Index: msg.Index, + Version: msg.Version, + ChunkId: msg.ChunkId, Chunk: resp.Chunk, Missing: resp.Chunk == nil, }, @@ -788,23 +796,24 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope r.logger.Debug("received chunk; adding to sync", "height", msg.Height, - "format", msg.Format, - "chunk", msg.Index, + "version", msg.Version, + "chunkID", hex.EncodeToString(msg.ChunkId), + "chunkLen", len(msg.Chunk), "peer", envelope.From) _, err := syncer.AddChunk(&chunk{ - Height: msg.Height, - Format: msg.Format, - Index: msg.Index, - Chunk: msg.Chunk, - Sender: envelope.From, + Height: msg.Height, + Version: msg.Version, + ID: msg.ChunkId, + Chunk: msg.Chunk, + Sender: envelope.From, }) if err != nil { r.logger.Error("failed to add chunk", "height", msg.Height, - "format", msg.Format, - "chunk", msg.Index, - "err", err, - "peer", envelope.From) + "version", msg.Version, + "chunkID", hex.EncodeToString(msg.ChunkId), + "peer", envelope.From, + "error", err) return nil } @@ -821,7 +830,9 @@ func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Env r.logger.Info("received light block request", "height", msg.Height) lb, err := r.fetchLightBlock(msg.Height) if err != nil { - r.logger.Error("failed to retrieve light block", "err", err, "height", msg.Height) + r.logger.Error("failed to retrieve light block", + "height", msg.Height, + "error", err) return err } if lb == nil { @@ -838,7 +849,7 @@ func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Env lbproto, err := 
lb.ToProto() if err != nil { - r.logger.Error("marshaling light block to proto", "err", err) + r.logger.Error("marshaling light block to proto", "error", err) return nil } @@ -862,7 +873,9 @@ func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Env if errors.Is(err, context.Canceled) { return err } - r.logger.Error("error processing light block response", "err", err, "height", height) + r.logger.Error("error processing light block response", + "height", height, + "error", err) } default: @@ -878,7 +891,9 @@ func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelop r.logger.Debug("received consensus params request", "height", msg.Height) cp, err := r.stateStore.LoadConsensusParams(int64(msg.Height)) if err != nil { - r.logger.Error("failed to fetch requested consensus params", "err", err, "height", msg.Height) + r.logger.Error("failed to fetch requested consensus params", + "height", msg.Height, + "error", err) return nil } @@ -927,7 +942,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, cha err = fmt.Errorf("panic in processing message: %v", e) r.logger.Error( "recovering from processing message panic", - "err", err, + "error", err, "stack", string(debug.Stack()), ) } @@ -975,15 +990,15 @@ func (r *Reactor) processChannels(ctx context.Context, chanTable map[p2p.Channel "envelope_from", envelope.From, "envelope_ch", envelope.ChannelID, "num_chs", len(chanTable), - "err", err, + "error", err, ) return } r.logger.Error("failed to process message", - "err", err, "channel", ch.String(), "ch_id", envelope.ChannelID, - "envelope", envelope) + "envelope", envelope, + "error", err) if serr := ch.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, @@ -1076,7 +1091,7 @@ func (r *Reactor) recentSnapshots(ctx context.Context, n uint32) ([]*snapshot, e switch { case a.Height > b.Height: return true - case a.Height == b.Height && a.Format > b.Format: + case a.Height == b.Height && a.Version > b.Version: return true default: return false @@ -1091,8 +1106,7 @@ func (r *Reactor) recentSnapshots(ctx context.Context, n uint32) ([]*snapshot, e snapshots = append(snapshots, &snapshot{ Height: s.Height, - Format: s.Format, - Chunks: s.Chunks, + Version: s.Version, Hash: s.Hash, Metadata: s.Metadata, }) @@ -1190,25 +1204,6 @@ func (r *Reactor) SnapshotHeight() int64 { } return 0 } -func (r *Reactor) SnapshotChunksCount() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - - if r.syncer != nil && r.syncer.chunks != nil { - return int64(r.syncer.chunks.numChunksReturned()) - } - return 0 -} - -func (r *Reactor) SnapshotChunksTotal() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - - if r.syncer != nil && r.syncer.processingSnapshot != nil { - return int64(r.syncer.processingSnapshot.Chunks) - } - return 0 -} func (r *Reactor) BackFilledBlocks() int64 { r.mtx.RLock() diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 1bd5487d9d..dcee612c6b 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -91,7 +91,7 @@ func setup( t.Helper() if conn == nil { - conn = &clientmocks.Client{} + conn = clientmocks.NewClient(t) } rts := &reactorTestSuite{ @@ -146,7 +146,7 @@ func setup( rts.paramsPeerErrCh, ) - rts.stateStore = &smmocks.Store{} + rts.stateStore = smmocks.NewStore(t) rts.blockStore = store.NewBlockStore(dbm.NewMemDB()) cfg := config.DefaultStateSyncConfig() @@ -227,38 +227,51 @@ func TestReactor_Sync(t *testing.T) { rts := setup(ctx, t, nil, nil, 100) 
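// With chunk discovery driven by the app, the snapshot hash doubles as the root chunk ID, so this happy path expects exactly one ApplySnapshotChunk call, answered with COMPLETE_SNAPSHOT.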
chain := buildLightBlockChain(ctx, t, 1, 10, time.Now(), rts.privVal) // app accepts any snapshot - rts.conn.On("OfferSnapshot", ctx, mock.IsType(&abci.RequestOfferSnapshot{})). + rts.conn. + On("OfferSnapshot", ctx, mock.IsType(&abci.RequestOfferSnapshot{})). Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) - // app accepts every chunk - rts.conn.On("ApplySnapshotChunk", ctx, mock.IsType(&abci.RequestApplySnapshotChunk{})). - Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + rts.conn. + On("ApplySnapshotChunk", ctx, mock.IsType(&abci.RequestApplySnapshotChunk{})). + Once(). + Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT}, nil) // app query returns valid state app hash - rts.conn.On("Info", mock.Anything, &proxy.RequestInfo).Return(&abci.ResponseInfo{ - AppVersion: testAppVersion, - LastBlockHeight: snapshotHeight, - LastBlockAppHash: chain[snapshotHeight+1].AppHash, - }, nil) + rts.conn. + On("Info", mock.Anything, &proxy.RequestInfo). + Return(&abci.ResponseInfo{ + AppVersion: testAppVersion, + LastBlockHeight: snapshotHeight, + LastBlockAppHash: chain[snapshotHeight+1].AppHash, + }, nil) // store accepts state and validator sets - rts.stateStore.On("Bootstrap", mock.AnythingOfType("state.State")).Return(nil) - rts.stateStore.On("SaveValidatorSets", mock.AnythingOfType("int64"), mock.AnythingOfType("int64"), - mock.AnythingOfType("*types.ValidatorSet")).Return(nil) + rts.stateStore. + On("Bootstrap", mock.AnythingOfType("state.State")). + Return(nil) + rts.stateStore. + On("SaveValidatorSets", + mock.AnythingOfType("int64"), + mock.AnythingOfType("int64"), + mock.AnythingOfType("*types.ValidatorSet")). + Return(nil) closeCh := make(chan struct{}) defer close(closeCh) + + appHash := []byte{1, 2, 3} + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) go graduallyAddPeers(ctx, t, rts.peerUpdateCh, closeCh, 1*time.Second) go handleSnapshotRequests(ctx, t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{ { - Height: uint64(snapshotHeight), - Format: 1, - Chunks: 1, + Height: uint64(snapshotHeight), + Version: 1, + Hash: appHash, }, }) - go handleChunkRequests(ctx, t, rts.chunkOutCh, rts.chunkInCh, closeCh, []byte("abc")) + go handleChunkRequests(ctx, t, rts.chunkOutCh, rts.chunkInCh, closeCh, appHash, []byte("abc")) go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) @@ -293,30 +306,31 @@ func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) { } func TestReactor_ChunkRequest(t *testing.T) { + chunkID := []byte{1, 2, 3, 4} testcases := map[string]struct { request *ssproto.ChunkRequest chunk []byte expectResponse *ssproto.ChunkResponse }{ "chunk is returned": { - &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, + &ssproto.ChunkRequest{Height: 1, Version: 1, ChunkId: chunkID}, []byte{1, 2, 3}, - &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 2, 3}}, + &ssproto.ChunkResponse{Height: 1, Version: 1, ChunkId: chunkID, Chunk: []byte{1, 2, 3}}, }, "empty chunk is returned, as empty": { - &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, + &ssproto.ChunkRequest{Height: 1, Version: 1, ChunkId: chunkID}, []byte{}, - &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{}}, + &ssproto.ChunkResponse{Height: 1, Version: 1, ChunkId: chunkID, Chunk: []byte{}}, }, "nil (missing) chunk is returned as missing": { - &ssproto.ChunkRequest{Height: 1, 
Format: 1, Index: 1}, + &ssproto.ChunkRequest{Height: 1, Version: 1, ChunkId: chunkID}, nil, - &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true}, + &ssproto.ChunkResponse{Height: 1, Version: 1, ChunkId: chunkID, Missing: true}, }, "invalid request": { - &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, + &ssproto.ChunkRequest{Height: 1, Version: 1, ChunkId: chunkID}, nil, - &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true}, + &ssproto.ChunkResponse{Height: 1, Version: 1, ChunkId: chunkID, Missing: true}, }, } @@ -331,9 +345,9 @@ func TestReactor_ChunkRequest(t *testing.T) { // mock ABCI connection to return local snapshots conn := &clientmocks.Client{} conn.On("LoadSnapshotChunk", mock.Anything, &abci.RequestLoadSnapshotChunk{ - Height: tc.request.Height, - Format: tc.request.Format, - Chunk: tc.request.Index, + Height: tc.request.Height, + Version: tc.request.Version, + ChunkId: tc.request.ChunkId, }).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil) rts := setup(ctx, t, conn, nil, 2) @@ -380,30 +394,30 @@ func TestReactor_SnapshotsRequest(t *testing.T) { "no snapshots": {nil, []*ssproto.SnapshotsResponse{}}, ">10 unordered snapshots": { []*abci.Snapshot{ - {Height: 1, Format: 2, Chunks: 7, Hash: []byte{1, 2}, Metadata: []byte{1}}, - {Height: 2, Format: 2, Chunks: 7, Hash: []byte{2, 2}, Metadata: []byte{2}}, - {Height: 3, Format: 2, Chunks: 7, Hash: []byte{3, 2}, Metadata: []byte{3}}, - {Height: 1, Format: 1, Chunks: 7, Hash: []byte{1, 1}, Metadata: []byte{4}}, - {Height: 2, Format: 1, Chunks: 7, Hash: []byte{2, 1}, Metadata: []byte{5}}, - {Height: 3, Format: 1, Chunks: 7, Hash: []byte{3, 1}, Metadata: []byte{6}}, - {Height: 1, Format: 4, Chunks: 7, Hash: []byte{1, 4}, Metadata: []byte{7}}, - {Height: 2, Format: 4, Chunks: 7, Hash: []byte{2, 4}, Metadata: []byte{8}}, - {Height: 3, Format: 4, Chunks: 7, Hash: []byte{3, 4}, Metadata: []byte{9}}, - {Height: 1, Format: 3, Chunks: 7, Hash: []byte{1, 3}, Metadata: []byte{10}}, - {Height: 2, Format: 3, Chunks: 7, Hash: []byte{2, 3}, Metadata: []byte{11}}, - {Height: 3, Format: 3, Chunks: 7, Hash: []byte{3, 3}, Metadata: []byte{12}}, + {Height: 1, Version: 2, Hash: []byte{1, 2}, Metadata: []byte{1}}, + {Height: 2, Version: 2, Hash: []byte{2, 2}, Metadata: []byte{2}}, + {Height: 3, Version: 2, Hash: []byte{3, 2}, Metadata: []byte{3}}, + {Height: 1, Version: 1, Hash: []byte{1, 1}, Metadata: []byte{4}}, + {Height: 2, Version: 1, Hash: []byte{2, 1}, Metadata: []byte{5}}, + {Height: 3, Version: 1, Hash: []byte{3, 1}, Metadata: []byte{6}}, + {Height: 1, Version: 4, Hash: []byte{1, 4}, Metadata: []byte{7}}, + {Height: 2, Version: 4, Hash: []byte{2, 4}, Metadata: []byte{8}}, + {Height: 3, Version: 4, Hash: []byte{3, 4}, Metadata: []byte{9}}, + {Height: 1, Version: 3, Hash: []byte{1, 3}, Metadata: []byte{10}}, + {Height: 2, Version: 3, Hash: []byte{2, 3}, Metadata: []byte{11}}, + {Height: 3, Version: 3, Hash: []byte{3, 3}, Metadata: []byte{12}}, }, []*ssproto.SnapshotsResponse{ - {Height: 3, Format: 4, Chunks: 7, Hash: []byte{3, 4}, Metadata: []byte{9}}, - {Height: 3, Format: 3, Chunks: 7, Hash: []byte{3, 3}, Metadata: []byte{12}}, - {Height: 3, Format: 2, Chunks: 7, Hash: []byte{3, 2}, Metadata: []byte{3}}, - {Height: 3, Format: 1, Chunks: 7, Hash: []byte{3, 1}, Metadata: []byte{6}}, - {Height: 2, Format: 4, Chunks: 7, Hash: []byte{2, 4}, Metadata: []byte{8}}, - {Height: 2, Format: 3, Chunks: 7, Hash: []byte{2, 3}, Metadata: []byte{11}}, - {Height: 2, Format: 2, Chunks: 7, Hash: []byte{2, 2}, 
Metadata: []byte{2}}, - {Height: 2, Format: 1, Chunks: 7, Hash: []byte{2, 1}, Metadata: []byte{5}}, - {Height: 1, Format: 4, Chunks: 7, Hash: []byte{1, 4}, Metadata: []byte{7}}, - {Height: 1, Format: 3, Chunks: 7, Hash: []byte{1, 3}, Metadata: []byte{10}}, + {Height: 3, Version: 4, Hash: []byte{3, 4}, Metadata: []byte{9}}, + {Height: 3, Version: 3, Hash: []byte{3, 3}, Metadata: []byte{12}}, + {Height: 3, Version: 2, Hash: []byte{3, 2}, Metadata: []byte{3}}, + {Height: 3, Version: 1, Hash: []byte{3, 1}, Metadata: []byte{6}}, + {Height: 2, Version: 4, Hash: []byte{2, 4}, Metadata: []byte{8}}, + {Height: 2, Version: 3, Hash: []byte{2, 3}, Metadata: []byte{11}}, + {Height: 2, Version: 2, Hash: []byte{2, 2}, Metadata: []byte{2}}, + {Height: 2, Version: 1, Hash: []byte{2, 1}, Metadata: []byte{5}}, + {Height: 1, Version: 4, Hash: []byte{1, 4}, Metadata: []byte{7}}, + {Height: 1, Version: 3, Hash: []byte{1, 3}, Metadata: []byte{10}}, }, }, } @@ -633,7 +647,7 @@ func TestReactor_StateProviderP2P(t *testing.T) { require.Equal(t, commit.BlockID, state.LastBlockID) added, err := rts.reactor.getSyncer().AddSnapshot(peerA, &snapshot{ - Height: 1, Format: 2, Chunks: 7, Hash: []byte{1, 2}, Metadata: []byte{1}, + Height: 1, Version: 2, Hash: []byte{1, 2}, Metadata: []byte{1}, }) require.NoError(t, err) require.True(t, added) @@ -696,14 +710,19 @@ func TestReactor_Backfill(t *testing.T) { } trackingHeight := startHeight - rts.stateStore.On("SaveValidatorSets", mock.AnythingOfType("int64"), mock.AnythingOfType("int64"), - mock.AnythingOfType("*types.ValidatorSet")).Return(func(lh, uh int64, vals *types.ValidatorSet) error { - require.Equal(t, trackingHeight, lh) - require.Equal(t, lh, uh) - require.GreaterOrEqual(t, lh, stopHeight) - trackingHeight-- - return nil - }) + rts.stateStore. + On("SaveValidatorSets", + mock.AnythingOfType("int64"), + mock.AnythingOfType("int64"), + mock.AnythingOfType("*types.ValidatorSet")). + Maybe(). + Return(func(lh, uh int64, vals *types.ValidatorSet) error { + require.Equal(t, trackingHeight, lh) + require.Equal(t, lh, uh) + require.GreaterOrEqual(t, lh, stopHeight) + trackingHeight-- + return nil + }) chain := buildLightBlockChain(ctx, t, stopHeight-1, startHeight+1, stopTime, rts.privVal) @@ -948,8 +967,7 @@ func handleSnapshotRequests( ChannelID: SnapshotChannel, Message: &ssproto.SnapshotsResponse{ Height: snapshot.Height, - Format: snapshot.Format, - Chunks: snapshot.Chunks, + Version: snapshot.Version, Hash: snapshot.Hash, Metadata: snapshot.Metadata, }, @@ -965,6 +983,7 @@ func handleChunkRequests( receivingCh chan p2p.Envelope, sendingCh chan p2p.Envelope, closeCh chan struct{}, + chunkID []byte, chunk []byte, ) { t.Helper() @@ -981,9 +1000,9 @@ func handleChunkRequests( From: envelope.To, ChannelID: ChunkChannel, Message: &ssproto.ChunkResponse{ + ChunkId: chunkID, Height: msg.Height, - Format: msg.Format, - Index: msg.Index, + Version: msg.Version, Chunk: chunk, Missing: false, }, diff --git a/internal/statesync/snapshots.go b/internal/statesync/snapshots.go index 05f70ef57b..255b30fbe8 100644 --- a/internal/statesync/snapshots.go +++ b/internal/statesync/snapshots.go @@ -19,8 +19,7 @@ type snapshotKey [sha256.Size]byte // snapshot contains data about a snapshot. 
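// Chunk layout is no longer part of this metadata: the snapshot hash serves as the root chunk ID, and each applied chunk names its successors via NextChunks.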
type snapshot struct { Height uint64 - Format uint32 - Chunks uint32 + Version uint32 Hash tmbytes.HexBytes Metadata []byte @@ -36,8 +35,7 @@ func (s *snapshot) Key() snapshotKey { bz := make([]byte, 0, (64+32+32)/8) bz = binary.LittleEndian.AppendUint64(bz, s.Height) - bz = binary.LittleEndian.AppendUint32(bz, s.Format) - bz = binary.LittleEndian.AppendUint32(bz, s.Chunks) + bz = binary.LittleEndian.AppendUint32(bz, s.Version) hasher.Write(bz) hasher.Write(s.Hash) hasher.Write(s.Metadata) @@ -53,9 +51,9 @@ type snapshotPool struct { snapshotPeers map[snapshotKey]map[types.NodeID]types.NodeID // indexes for fast searches - formatIndex map[uint32]map[snapshotKey]bool - heightIndex map[uint64]map[snapshotKey]bool - peerIndex map[types.NodeID]map[snapshotKey]bool + versionIndex map[uint32]map[snapshotKey]bool + heightIndex map[uint64]map[snapshotKey]bool + peerIndex map[types.NodeID]map[snapshotKey]bool // blacklists for rejected items formatBlacklist map[uint32]bool @@ -68,7 +66,7 @@ func newSnapshotPool() *snapshotPool { return &snapshotPool{ snapshots: make(map[snapshotKey]*snapshot), snapshotPeers: make(map[snapshotKey]map[types.NodeID]types.NodeID), - formatIndex: make(map[uint32]map[snapshotKey]bool), + versionIndex: make(map[uint32]map[snapshotKey]bool), heightIndex: make(map[uint64]map[snapshotKey]bool), peerIndex: make(map[types.NodeID]map[snapshotKey]bool), formatBlacklist: make(map[uint32]bool), @@ -88,7 +86,7 @@ func (p *snapshotPool) Add(peerID types.NodeID, snapshot *snapshot) (bool, error defer p.Unlock() switch { - case p.formatBlacklist[snapshot.Format]: + case p.formatBlacklist[snapshot.Version]: return false, nil case p.peerBlacklist[peerID]: return false, nil @@ -113,10 +111,10 @@ func (p *snapshotPool) Add(peerID types.NodeID, snapshot *snapshot) (bool, error } p.snapshots[key] = snapshot - if p.formatIndex[snapshot.Format] == nil { - p.formatIndex[snapshot.Format] = make(map[snapshotKey]bool) + if p.versionIndex[snapshot.Version] == nil { + p.versionIndex[snapshot.Version] = make(map[snapshotKey]bool) } - p.formatIndex[snapshot.Format][key] = true + p.versionIndex[snapshot.Version][key] = true if p.heightIndex[snapshot.Height] == nil { p.heightIndex[snapshot.Height] = make(map[snapshotKey]bool) @@ -216,9 +214,9 @@ func (p *snapshotPool) sorterFactory(candidates []*snapshot) func(int, int) bool return false case len(p.snapshotPeers[a.Key()]) > len(p.snapshotPeers[b.Key()]): return true - case a.Format > b.Format: + case a.Version > b.Version: return true - case a.Format < b.Format: + case a.Version < b.Version: return false default: return false @@ -236,13 +234,13 @@ func (p *snapshotPool) Reject(snapshot *snapshot) { p.removeSnapshot(key) } -// RejectFormat rejects a snapshot format. It will never be used again. -func (p *snapshotPool) RejectFormat(format uint32) { +// RejectVersion rejects a snapshot version. It will never be used again. 
+func (p *snapshotPool) RejectVersion(version uint32) { p.Lock() defer p.Unlock() - p.formatBlacklist[format] = true - for key := range p.formatIndex[format] { + p.formatBlacklist[version] = true + for key := range p.versionIndex[version] { p.removeSnapshot(key) } } @@ -287,7 +285,7 @@ func (p *snapshotPool) removeSnapshot(key snapshotKey) { } delete(p.snapshots, key) - delete(p.formatIndex[snapshot.Format], key) + delete(p.versionIndex[snapshot.Version], key) delete(p.heightIndex[snapshot.Height], key) for peerID := range p.snapshotPeers[key] { delete(p.peerIndex[peerID], key) diff --git a/internal/statesync/snapshots_test.go b/internal/statesync/snapshots_test.go index 08cb08269d..8e97f05c8f 100644 --- a/internal/statesync/snapshots_test.go +++ b/internal/statesync/snapshots_test.go @@ -12,19 +12,17 @@ func TestSnapshot_Key(t *testing.T) { testcases := map[string]struct { modify func(*snapshot) }{ - "new height": {func(s *snapshot) { s.Height = 9 }}, - "new format": {func(s *snapshot) { s.Format = 9 }}, - "new chunk count": {func(s *snapshot) { s.Chunks = 9 }}, - "new hash": {func(s *snapshot) { s.Hash = []byte{9} }}, - "no metadata": {func(s *snapshot) { s.Metadata = nil }}, + "new height": {func(s *snapshot) { s.Height = 9 }}, + "new format": {func(s *snapshot) { s.Version = 9 }}, + "new hash": {func(s *snapshot) { s.Hash = []byte{9} }}, + "no metadata": {func(s *snapshot) { s.Metadata = nil }}, } for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { s := snapshot{ Height: 3, - Format: 1, - Chunks: 7, + Version: 1, Hash: []byte{1, 2, 3}, Metadata: []byte{255}, } @@ -42,10 +40,9 @@ func TestSnapshotPool_Add(t *testing.T) { // Adding to the pool should work pool := newSnapshotPool() added, err := pool.Add(peerID, &snapshot{ - Height: 1, - Format: 1, - Chunks: 1, - Hash: []byte{1}, + Height: 1, + Version: 1, + Hash: []byte{1}, }) require.NoError(t, err) require.True(t, added) @@ -53,10 +50,9 @@ func TestSnapshotPool_Add(t *testing.T) { // Adding again from a different peer should return false otherNodeID := types.NodeID("bb") added, err = pool.Add(otherNodeID, &snapshot{ - Height: 1, - Format: 1, - Chunks: 1, - Hash: []byte{1}, + Height: 1, + Version: 1, + Hash: []byte{1}, }) require.NoError(t, err) require.False(t, added) @@ -68,7 +64,7 @@ func TestSnapshotPool_Add(t *testing.T) { func TestSnapshotPool_GetPeer(t *testing.T) { pool := newSnapshotPool() - s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} + s := &snapshot{Height: 1, Version: 1, Hash: []byte{1}} peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") @@ -79,7 +75,7 @@ func TestSnapshotPool_GetPeer(t *testing.T) { _, err = pool.Add(peerBID, s) require.NoError(t, err) - _, err = pool.Add(peerAID, &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{1}}) + _, err = pool.Add(peerAID, &snapshot{Height: 2, Version: 1, Hash: []byte{1}}) require.NoError(t, err) // GetPeer currently picks a random peer, so lets run it until we've seen both. 
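The trimmed TestSnapshot_Key table above reflects the new key material: height, version, hash and metadata, with the chunk count gone. As a standalone sketch of the derivation (mirroring snapshot.Key() from this diff, but written as a free function with an illustrative name in a throwaway package):

package snapshots

import (
	"crypto/sha256"
	"encoding/binary"
)

type snapshotKey [sha256.Size]byte

// key hashes the fields that now identify a snapshot; the chunk count
// no longer participates.
func key(height uint64, version uint32, hash, metadata []byte) snapshotKey {
	hasher := sha256.New()
	bz := make([]byte, 0, 12) // 8 bytes height + 4 bytes version
	bz = binary.LittleEndian.AppendUint64(bz, height)
	bz = binary.LittleEndian.AppendUint32(bz, version)
	hasher.Write(bz)
	hasher.Write(hash)
	hasher.Write(metadata)
	var k snapshotKey
	copy(k[:], hasher.Sum(nil))
	return k
}

Two advertisements that differ only in how their data was chunked now collapse to the same key, which is why the "new chunk count" case could be deleted.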
@@ -96,14 +92,14 @@ func TestSnapshotPool_GetPeer(t *testing.T) { } // GetPeer should return empty for an unknown snapshot - peer := pool.GetPeer(&snapshot{Height: 9, Format: 9}) + peer := pool.GetPeer(&snapshot{Height: 9, Version: 9}) require.EqualValues(t, "", peer) } func TestSnapshotPool_GetPeers(t *testing.T) { pool := newSnapshotPool() - s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} + s := &snapshot{Height: 1, Version: 1, Hash: []byte{1}} peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") @@ -114,7 +110,7 @@ func TestSnapshotPool_GetPeers(t *testing.T) { _, err = pool.Add(peerBID, s) require.NoError(t, err) - _, err = pool.Add(peerAID, &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}}) + _, err = pool.Add(peerAID, &snapshot{Height: 2, Version: 1, Hash: []byte{2}}) require.NoError(t, err) peers := pool.GetPeers(s) @@ -133,11 +129,11 @@ func TestSnapshotPool_Ranked_Best(t *testing.T) { snapshot *snapshot peers []types.NodeID }{ - {&snapshot{Height: 2, Format: 2, Chunks: 4, Hash: []byte{1, 3}}, []types.NodeID{"AA", "BB", "CC", "DD"}}, - {&snapshot{Height: 1, Format: 1, Chunks: 4, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC", "DD"}}, - {&snapshot{Height: 2, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}}, - {&snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}}, - {&snapshot{Height: 1, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}}, + {&snapshot{Height: 2, Version: 2, Hash: []byte{1, 3}}, []types.NodeID{"AA", "BB", "CC", "DD"}}, + {&snapshot{Height: 1, Version: 1, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC", "DD"}}, + {&snapshot{Height: 2, Version: 2, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}}, + {&snapshot{Height: 2, Version: 1, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}}, + {&snapshot{Height: 1, Version: 2, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}}, } // Add snapshots in reverse order, to make sure the pool enforces some order. 
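// (Ranked() prefers greater heights, then snapshots advertised by more peers, then greater versions, per sorterFactory above; that is the order the expectations encode.)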
@@ -172,10 +168,10 @@ func TestSnapshotPool_Reject(t *testing.T) { peerID := types.NodeID("aa") snapshots := []*snapshot{ - {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, - {Height: 2, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, - {Height: 1, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, - {Height: 1, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 2, Version: 2, Hash: []byte{1, 2}}, + {Height: 2, Version: 1, Hash: []byte{1, 2}}, + {Height: 1, Version: 2, Hash: []byte{1, 2}}, + {Height: 1, Version: 1, Hash: []byte{1, 2}}, } for _, s := range snapshots { _, err := pool.Add(peerID, s) @@ -189,7 +185,7 @@ func TestSnapshotPool_Reject(t *testing.T) { require.NoError(t, err) require.False(t, added) - added, err = pool.Add(peerID, &snapshot{Height: 3, Format: 3, Chunks: 1, Hash: []byte{1}}) + added, err = pool.Add(peerID, &snapshot{Height: 3, Version: 3, Hash: []byte{1}}) require.NoError(t, err) require.True(t, added) } @@ -200,25 +196,25 @@ func TestSnapshotPool_RejectFormat(t *testing.T) { peerID := types.NodeID("aa") snapshots := []*snapshot{ - {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, - {Height: 2, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, - {Height: 1, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, - {Height: 1, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 2, Version: 2, Hash: []byte{1, 2}}, + {Height: 2, Version: 1, Hash: []byte{1, 2}}, + {Height: 1, Version: 2, Hash: []byte{1, 2}}, + {Height: 1, Version: 1, Hash: []byte{1, 2}}, } for _, s := range snapshots { _, err := pool.Add(peerID, s) require.NoError(t, err) } - pool.RejectFormat(1) + pool.RejectVersion(1) require.Equal(t, []*snapshot{snapshots[0], snapshots[2]}, pool.Ranked()) - added, err := pool.Add(peerID, &snapshot{Height: 3, Format: 1, Chunks: 1, Hash: []byte{1}}) + added, err := pool.Add(peerID, &snapshot{Height: 3, Version: 1, Hash: []byte{1}}) require.NoError(t, err) require.False(t, added) require.Equal(t, []*snapshot{snapshots[0], snapshots[2]}, pool.Ranked()) - added, err = pool.Add(peerID, &snapshot{Height: 3, Format: 3, Chunks: 1, Hash: []byte{1}}) + added, err = pool.Add(peerID, &snapshot{Height: 3, Version: 3, Hash: []byte{1}}) require.NoError(t, err) require.True(t, added) } @@ -229,9 +225,9 @@ func TestSnapshotPool_RejectPeer(t *testing.T) { peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") - s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} - s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} - s3 := &snapshot{Height: 3, Format: 1, Chunks: 1, Hash: []byte{2}} + s1 := &snapshot{Height: 1, Version: 1, Hash: []byte{1}} + s2 := &snapshot{Height: 2, Version: 1, Hash: []byte{2}} + s3 := &snapshot{Height: 3, Version: 1, Hash: []byte{2}} _, err := pool.Add(peerAID, s1) require.NoError(t, err) @@ -269,8 +265,8 @@ func TestSnapshotPool_RemovePeer(t *testing.T) { peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") - s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} - s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} + s1 := &snapshot{Height: 1, Version: 1, Hash: []byte{1}} + s2 := &snapshot{Height: 2, Version: 1, Hash: []byte{2}} _, err := pool.Add(peerAID, s1) require.NoError(t, err) diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index 03bedb75e4..9dcfb08fea 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -28,6 +28,8 @@ const ( // minimumDiscoveryTime is the lowest allowable time for a // SyncAny discovery time. 
minimumDiscoveryTime = 5 * time.Second + + dequeueChunkIDTimeoutDefault = 2 * time.Second ) var ( @@ -47,7 +49,8 @@ var ( // errTimeout is returned by Sync() when we've waited too long to receive a chunk. errTimeout = errors.New("timed out waiting for chunk") // errNoSnapshots is returned by SyncAny() if no snapshots are found and discovery is disabled. - errNoSnapshots = errors.New("no suitable snapshots found") + errNoSnapshots = errors.New("no suitable snapshots found") + errStatesyncNotInProgress = errors.New("no state sync in progress") ) // syncer runs a state sync against an ABCI app. Use either SyncAny() to automatically attempt to @@ -61,12 +64,14 @@ type syncer struct { snapshotCh p2p.Channel chunkCh p2p.Channel tempDir string - fetchers int32 + fetchers int retryTimeout time.Duration - mtx sync.RWMutex - chunks *chunkQueue - metrics *Metrics + dequeueChunkIDTimeout time.Duration + + mtx sync.RWMutex + chunkQueue *chunkQueue + metrics *Metrics avgChunkTime int64 lastSyncedSnapshotHeight int64 @@ -78,17 +83,26 @@ type syncer struct { func (s *syncer) AddChunk(chunk *chunk) (bool, error) { s.mtx.RLock() defer s.mtx.RUnlock() - if s.chunks == nil { - return false, errors.New("no state sync in progress") + if s.chunkQueue == nil { + return false, errStatesyncNotInProgress + } + keyVals := []any{ + "height", chunk.Height, + "version", chunk.Version, + "chunk", chunk.ID, } - added, err := s.chunks.Add(chunk) + added, err := s.chunkQueue.Add(chunk) if err != nil { + if errors.Is(err, errNilSnapshot) { + s.logger.Error("Can't add a chunk because the snapshot is nil", keyVals...) + return false, nil + } return false, err } if added { - s.logger.Debug("Added chunk to queue", "height", chunk.Height, "format", chunk.Format, "chunk", chunk.Index) + s.logger.Debug("Added chunk to queue", keyVals...) } else { - s.logger.Debug("Ignoring duplicate chunk in queue", "height", chunk.Height, "format", chunk.Format, "chunk", chunk.Index) + s.logger.Debug("Ignoring duplicate chunk in requestQueue", keyVals...) } return added, nil } @@ -102,7 +116,9 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err } if added { s.metrics.TotalSnapshots.Add(1) - s.logger.Info("Discovered new snapshot", "height", snapshot.Height, "format", snapshot.Format, + s.logger.Info("Discovered new snapshot", + "height", snapshot.Height, + "format", snapshot.Version, "hash", snapshot.Hash.ShortString()) } return added, nil @@ -157,7 +173,7 @@ func (s *syncer) SyncAny( // the snapshot and chunk queue from the previous loop iteration. var ( snapshot *snapshot - chunks *chunkQueue + queue *chunkQueue err error iters int ) @@ -167,7 +183,7 @@ func (s *syncer) SyncAny( // If not nil, we're going to retry restoration of the same snapshot.
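// (When queue is nil we are not retrying: a fresh chunkQueue is created below and seeded, via Enqueue(snapshot.Hash), with the snapshot hash acting as the root chunk ID.)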
if snapshot == nil { snapshot = s.snapshots.Best() - chunks = nil + queue = nil } if snapshot == nil { if discoveryTime == 0 { @@ -184,18 +200,18 @@ func (s *syncer) SyncAny( continue } } - if chunks == nil { - chunks, err = newChunkQueue(snapshot, s.tempDir) + if queue == nil { + queue, err = newChunkQueue(snapshot, s.tempDir, s.fetchers) if err != nil { return sm.State{}, nil, fmt.Errorf("failed to create chunk queue: %w", err) } - defer chunks.Close() // in case we forget to close it elsewhere + defer queue.Close() // in case we forget to close it elsewhere } + queue.Enqueue(snapshot.Hash) s.processingSnapshot = snapshot - s.metrics.SnapshotChunkTotal.Set(float64(snapshot.Chunks)) - newState, commit, err := s.Sync(ctx, snapshot, chunks) + newState, commit, err := s.Sync(ctx, snapshot, queue) switch { case err == nil: s.metrics.SnapshotHeight.Set(float64(snapshot.Height)) @@ -206,27 +222,35 @@ func (s *syncer) SyncAny( return sm.State{}, nil, err case errors.Is(err, errRetrySnapshot): - chunks.RetryAll() - s.logger.Info("Retrying snapshot", "height", snapshot.Height, "format", snapshot.Format, + queue.RetryAll() + s.logger.Info("Retrying snapshot", + "height", snapshot.Height, + "format", snapshot.Version, "hash", snapshot.Hash) continue case errors.Is(err, errTimeout): s.snapshots.Reject(snapshot) s.logger.Error("Timed out waiting for snapshot chunks, rejected snapshot", - "height", snapshot.Height, "format", snapshot.Format, "hash", snapshot.Hash) + "height", snapshot.Height, + "format", snapshot.Version, + "hash", snapshot.Hash) case errors.Is(err, errRejectSnapshot): s.snapshots.Reject(snapshot) - s.logger.Info("Snapshot rejected", "height", snapshot.Height, "format", snapshot.Format, + s.logger.Info("Snapshot rejected", + "height", snapshot.Height, + "format", snapshot.Version, "hash", snapshot.Hash) case errors.Is(err, errRejectFormat): - s.snapshots.RejectFormat(snapshot.Format) - s.logger.Info("Snapshot format rejected", "format", snapshot.Format) + s.snapshots.RejectVersion(snapshot.Version) + s.logger.Info("Snapshot format rejected", "format", snapshot.Version) case errors.Is(err, errRejectSender): - s.logger.Info("Snapshot senders rejected", "height", snapshot.Height, "format", snapshot.Format, + s.logger.Info("Snapshot senders rejected", + "height", snapshot.Height, + "format", snapshot.Version, "hash", snapshot.Hash) for _, peer := range s.snapshots.GetPeers(snapshot) { s.snapshots.RejectPeer(peer) @@ -238,29 +262,29 @@ func (s *syncer) SyncAny( } // Discard snapshot and chunks for next iteration - err = chunks.Close() + err = queue.Close() if err != nil { s.logger.Error("Failed to clean up chunk queue", "err", err) } snapshot = nil - chunks = nil + queue = nil s.processingSnapshot = nil } } // Sync executes a sync for a specific snapshot, returning the latest state and block commit which // the caller must use to bootstrap the node. 
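// Only one sync may run at a time: the queue is parked in s.chunkQueue for the duration of the call so AddChunk can reach it, and the field is cleared again on return.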
-func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueue) (sm.State, *types.Commit, error) { +func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, queue *chunkQueue) (sm.State, *types.Commit, error) { s.mtx.Lock() - if s.chunks != nil { + if s.chunkQueue != nil { s.mtx.Unlock() return sm.State{}, nil, errors.New("a state sync is already in progress") } - s.chunks = chunks + s.chunkQueue = queue s.mtx.Unlock() defer func() { s.mtx.Lock() - s.chunks = nil + s.chunkQueue = nil s.mtx.Unlock() }() @@ -280,7 +304,8 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu fmt.Errorf("failed to get app hash at height %d. No witnesses remaining", snapshot.Height) } s.logger.Info("failed to get and verify tendermint state. Dropping snapshot and trying again", - "err", err, "height", snapshot.Height) + "error", err, + "height", snapshot.Height) return sm.State{}, nil, errRejectSnapshot } snapshot.trustedAppHash = appHash @@ -288,6 +313,11 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu // Offer snapshot to ABCI app. err = s.offerSnapshot(ctx, snapshot) if err != nil { + s.logger.Error("Snapshot wasn't accepted", + "height", snapshot.Height, + "format", snapshot.Version, + "hash", snapshot.Hash, + "error", err) return sm.State{}, nil, err } @@ -295,8 +325,11 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu fetchCtx, cancel := context.WithCancel(ctx) defer cancel() fetchStartTime := time.Now() - for i := int32(0); i < s.fetchers; i++ { - go s.fetchChunks(fetchCtx, snapshot, chunks) + + // TODO: this approach of spawning fetcher goroutines will be deprecated in favor of a new design. + // The epic https://dashpay.atlassian.net/browse/TD-161 tracks the refactoring tasks. + for i := 0; i < s.fetchers; i++ { + go s.fetchChunks(fetchCtx, snapshot, queue) } pctx, pcancel := context.WithTimeout(ctx, 1*time.Minute) @@ -333,7 +366,7 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu } // Restore snapshot - err = s.applyChunks(ctx, chunks, fetchStartTime) + err = s.applyChunks(ctx, queue, fetchStartTime) if err != nil { return sm.State{}, nil, err } @@ -344,7 +377,9 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu } // Done! 🎉 - s.logger.Info("Snapshot restored", "height", snapshot.Height, "format", snapshot.Format, + s.logger.Info("Snapshot restored", + "height", snapshot.Height, + "version", snapshot.Version, "hash", snapshot.Hash) return state, commit, nil @@ -353,13 +388,14 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu // offerSnapshot offers a snapshot to the app. It returns various errors depending on the app's // response, or nil if the snapshot was accepted.
func (s *syncer) offerSnapshot(ctx context.Context, snapshot *snapshot) error { - s.logger.Info("Offering snapshot to ABCI app", "height", snapshot.Height, - "format", snapshot.Format, "hash", snapshot.Hash) + s.logger.Info("Offering snapshot to ABCI app", + "height", snapshot.Height, + "version", snapshot.Version, + "hash", snapshot.Hash) resp, err := s.conn.OfferSnapshot(ctx, &abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: snapshot.Height, - Format: snapshot.Format, - Chunks: snapshot.Chunks, + Version: snapshot.Version, Hash: snapshot.Hash, Metadata: snapshot.Metadata, }, @@ -370,8 +406,10 @@ func (s *syncer) offerSnapshot(ctx context.Context, snapshot *snapshot) error { } switch resp.Result { case abci.ResponseOfferSnapshot_ACCEPT: - s.logger.Info("Snapshot accepted, restoring", "height", snapshot.Height, - "format", snapshot.Format, "hash", snapshot.Hash) + s.logger.Info("Snapshot accepted, restoring", + "height", snapshot.Height, + "format", snapshot.Version, + "hash", snapshot.Hash) return nil case abci.ResponseOfferSnapshot_ABORT: return errAbort @@ -388,32 +426,33 @@ func (s *syncer) offerSnapshot(ctx context.Context, snapshot *snapshot) error { // applyChunks applies chunks to the app. It returns various errors depending on the app's // response, or nil once the snapshot is fully restored. -func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue, start time.Time) error { +func (s *syncer) applyChunks(ctx context.Context, queue *chunkQueue, start time.Time) error { for { - chunk, err := chunks.Next() - if err == errDone { - return nil - } else if err != nil { + chunk, err := queue.Next() + if err != nil { return fmt.Errorf("failed to fetch chunk: %w", err) } resp, err := s.conn.ApplySnapshotChunk(ctx, &abci.RequestApplySnapshotChunk{ - Index: chunk.Index, - Chunk: chunk.Chunk, - Sender: string(chunk.Sender), + ChunkId: chunk.ID, + Chunk: chunk.Chunk, + Sender: string(chunk.Sender), }) if err != nil { - return fmt.Errorf("failed to apply chunk %v: %w", chunk.Index, err) + return fmt.Errorf("failed to apply chunkID %x: %w", chunk.ID, err) } - s.logger.Info("Applied snapshot chunk to ABCI app", "height", chunk.Height, - "format", chunk.Format, "chunk", chunk.Index, "total", chunks.Size()) + s.logger.Info("applied snapshot chunk to ABCI app", + "height", chunk.Height, + "version", chunk.Version, + "chunkID", chunk.ID.String()) // Discard and refetch any chunks as requested by the app - for _, index := range resp.RefetchChunks { - err := chunks.Discard(index) + for _, chunkID := range resp.RefetchChunks { + err := queue.Discard(chunkID) if err != nil { - return fmt.Errorf("failed to discard chunk %v: %w", index, err) + return fmt.Errorf("failed to discard chunkID %x: %w", chunkID, err) } + queue.Enqueue(chunkID) } // Reject any senders as requested by the app @@ -422,21 +461,27 @@ func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue, start time peerID := types.NodeID(sender) s.snapshots.RejectPeer(peerID) - if err := chunks.DiscardSender(peerID); err != nil { + if err := queue.DiscardSender(peerID); err != nil { return fmt.Errorf("failed to reject sender: %w", err) } } } + s.logger.Debug("snapshot chunk applied", + "result", resp.Result.String(), + "chunkID", chunk.ID.String()) + switch resp.Result { case abci.ResponseApplySnapshotChunk_ACCEPT: - s.metrics.SnapshotChunk.Add(1) - s.avgChunkTime = time.Since(start).Nanoseconds() / int64(chunks.numChunksReturned()) - s.metrics.ChunkProcessAvgTime.Set(float64(s.avgChunkTime)) + 
queue.Enqueue(resp.NextChunks...) + s.acceptChunk(queue, start) + case abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT: + s.acceptChunk(queue, start) + return nil case abci.ResponseApplySnapshotChunk_ABORT: return errAbort case abci.ResponseApplySnapshotChunk_RETRY: - chunks.Retry(chunk.Index) + queue.Retry(chunk.ID) case abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT: return errRetrySnapshot case abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT: @@ -447,55 +492,50 @@ func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue, start time } } -// fetchChunks requests chunks from peers, receiving allocations from the chunk queue. Chunks -// will be received from the reactor via syncer.AddChunks() to chunkQueue.Add(). -func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *chunkQueue) { - var ( - next = true - index uint32 - err error - ) +func (s *syncer) acceptChunk(queue *chunkQueue, start time.Time) { + s.metrics.SnapshotChunk.Add(1) + s.avgChunkTime = time.Since(start).Nanoseconds() / int64(queue.DoneChunksCount()) + s.metrics.ChunkProcessAvgTime.Set(float64(s.avgChunkTime)) +} +// fetchChunks requests chunks from peers, dequeuing chunk IDs from the queue's request list. Chunks +// will be received from the reactor via syncer.AddChunk() and handed to queue.Add(). +func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, queue *chunkQueue) { + ticker := time.NewTicker(s.retryTimeout) + defer ticker.Stop() + dequeueChunkIDTimeout := s.dequeueChunkIDTimeout + if dequeueChunkIDTimeout == 0 { + dequeueChunkIDTimeout = dequeueChunkIDTimeoutDefault + } for { - if next { - index, err = chunks.Allocate() - if errors.Is(err, errDone) { - // Keep checking until the context is canceled (restore is done), in case any - // chunks need to be refetched.
- select { - case <-ctx.Done(): - return - case <-time.After(2 * time.Second): - continue - } - } - if err != nil { - s.logger.Error("Failed to allocate chunk from queue", "err", err) + if queue.IsRequestQueueEmpty() { + select { + case <-ctx.Done(): return + case <-time.After(dequeueChunkIDTimeout): + continue } } - s.logger.Info("Fetching snapshot chunk", "height", snapshot.Height, - "format", snapshot.Format, "chunk", index, "total", chunks.Size()) - - ticker := time.NewTicker(s.retryTimeout) - defer ticker.Stop() - - if err := s.requestChunk(ctx, snapshot, index); err != nil { + chunkID, err := queue.Dequeue() + if errors.Is(err, errQueueEmpty) { + continue + } + s.logger.Info("Fetching snapshot chunk", + "height", snapshot.Height, + "version", snapshot.Version, + "chunk", chunkID) + ticker.Reset(s.retryTimeout) + if err := s.requestChunk(ctx, snapshot, chunkID); err != nil { return } select { - case <-chunks.WaitFor(index): - next = true - + case <-queue.WaitFor(chunkID): + // chunk received; move on to the next request case <-ticker.C: - next = false - + queue.Enqueue(chunkID) case <-ctx.Done(): return } - - ticker.Stop() } } @@ -504,33 +544,32 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch // returns nil if there are no peers for the given snapshot or the // request is successfully made and an error if the request cannot be // completed -func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uint32) error { +func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunkID tmbytes.HexBytes) error { peer := s.snapshots.GetPeer(snapshot) if peer == "" { - s.logger.Error("No valid peers found for snapshot", "height", snapshot.Height, - "format", snapshot.Format, "hash", snapshot.Hash) + s.logger.Error("No valid peers found for snapshot", + "height", snapshot.Height, + "version", snapshot.Version, + "hash", snapshot.Hash) return nil } s.logger.Debug("Requesting snapshot chunk", "height", snapshot.Height, - "format", snapshot.Format, - "chunk", chunk, + "version", snapshot.Version, + "chunkID", chunkID.String(), "peer", peer) msg := p2p.Envelope{ To: peer, Message: &ssproto.ChunkRequest{ - Height: snapshot.Height, - Format: snapshot.Format, - Index: chunk, + Height: snapshot.Height, + Version: snapshot.Version, + ChunkId: chunkID, }, } - if err := s.chunkCh.Send(ctx, msg); err != nil { - return err - } - return nil + return s.chunkCh.Send(ctx, msg) } // verifyApp verifies the sync, checking the app hash, last block height and app version @@ -543,7 +582,7 @@ func (s *syncer) verifyApp(ctx context.Context, snapshot *snapshot, appVersion u // sanity check that the app version in the block matches the application's own record // of its version if resp.AppVersion != appVersion { - // An error here most likely means that the app hasn't inplemented state sync + // An error here most likely means that the app hasn't implemented state sync // or the Info call correctly return fmt.Errorf("app version mismatch. 
Expected: %d, got: %d", appVersion, resp.AppVersion) diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index 94ae2d74fb..d1b9172333 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -6,30 +6,72 @@ import ( "testing" "time" - sync "github.com/sasha-s/go-deadlock" - - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" clientmocks "github.com/tendermint/tendermint/abci/client/mocks" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/statesync/mocks" tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) -func TestSyncer_SyncAny(t *testing.T) { +type SyncerTestSuite struct { + suite.Suite + + ctx context.Context + conn *clientmocks.Client + stateProvider *mocks.StateProvider + syncer *syncer + logger log.Logger + snapshotChannel p2p.Channel + snapshotInCh chan p2p.Envelope + snapshotOutCh chan p2p.Envelope + chunkChannel p2p.Channel + chunkInCh chan p2p.Envelope + chunkOutCh chan p2p.Envelope +} + +func TestSyncerTestSuite(t *testing.T) { + suite.Run(t, new(SyncerTestSuite)) +} + +func (suite *SyncerTestSuite) SetupTest() { + suite.ctx = context.Background() + suite.stateProvider = mocks.NewStateProvider(suite.T()) + + suite.snapshotChannel, suite.snapshotInCh, suite.snapshotOutCh, _ = makeChannel(SnapshotChannel, "snapshot") + suite.chunkChannel, suite.chunkInCh, suite.chunkOutCh, _ = makeChannel(ChunkChannel, "chunk") + suite.conn = clientmocks.NewClient(suite.T()) + suite.logger = log.NewNopLogger() + suite.syncer = &syncer{ + logger: suite.logger, + stateProvider: suite.stateProvider, + conn: suite.conn, + snapshots: newSnapshotPool(), + tempDir: suite.T().TempDir(), + fetchers: 1, + snapshotCh: suite.snapshotChannel, + chunkCh: suite.chunkChannel, + retryTimeout: 100 * time.Millisecond, + dequeueChunkIDTimeout: 50 * time.Millisecond, + metrics: NopMetrics(), + } +} + +func (suite *SyncerTestSuite) TestSyncAny() { if testing.Short() { - t.Skip("skipping test in short mode") + suite.T().Skip("skipping test in short mode") } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(suite.ctx) defer cancel() state := sm.State{ @@ -60,300 +102,315 @@ func TestSyncer_SyncAny(t *testing.T) { } commit := &types.Commit{BlockID: types.BlockID{Hash: []byte("blockhash")}} + s := &snapshot{Height: 1, Version: 1, Hash: []byte{0}} chunks := []*chunk{ - {Height: 1, Format: 1, Index: 0, Chunk: []byte{1, 1, 0}}, - {Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 1, 1}}, - {Height: 1, Format: 1, Index: 2, Chunk: []byte{1, 1, 2}}, + {Height: 1, Version: 1, ID: []byte{0}, Chunk: []byte{0}}, + {Height: 1, Version: 1, ID: []byte{1}, Chunk: []byte{1}}, + {Height: 1, Version: 1, ID: []byte{2}, Chunk: []byte{2}}, + {Height: 1, Version: 1, ID: []byte{3}, Chunk: []byte{3}}, } - s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, uint64(1)).Return(state.LastAppHash, nil) - 
stateProvider.On("AppHash", mock.Anything, uint64(2)).Return(tmbytes.HexBytes("app_hash_2"), nil) - stateProvider.On("Commit", mock.Anything, uint64(1)).Return(commit, nil) - stateProvider.On("State", mock.Anything, uint64(1)).Return(state, nil) - conn := &clientmocks.Client{} + suite.stateProvider. + On("AppHash", mock.Anything, uint64(1)). + Return(state.LastAppHash, nil) + suite.stateProvider. + On("AppHash", mock.Anything, uint64(2)). + Return(tmbytes.HexBytes("app_hash_2"), nil) + suite.stateProvider. + On("Commit", mock.Anything, uint64(1)). + Return(commit, nil) + suite.stateProvider. + On("State", mock.Anything, uint64(1)). + Return(state, nil) peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") peerCID := types.NodeID("cc") - rts := setup(ctx, t, conn, stateProvider, 4) - - rts.reactor.syncer = rts.syncer // Adding a chunk should error when no sync is in progress - _, err := rts.syncer.AddChunk(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}) - require.Error(t, err) + _, err := suite.syncer.AddChunk(&chunk{Height: 1, Version: 1, ID: []byte{0}, Chunk: []byte{1}}) + suite.Require().Error(err) // Adding a couple of peers should trigger snapshot discovery messages - err = rts.syncer.AddPeer(ctx, peerAID) - require.NoError(t, err) - e := <-rts.snapshotOutCh - require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) - require.Equal(t, peerAID, e.To) - - err = rts.syncer.AddPeer(ctx, peerBID) - require.NoError(t, err) - e = <-rts.snapshotOutCh - require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) - require.Equal(t, peerBID, e.To) + err = suite.syncer.AddPeer(ctx, peerAID) + suite.Require().NoError(err) + e := <-suite.snapshotOutCh + suite.Require().Equal(&ssproto.SnapshotsRequest{}, e.Message) + suite.Require().Equal(peerAID, e.To) + + err = suite.syncer.AddPeer(ctx, peerBID) + suite.Require().NoError(err) + e = <-suite.snapshotOutCh + suite.Require().Equal(&ssproto.SnapshotsRequest{}, e.Message) + suite.Require().Equal(peerBID, e.To) // Both peers report back with snapshots. One of them also returns a snapshot we don't want, in // format 2, which will be rejected by the ABCI application. - new, err := rts.syncer.AddSnapshot(peerAID, s) - require.NoError(t, err) - require.True(t, new) + added, err := suite.syncer.AddSnapshot(peerAID, s) + suite.Require().NoError(err) + suite.Require().True(added) - new, err = rts.syncer.AddSnapshot(peerBID, s) - require.NoError(t, err) - require.False(t, new) + added, err = suite.syncer.AddSnapshot(peerBID, s) + suite.Require().NoError(err) + suite.Require().False(added) - s2 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}} - new, err = rts.syncer.AddSnapshot(peerBID, s2) - require.NoError(t, err) - require.True(t, new) + s2 := &snapshot{Height: 2, Version: 2, Hash: []byte{1}} + added, err = suite.syncer.AddSnapshot(peerBID, s2) + suite.Require().NoError(err) + suite.Require().True(added) - new, err = rts.syncer.AddSnapshot(peerCID, s2) - require.NoError(t, err) - require.False(t, new) + added, err = suite.syncer.AddSnapshot(peerCID, s2) + suite.Require().NoError(err) + suite.Require().False(added) // We start a sync, with peers sending back chunks when requested. We first reject the snapshot // with height 2 format 2, and accept the snapshot at height 1. 
- conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: &abci.Snapshot{ - Height: 2, - Format: 2, - Chunks: 3, - Hash: []byte{1}, - }, - AppHash: []byte("app_hash_2"), - }).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: &abci.Snapshot{ - Height: s.Height, - Format: s.Format, - Chunks: s.Chunks, - Hash: s.Hash, - Metadata: s.Metadata, - }, - AppHash: []byte("app_hash"), - }).Times(2).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) - - chunkRequests := make(map[uint32]int) - chunkRequestsMtx := sync.Mutex{} - - chunkProcessDone := make(chan struct{}) + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: &abci.Snapshot{ + Height: 2, + Version: 2, + Hash: []byte{1}, + }, + AppHash: []byte("app_hash_2"), + }). + Once(). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: &abci.Snapshot{ + Height: s.Height, + Version: s.Version, + Hash: s.Hash, + Metadata: s.Metadata, + }, + AppHash: []byte("app_hash"), + }). + Once(). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) + chunkRequests := make([]int, len(chunks)) go func() { - defer close(chunkProcessDone) - var seen int - for { - if seen >= 4 { - return - } - + for i := 0; i < 5; i++ { select { case <-ctx.Done(): - t.Logf("sent %d chunks", seen) return - case e := <-rts.chunkOutCh: + case e := <-suite.chunkOutCh: msg, ok := e.Message.(*ssproto.ChunkRequest) - assert.True(t, ok) + suite.Require().True(ok) - assert.EqualValues(t, 1, msg.Height) - assert.EqualValues(t, 1, msg.Format) - assert.LessOrEqual(t, msg.Index, uint32(len(chunks))) + suite.Require().EqualValues(1, msg.Height) + suite.Require().EqualValues(1, msg.Version) - added, err := rts.syncer.AddChunk(chunks[msg.Index]) - assert.NoError(t, err) - assert.True(t, added) + added, err := suite.syncer.AddChunk(chunks[msg.ChunkId[0]]) + suite.Require().NoError(err) + suite.Require().True(added) - chunkRequestsMtx.Lock() - chunkRequests[msg.Index]++ - chunkRequestsMtx.Unlock() - seen++ - t.Logf("added chunk (%d of 4): %d", seen, msg.Index) + chunkRequests[msg.ChunkId[0]]++ + + suite.T().Logf("added chunkID %x", msg.ChunkId) } } }() - - // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, - // which should cause it to keep the existing chunk 0 and 2, and restart restoration from - // beginning. We also wait for a little while, to exercise the retry logic in fetchChunks(). 
- conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 2, Chunk: []byte{1, 1, 2}, - }).Once().Run(func(args mock.Arguments) { time.Sleep(1 * time.Second) }).Return( - &abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT, - RefetchChunks: []uint32{1}, + var reqs []*abci.RequestApplySnapshotChunk + for i := 0; i < len(chunks); i++ { + reqs = append(reqs, &abci.RequestApplySnapshotChunk{ + ChunkId: chunks[i].ID, + Chunk: chunks[i].Chunk, + }) + } + resps := []*abci.ResponseApplySnapshotChunk{ + { + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + NextChunks: [][]byte{chunks[1].ID}, + }, + { + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + NextChunks: [][]byte{chunks[2].ID}, + }, + { + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + RefetchChunks: [][]byte{chunks[0].ID}, + }, + { + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + NextChunks: [][]byte{chunks[3].ID}, + }, + {Result: abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT}, + } + applySnapshotChunks := []struct { + req *abci.RequestApplySnapshotChunk + resp *abci.ResponseApplySnapshotChunk + }{ + {req: reqs[0], resp: resps[0]}, + {req: reqs[1], resp: resps[1]}, + {req: reqs[2], resp: resps[2]}, + {req: reqs[0], resp: resps[3]}, + {req: reqs[3], resp: resps[4]}, + } + for _, asc := range applySnapshotChunks { + suite.conn. + On("ApplySnapshotChunk", mock.Anything, asc.req). + Once(). + Return(asc.resp, nil) + } + suite.conn. + On("Info", mock.Anything, &proxy.RequestInfo). + Once(). + Return(&abci.ResponseInfo{ + AppVersion: testAppVersion, + LastBlockHeight: 1, + LastBlockAppHash: []byte("app_hash"), }, nil) - conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 0, Chunk: []byte{1, 1, 0}, - }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 1, Chunk: []byte{1, 1, 1}, - }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 2, Chunk: []byte{1, 1, 2}, - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - conn.On("Info", mock.Anything, &proxy.RequestInfo).Return(&abci.ResponseInfo{ - AppVersion: testAppVersion, - LastBlockHeight: 1, - LastBlockAppHash: []byte("app_hash"), - }, nil) - - newState, lastCommit, err := rts.syncer.SyncAny(ctx, 0, func() error { return nil }) - require.NoError(t, err) - - <-chunkProcessDone - - chunkRequestsMtx.Lock() - require.Equal(t, map[uint32]int{0: 1, 1: 2, 2: 1}, chunkRequests) - chunkRequestsMtx.Unlock() - - expectState := state - require.Equal(t, expectState, newState) - require.Equal(t, commit, lastCommit) + newState, lastCommit, err := suite.syncer.SyncAny(ctx, 0, func() error { return nil }) + suite.Require().NoError(err) - require.Equal(t, len(chunks), int(rts.syncer.processingSnapshot.Chunks)) - require.Equal(t, expectState.LastBlockHeight, rts.syncer.lastSyncedSnapshotHeight) - require.True(t, rts.syncer.avgChunkTime > 0) + suite.Require().Equal([]int{0: 2, 1: 1, 2: 1, 3: 1}, chunkRequests) - require.Equal(t, int64(rts.syncer.processingSnapshot.Chunks), rts.reactor.SnapshotChunksTotal()) - require.Equal(t, rts.syncer.lastSyncedSnapshotHeight, rts.reactor.SnapshotHeight()) - require.Equal(t, time.Duration(rts.syncer.avgChunkTime), 
rts.reactor.ChunkProcessAvgTime()) - require.Equal(t, int64(len(rts.syncer.snapshots.snapshots)), rts.reactor.TotalSnapshots()) - require.Equal(t, int64(0), rts.reactor.SnapshotChunksCount()) + expectState := state + suite.Require().Equal(expectState, newState) + suite.Require().Equal(commit, lastCommit) - conn.AssertExpectations(t) + suite.Require().Equal(expectState.LastBlockHeight, suite.syncer.lastSyncedSnapshotHeight) + suite.Require().True(suite.syncer.avgChunkTime > 0) } -func TestSyncer_SyncAny_noSnapshots(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return(tmbytes.HexBytes("app_hash"), nil) - - ctx, cancel := context.WithCancel(context.Background()) +func (suite *SyncerTestSuite) TestSyncAnyNoSnapshots() { + ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - rts := setup(ctx, t, nil, stateProvider, 2) - - _, _, err := rts.syncer.SyncAny(ctx, 0, func() error { return nil }) - require.Equal(t, errNoSnapshots, err) + _, _, err := suite.syncer.SyncAny(ctx, 0, func() error { return nil }) + suite.Require().Equal(errNoSnapshots, err) } -func TestSyncer_SyncAny_abort(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return(tmbytes.HexBytes("app_hash"), nil) - - ctx, cancel := context.WithCancel(context.Background()) +func (suite *SyncerTestSuite) TestSyncAnyAbort() { + ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - rts := setup(ctx, t, nil, stateProvider, 2) + suite.stateProvider. + On("AppHash", mock.Anything, mock.Anything). + Maybe(). + Return(tmbytes.HexBytes("app_hash"), nil) - s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + s := &snapshot{Height: 1, Version: 1, Hash: []byte{1, 2, 3}} peerID := types.NodeID("aa") - _, err := rts.syncer.AddSnapshot(peerID, s) - require.NoError(t, err) + _, err := suite.syncer.AddSnapshot(peerID, s) + suite.Require().NoError(err) - rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: toABCI(s), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: toABCI(s), AppHash: []byte("app_hash"), + }). + Once(). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) - require.Equal(t, errAbort, err) - rts.conn.AssertExpectations(t) + _, _, err = suite.syncer.SyncAny(ctx, 0, func() error { return nil }) + suite.Require().Equal(errAbort, err) } -func TestSyncer_SyncAny_reject(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return(tmbytes.HexBytes("app_hash"), nil) - - ctx, cancel := context.WithCancel(context.Background()) +func (suite *SyncerTestSuite) TestSyncAnyReject() { + ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - rts := setup(ctx, t, nil, stateProvider, 2) + suite.stateProvider. + On("AppHash", mock.Anything, mock.Anything). + Maybe(). 
+ Return(tmbytes.HexBytes("app_hash"), nil) // s22 is tried first, then s12, then s11, then errNoSnapshots - s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} - s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} - s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + s22 := &snapshot{Height: 2, Version: 2, Hash: []byte{1, 2, 3}} + s12 := &snapshot{Height: 1, Version: 2, Hash: []byte{1, 2, 3}} + s11 := &snapshot{Height: 1, Version: 1, Hash: []byte{1, 2, 3}} peerID := types.NodeID("aa") - _, err := rts.syncer.AddSnapshot(peerID, s22) - require.NoError(t, err) - - _, err = rts.syncer.AddSnapshot(peerID, s12) - require.NoError(t, err) - - _, err = rts.syncer.AddSnapshot(peerID, s11) - require.NoError(t, err) - - rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: toABCI(s22), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - - rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: toABCI(s12), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - - rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: toABCI(s11), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - - _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) - require.Equal(t, errNoSnapshots, err) - rts.conn.AssertExpectations(t) + _, err := suite.syncer.AddSnapshot(peerID, s22) + suite.Require().NoError(err) + + _, err = suite.syncer.AddSnapshot(peerID, s12) + suite.Require().NoError(err) + + _, err = suite.syncer.AddSnapshot(peerID, s11) + suite.Require().NoError(err) + + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: toABCI(s22), AppHash: []byte("app_hash"), + }). + Once(). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) + + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: toABCI(s12), AppHash: []byte("app_hash"), + }). + Once(). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) + + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: toABCI(s11), AppHash: []byte("app_hash"), + }). + Once(). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) + + _, _, err = suite.syncer.SyncAny(ctx, 0, func() error { return nil }) + suite.Require().Equal(errNoSnapshots, err) } -func TestSyncer_SyncAny_reject_format(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return(tmbytes.HexBytes("app_hash"), nil) - - ctx, cancel := context.WithCancel(context.Background()) +func (suite *SyncerTestSuite) TestSyncAnyRejectFormat() { + ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - rts := setup(ctx, t, nil, stateProvider, 2) + suite.stateProvider. + On("AppHash", mock.Anything, mock.Anything). + Maybe(). + Return(tmbytes.HexBytes("app_hash"), nil) // s22 is tried first, which reject s22 and s12, then s11 will abort. 
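// A minimal sketch of the candidate ordering this test depends on: newest
// height first, higher version first on ties, with snapshots of a rejected
// version filtered out. It mirrors the order the test expects (s22, then s12,
// then s11); the syncer's real ordering lives in its snapshot pool, so treat
// this as an illustration under those assumptions, not the implementation.
package main

import (
	"fmt"
	"sort"
)

type snap struct{ Height, Version uint64 }

func order(snaps []snap, rejectedVersions map[uint64]bool) []snap {
	var out []snap
	for _, s := range snaps {
		if !rejectedVersions[s.Version] {
			out = append(out, s)
		}
	}
	sort.Slice(out, func(i, j int) bool {
		if out[i].Height != out[j].Height {
			return out[i].Height > out[j].Height
		}
		return out[i].Version > out[j].Version
	})
	return out
}

func main() {
	snaps := []snap{{1, 1}, {1, 2}, {2, 2}}
	fmt.Println(order(snaps, nil))                      // [{2 2} {1 2} {1 1}]
	fmt.Println(order(snaps, map[uint64]bool{2: true})) // REJECT_FORMAT prunes version 2: [{1 1}]
}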
- s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} - s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} - s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + s22 := &snapshot{Height: 2, Version: 2, Hash: []byte{1, 2, 3}} + s12 := &snapshot{Height: 1, Version: 2, Hash: []byte{1, 2, 3}} + s11 := &snapshot{Height: 1, Version: 1, Hash: []byte{1, 2, 3}} peerID := types.NodeID("aa") - _, err := rts.syncer.AddSnapshot(peerID, s22) - require.NoError(t, err) + _, err := suite.syncer.AddSnapshot(peerID, s22) + suite.Require().NoError(err) - _, err = rts.syncer.AddSnapshot(peerID, s12) - require.NoError(t, err) + _, err = suite.syncer.AddSnapshot(peerID, s12) + suite.Require().NoError(err) - _, err = rts.syncer.AddSnapshot(peerID, s11) - require.NoError(t, err) + _, err = suite.syncer.AddSnapshot(peerID, s11) + suite.Require().NoError(err) - rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: toABCI(s22), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: toABCI(s22), AppHash: []byte("app_hash"), + }). + Once(). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: toABCI(s11), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: toABCI(s11), AppHash: []byte("app_hash"), + }). + Once(). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) - require.Equal(t, errAbort, err) - rts.conn.AssertExpectations(t) + _, _, err = suite.syncer.SyncAny(ctx, 0, func() error { return nil }) + suite.Require().Equal(errAbort, err) } -func TestSyncer_SyncAny_reject_sender(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return(tmbytes.HexBytes("app_hash"), nil) - - ctx, cancel := context.WithCancel(context.Background()) +func (suite *SyncerTestSuite) TestSyncAnyRejectSender() { + ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - rts := setup(ctx, t, nil, stateProvider, 2) + suite.stateProvider. + On("AppHash", mock.Anything, mock.Anything). + Maybe(). + Return(tmbytes.HexBytes("app_hash"), nil) peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") @@ -362,70 +419,77 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { // sbc will be offered first, which will be rejected with reject_sender, causing all snapshots // submitted by both b and c (i.e. sb, sc, sbc) to be rejected. Finally, sa will reject and // errNoSnapshots is returned. 
- sa := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - sb := &snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - sc := &snapshot{Height: 3, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - sbc := &snapshot{Height: 4, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - - _, err := rts.syncer.AddSnapshot(peerAID, sa) - require.NoError(t, err) - - _, err = rts.syncer.AddSnapshot(peerBID, sb) - require.NoError(t, err) - - _, err = rts.syncer.AddSnapshot(peerCID, sc) - require.NoError(t, err) - - _, err = rts.syncer.AddSnapshot(peerBID, sbc) - require.NoError(t, err) - - _, err = rts.syncer.AddSnapshot(peerCID, sbc) - require.NoError(t, err) - - rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: toABCI(sbc), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil) - - rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: toABCI(sa), AppHash: []byte("app_hash"), - }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) + sa := &snapshot{Height: 1, Version: 1, Hash: []byte{1, 2, 3}} + sb := &snapshot{Height: 2, Version: 1, Hash: []byte{1, 2, 3}} + sc := &snapshot{Height: 3, Version: 1, Hash: []byte{1, 2, 3}} + sbc := &snapshot{Height: 4, Version: 1, Hash: []byte{1, 2, 3}} + + snapshots := []struct { + peerID types.NodeID + snapshot *snapshot + }{ + {peerAID, sa}, + {peerBID, sb}, + {peerCID, sc}, + {peerBID, sbc}, + {peerCID, sbc}, + } + for _, s := range snapshots { + _, err := suite.syncer.AddSnapshot(s.peerID, s.snapshot) + suite.Require().NoError(err) + } - _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) - require.Equal(t, errNoSnapshots, err) - rts.conn.AssertExpectations(t) + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: toABCI(sbc), AppHash: []byte("app_hash"), + }). + Once(). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil) + + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: toABCI(sa), AppHash: []byte("app_hash"), + }). + Once(). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) + + _, _, err := suite.syncer.SyncAny(ctx, 0, func() error { return nil }) + suite.Require().Equal(errNoSnapshots, err) } -func TestSyncer_SyncAny_abciError(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return(tmbytes.HexBytes("app_hash"), nil) - - ctx, cancel := context.WithCancel(context.Background()) +func (suite *SyncerTestSuite) TestSyncAnyAbciError() { + ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - rts := setup(ctx, t, nil, stateProvider, 2) + suite.stateProvider. + On("AppHash", mock.Anything, mock.Anything). + Maybe(). + Return(tmbytes.HexBytes("app_hash"), nil) errBoom := errors.New("boom") - s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + s := &snapshot{Height: 1, Version: 1, Hash: []byte{1, 2, 3}} peerID := types.NodeID("aa") - _, err := rts.syncer.AddSnapshot(peerID, s) - require.NoError(t, err) + _, err := suite.syncer.AddSnapshot(peerID, s) + suite.Require().NoError(err) - rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: toABCI(s), AppHash: []byte("app_hash"), - }).Once().Return(nil, errBoom) + suite.conn. 
+ On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: toABCI(s), AppHash: []byte("app_hash"), + }). + Once(). + Return(nil, errBoom) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) - require.True(t, errors.Is(err, errBoom)) - rts.conn.AssertExpectations(t) + _, _, err = suite.syncer.SyncAny(ctx, 0, func() error { return nil }) + suite.Require().True(errors.Is(err, errBoom)) } -func TestSyncer_offerSnapshot(t *testing.T) { +func (suite *SyncerTestSuite) TestOfferSnapshot() { unknownErr := errors.New("unknown error") boom := errors.New("boom") - testcases := map[string]struct { + testCases := map[string]struct { result abci.ResponseOfferSnapshot_Result err error expectErr error @@ -440,300 +504,373 @@ func TestSyncer_offerSnapshot(t *testing.T) { "unknown non-zero": {9, nil, unknownErr}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(ctx) + for name, tc := range testCases { + suite.Run(name, func() { + suite.SetupTest() // reset + ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return(tmbytes.HexBytes("app_hash"), nil) + s := &snapshot{Height: 1, Version: 1, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} + suite.conn. + On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ + Snapshot: toABCI(s), + AppHash: []byte("app_hash"), + }). + Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err) - rts := setup(ctx, t, nil, stateProvider, 2) - - s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} - rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ - Snapshot: toABCI(s), - AppHash: []byte("app_hash"), - }).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err) - - err := rts.syncer.offerSnapshot(ctx, s) + err := suite.syncer.offerSnapshot(ctx, s) if tc.expectErr == unknownErr { - require.Error(t, err) + suite.Require().Error(err) } else { unwrapped := errors.Unwrap(err) if unwrapped != nil { err = unwrapped } - require.Equal(t, tc.expectErr, err) + suite.Require().Equal(tc.expectErr, err) } }) } } -func TestSyncer_applyChunks_Results(t *testing.T) { +func (suite *SyncerTestSuite) TestApplyChunksResults() { unknownErr := errors.New("unknown error") boom := errors.New("boom") - testcases := map[string]struct { - result abci.ResponseApplySnapshotChunk_Result + suite.stateProvider. + On("AppHash", mock.Anything, mock.Anything). + Maybe(). 
+ Return(tmbytes.HexBytes("app_hash"), nil) + + testCases := map[string]struct { + resps []*abci.ResponseApplySnapshotChunk err error expectErr error }{ - "accept": {abci.ResponseApplySnapshotChunk_ACCEPT, nil, nil}, - "abort": {abci.ResponseApplySnapshotChunk_ABORT, nil, errAbort}, - "retry": {abci.ResponseApplySnapshotChunk_RETRY, nil, nil}, - "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT, nil, errRetrySnapshot}, - "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT, nil, errRejectSnapshot}, - "unknown": {abci.ResponseApplySnapshotChunk_UNKNOWN, nil, unknownErr}, - "error": {0, boom, boom}, - "unknown non-zero": {9, nil, unknownErr}, + "accept": { + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_ACCEPT, NextChunks: [][]byte{{1}}}, + {Result: abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT}, + }, + }, + "abort": { + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_ABORT}, + }, + err: errAbort, + expectErr: errAbort, + }, + "retry": { + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_RETRY}, + {Result: abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT}, + }, + }, + "retry_snapshot": { + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, + }, + err: errRejectSnapshot, + expectErr: errRejectSnapshot, + }, + "reject_snapshot": { + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, + }, + err: errRejectSnapshot, + expectErr: errRejectSnapshot, + }, + "unknown": { + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_UNKNOWN}, + }, + err: unknownErr, + expectErr: unknownErr, + }, + "error": { + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_ACCEPT}, + }, + err: boom, + expectErr: boom, + }, + "unknown non-zero": { + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_ACCEPT}, + }, + err: unknownErr, + expectErr: unknownErr, + }, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - for name, tc := range testcases { + for name, tc := range testCases { tc := tc - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(ctx) + suite.Run(name, func() { + suite.SetupTest() // reset + ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return(tmbytes.HexBytes("app_hash"), nil) - - rts := setup(ctx, t, nil, stateProvider, 2) - + s := &snapshot{Height: 1, Version: 1, Hash: []byte{1, 2, 3}} body := []byte{1, 2, 3} - chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, t.TempDir()) - require.NoError(t, err) + chunks, err := newChunkQueue(s, suite.T().TempDir(), 10) + suite.Require().NoError(err) fetchStartTime := time.Now() - _, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) - require.NoError(t, err) - - rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 0, Chunk: body, - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err) - if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 0, Chunk: body, - }).Once().Return(&abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + 
c := &chunk{Height: 1, Version: 1, ID: []byte{0}, Chunk: body} + chunks.Enqueue(c.ID) + + for _, resp := range tc.resps { + suite.conn. + On("ApplySnapshotChunk", mock.Anything, mock.Anything). + Once(). + Return(resp, tc.err) } + go func() { + for i := 0; i < len(tc.resps); i++ { + for chunks.IsRequestQueueEmpty() { + time.Sleep(5 * time.Millisecond) + } + chunkID, err := chunks.Dequeue() + suite.Require().NoError(err) + added, err := chunks.Add(&chunk{Height: 1, Version: 1, ID: chunkID, Chunk: body}) + suite.Require().NoError(err) + suite.Require().True(added) + } + }() - err = rts.syncer.applyChunks(ctx, chunks, fetchStartTime) + err = suite.syncer.applyChunks(ctx, chunks, fetchStartTime) if tc.expectErr == unknownErr { - require.Error(t, err) + suite.Require().Error(err) } else { unwrapped := errors.Unwrap(err) if unwrapped != nil { err = unwrapped } - require.Equal(t, tc.expectErr, err) + suite.Require().Equal(tc.expectErr, err) } - - rts.conn.AssertExpectations(t) }) } } -func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { +func (suite *SyncerTestSuite) TestApplyChunksRefetchChunks() { + suite.stateProvider. + On("AppHash", mock.Anything, mock.Anything). + Maybe(). + Return(tmbytes.HexBytes("app_hash"), nil) + // Discarding chunks via refetch_chunks should work the same for all results - testcases := map[string]struct { - result abci.ResponseApplySnapshotChunk_Result + testCases := map[string]struct { + resp []*abci.ResponseApplySnapshotChunk }{ - "accept": {abci.ResponseApplySnapshotChunk_ACCEPT}, - "abort": {abci.ResponseApplySnapshotChunk_ABORT}, - "retry": {abci.ResponseApplySnapshotChunk_RETRY}, - "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, - "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, + "accept": { + resp: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_ACCEPT, NextChunks: [][]byte{{1}}}, + { + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + NextChunks: [][]byte{{2}}, + RefetchChunks: [][]byte{{1}}, + }, + {Result: abci.ResponseApplySnapshotChunk_ACCEPT}, + {Result: abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT}, + }, + }, + // TODO: disabled because refetch works the same for all results + //"abort": {abci.ResponseApplySnapshotChunk_ABORT}, + //"retry": {abci.ResponseApplySnapshotChunk_RETRY}, + //"retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, + //"reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return(tmbytes.HexBytes("app_hash"), nil) - - rts := setup(ctx, t, nil, stateProvider, 2) - chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, t.TempDir()) - require.NoError(t, err) + chunks := []*chunk{ + {Height: 1, Version: 1, ID: []byte{0}, Chunk: []byte{0}}, + {Height: 1, Version: 1, ID: []byte{1}, Chunk: []byte{1}}, + {Height: 1, Version: 1, ID: []byte{2}, Chunk: []byte{2}}, + } + ctx, cancel := context.WithCancel(suite.ctx) + defer cancel() + for name, tc := range testCases { + suite.Run(name, func() { + s := &snapshot{Height: 1, Version: 1, Hash: []byte{1, 2, 3}} + queue, err := newChunkQueue(s, suite.T().TempDir(), 1) + suite.Require().NoError(err) + queue.Enqueue(chunks[0].ID) fetchStartTime := time.Now() 
- - added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}}) - require.True(t, added) - require.NoError(t, err) - added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}}) - require.True(t, added) - require.NoError(t, err) - added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}}) - require.True(t, added) - require.NoError(t, err) - - // The first two chunks are accepted, before the last one asks for 1 to be refetched - rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 0, Chunk: []byte{0}, - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 1, Chunk: []byte{1}, - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 2, Chunk: []byte{2}, - }).Once().Return(&abci.ResponseApplySnapshotChunk{ - Result: tc.result, - RefetchChunks: []uint32{1}, - }, nil) - - // Since removing the chunk will cause Next() to block, we spawn a goroutine, then - // check the queue contents, and finally close the queue to end the goroutine. - // We don't really care about the result of applyChunks, since it has separate test. + for _, resp := range tc.resp { + suite.conn. + On("ApplySnapshotChunk", mock.Anything, mock.Anything). + Once(). + Return(resp, nil) + } go func() { - rts.syncer.applyChunks(ctx, chunks, fetchStartTime) //nolint:errcheck // purposefully ignore error + for i := 0; i < len(tc.resp); i++ { + for queue.IsRequestQueueEmpty() { + time.Sleep(10 * time.Millisecond) + } + chunkID, err := queue.Dequeue() + suite.Require().NoError(err) + added, err := queue.Add(chunks[int(chunkID[0])]) + suite.Require().NoError(err) + suite.Require().True(added) + } }() - - time.Sleep(50 * time.Millisecond) - require.True(t, chunks.Has(0)) - require.False(t, chunks.Has(1)) - require.True(t, chunks.Has(2)) - - require.NoError(t, chunks.Close()) + _ = suite.syncer.applyChunks(ctx, queue, fetchStartTime) + suite.Require().NoError(queue.Close()) }) } } -func TestSyncer_applyChunks_RejectSenders(t *testing.T) { - // Banning chunks senders via ban_chunk_senders should work the same for all results - testcases := map[string]struct { - result abci.ResponseApplySnapshotChunk_Result - }{ - "accept": {abci.ResponseApplySnapshotChunk_ACCEPT}, - "abort": {abci.ResponseApplySnapshotChunk_ABORT}, - "retry": {abci.ResponseApplySnapshotChunk_RETRY}, - "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, - "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return(tmbytes.HexBytes("app_hash"), nil) - - rts := setup(ctx, t, nil, stateProvider, 2) - - // Set up three peers across two snapshots, and ask for one of them to be banned. - // It should be banned from all snapshots. 
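// A minimal sketch (hypothetical types, not the PR's chunk queue) of the
// refetch_chunks handling the table above drives: chunk IDs listed in
// RefetchChunks are dropped from the received set and re-enqueued, so they get
// fetched and applied a second time.
package main

import "fmt"

type chunkQueue struct {
	received map[string][]byte // chunk ID -> stored chunk data
	requests [][]byte          // IDs waiting to be fetched again
}

// refetch discards the stored data for each listed chunk ID and re-queues the
// ID so the fetcher will request it once more.
func (q *chunkQueue) refetch(ids [][]byte) {
	for _, id := range ids {
		delete(q.received, string(id))
		q.requests = append(q.requests, id)
	}
}

func main() {
	q := &chunkQueue{received: map[string][]byte{"\x01": {1}}}
	q.refetch([][]byte{{1}})
	fmt.Printf("re-queued: %x, stored: %d\n", q.requests, len(q.received))
}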
- peerAID := types.NodeID("aa") - peerBID := types.NodeID("bb") - peerCID := types.NodeID("cc") - - s1 := &snapshot{Height: 1, Format: 1, Chunks: 3} - s2 := &snapshot{Height: 2, Format: 1, Chunks: 3} +func (suite *SyncerTestSuite) TestApplyChunksRejectSenders() { + suite.stateProvider. + On("AppHash", mock.Anything, mock.Anything). + Maybe(). + Return(tmbytes.HexBytes("app_hash"), nil) - _, err := rts.syncer.AddSnapshot(peerAID, s1) - require.NoError(t, err) + // Set up three peers across two snapshots, and ask for one of them to be banned. + // It should be banned from all snapshots. + peerAID := types.NodeID("aa") + peerBID := types.NodeID("bb") + peerCID := types.NodeID("cc") - _, err = rts.syncer.AddSnapshot(peerAID, s2) - require.NoError(t, err) + chunks := []*chunk{ + {Height: 1, Version: 1, ID: []byte{0}, Chunk: []byte{0}, Sender: peerAID}, + {Height: 1, Version: 1, ID: []byte{1}, Chunk: []byte{1}, Sender: peerBID}, + {Height: 1, Version: 1, ID: []byte{2}, Chunk: []byte{2}, Sender: peerCID}, + } - _, err = rts.syncer.AddSnapshot(peerBID, s1) - require.NoError(t, err) + s1 := &snapshot{Height: 1, Version: 1, Hash: []byte{1, 2, 3}} + s2 := &snapshot{Height: 2, Version: 1, Hash: []byte{1, 2, 3}} - _, err = rts.syncer.AddSnapshot(peerBID, s2) - require.NoError(t, err) + peerSnapshots := []struct { + peerID types.NodeID + snapshot []*snapshot + }{ + {peerID: peerAID, snapshot: []*snapshot{s1, s2}}, + {peerID: peerBID, snapshot: []*snapshot{s1, s2}}, + {peerID: peerCID, snapshot: []*snapshot{s1, s2}}, + } - _, err = rts.syncer.AddSnapshot(peerCID, s1) - require.NoError(t, err) + // Banning chunks senders via ban_chunk_senders should work the same for all results + testCases := map[string]struct { + chunks []*chunk + resps []*abci.ResponseApplySnapshotChunk + }{ + "accept": { + chunks: chunks, + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_ACCEPT, NextChunks: [][]byte{{1}}}, + { + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + NextChunks: [][]byte{{2}}, + RejectSenders: []string{string(peerBID)}, + }, + {Result: abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT}, + }, + }, + "abort": { + chunks: chunks, + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_ACCEPT, NextChunks: [][]byte{{1}}}, + {Result: abci.ResponseApplySnapshotChunk_ACCEPT, NextChunks: [][]byte{{2}}}, + {Result: abci.ResponseApplySnapshotChunk_ABORT, RejectSenders: []string{string(peerBID)}}, + }, + }, + "retry": { + chunks: chunks, + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_ACCEPT, NextChunks: [][]byte{{1}, {2}}}, + {Result: abci.ResponseApplySnapshotChunk_ACCEPT}, + {Result: abci.ResponseApplySnapshotChunk_RETRY, RejectSenders: []string{string(peerBID)}}, + {Result: abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT}, + }, + }, + "retry_snapshot": { + chunks: chunks, + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_ACCEPT, NextChunks: [][]byte{{1}, {2}}}, + {Result: abci.ResponseApplySnapshotChunk_ACCEPT}, + {Result: abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT, RejectSenders: []string{string(peerBID)}}, + }, + }, + "reject_snapshot": { + chunks: chunks, + resps: []*abci.ResponseApplySnapshotChunk{ + {Result: abci.ResponseApplySnapshotChunk_ACCEPT, NextChunks: [][]byte{{1}, {2}}}, + {Result: abci.ResponseApplySnapshotChunk_ACCEPT}, + {Result: abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT, RejectSenders: []string{string(peerBID)}}, + }, + }, + } + ctx, cancel 
:= context.WithCancel(suite.ctx) + defer cancel() - _, err = rts.syncer.AddSnapshot(peerCID, s2) - require.NoError(t, err) + for name, tc := range testCases { + tc := tc + suite.Run(name, func() { + for _, peerSnapshot := range peerSnapshots { + for _, s := range peerSnapshot.snapshot { + _, err := suite.syncer.AddSnapshot(peerSnapshot.peerID, s) + suite.Require().NoError(err) + } + } - chunks, err := newChunkQueue(s1, t.TempDir()) - require.NoError(t, err) + queue, err := newChunkQueue(s1, suite.T().TempDir(), 10) + suite.Require().NoError(err) + queue.Enqueue(tc.chunks[0].ID) fetchStartTime := time.Now() - added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}, Sender: peerAID}) - require.True(t, added) - require.NoError(t, err) - - added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}, Sender: peerBID}) - require.True(t, added) - require.NoError(t, err) - - added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}, Sender: peerCID}) - require.True(t, added) - require.NoError(t, err) - - // The first two chunks are accepted, before the last one asks for b sender to be rejected - rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 0, Chunk: []byte{0}, Sender: "aa", - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 1, Chunk: []byte{1}, Sender: "bb", - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 2, Chunk: []byte{2}, Sender: "cc", - }).Once().Return(&abci.ResponseApplySnapshotChunk{ - Result: tc.result, - RejectSenders: []string{string(peerBID)}, - }, nil) - - // On retry, the last chunk will be tried again, so we just accept it then. - if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ - Index: 2, Chunk: []byte{2}, Sender: "cc", - }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + go func() { + for i := 0; i < len(tc.resps); i++ { + for queue.IsRequestQueueEmpty() { + time.Sleep(10 * time.Millisecond) + } + chunkID, err := queue.Dequeue() + suite.Require().NoError(err) + added, err := queue.Add(chunks[int(chunkID[0])]) + suite.Require().True(added) + suite.Require().NoError(err) + } + }() + + for _, resp := range tc.resps { + suite.conn. + On("ApplySnapshotChunk", mock.Anything, mock.Anything). + Once(). + Return(resp, nil) } // We don't really care about the result of applyChunks, since it has separate test. // However, it will block on e.g. retry result, so we spawn a goroutine that will - // be shut down when the chunk queue closes. - go func() { - rts.syncer.applyChunks(ctx, chunks, fetchStartTime) //nolint:errcheck // purposefully ignore error - }() + // be shut down when the chunk requestQueue closes. 
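// The tests in this file synthesize the peer side with a goroutine that polls
// the queue for requested chunk IDs and adds the matching chunks while
// applyChunks runs on the test goroutine. A stripped-down sketch of that
// handshake, with a hypothetical queue type standing in for the syncer's:
package main

import (
	"fmt"
	"sync"
	"time"
)

type requestQueue struct {
	mu  sync.Mutex
	ids [][]byte
}

// dequeue pops the next requested chunk ID, reporting false when none is pending.
func (q *requestQueue) dequeue() ([]byte, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.ids) == 0 {
		return nil, false
	}
	id := q.ids[0]
	q.ids = q.ids[1:]
	return id, true
}

func main() {
	q := &requestQueue{ids: [][]byte{{0}, {1}, {2}}}
	applied := make(chan []byte, 3)
	go func() { // feeder: poll for requests and supply the matching chunk
		for supplied := 0; supplied < 3; {
			if id, ok := q.dequeue(); ok {
				applied <- id
				supplied++
				continue
			}
			time.Sleep(5 * time.Millisecond) // same polling interval style the tests use
		}
		close(applied)
	}()
	for id := range applied {
		fmt.Printf("applied chunk %x\n", id)
	}
}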
- time.Sleep(50 * time.Millisecond) + _ = suite.syncer.applyChunks(ctx, queue, fetchStartTime) - s1peers := rts.syncer.snapshots.GetPeers(s1) - require.Len(t, s1peers, 2) - require.EqualValues(t, "aa", s1peers[0]) - require.EqualValues(t, "cc", s1peers[1]) + s1peers := suite.syncer.snapshots.GetPeers(s1) + suite.Require().Len(s1peers, 2) + suite.Require().EqualValues(peerAID, s1peers[0]) + suite.Require().EqualValues(peerCID, s1peers[1]) - rts.syncer.snapshots.GetPeers(s1) - require.Len(t, s1peers, 2) - require.EqualValues(t, "aa", s1peers[0]) - require.EqualValues(t, "cc", s1peers[1]) + s1peers = suite.syncer.snapshots.GetPeers(s1) + suite.Require().Len(s1peers, 2) + suite.Require().EqualValues(peerAID, s1peers[0]) + suite.Require().EqualValues(peerCID, s1peers[1]) - require.NoError(t, chunks.Close()) + suite.Require().NoError(queue.Close()) }) } } -func TestSyncer_verifyApp(t *testing.T) { +func (suite *SyncerTestSuite) TestSyncerVerifyApp() { + ctx, cancel := context.WithCancel(suite.ctx) + defer cancel() + boom := errors.New("boom") const appVersion = 9 appVersionMismatchErr := errors.New("app version mismatch. Expected: 9, got: 2") - s := &snapshot{Height: 3, Format: 1, Chunks: 5, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} + s := &snapshot{Height: 3, Version: 1, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} - testcases := map[string]struct { + testCases := map[string]struct { response *abci.ResponseInfo err error expectErr error @@ -760,24 +897,19 @@ func TestSyncer_verifyApp(t *testing.T) { }, nil, errVerifyFailed}, "error": {nil, boom, boom}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - rts := setup(ctx, t, nil, nil, 2) - - rts.conn.On("Info", mock.Anything, &proxy.RequestInfo).Return(tc.response, tc.err) - err := rts.syncer.verifyApp(ctx, s, appVersion) + for name, tc := range testCases { + suite.Run(name, func() { + suite.conn. + On("Info", mock.Anything, &proxy.RequestInfo). + Once().
+ Return(tc.response, tc.err) + err := suite.syncer.verifyApp(ctx, s, appVersion) unwrapped := errors.Unwrap(err) if unwrapped != nil { err = unwrapped } - require.Equal(t, tc.expectErr, err) + suite.Require().Equal(tc.expectErr, err) }) } } @@ -785,9 +917,15 @@ func TestSyncer_verifyApp(t *testing.T) { func toABCI(s *snapshot) *abci.Snapshot { return &abci.Snapshot{ Height: s.Height, - Format: s.Format, - Chunks: s.Chunks, + Version: s.Version, Hash: s.Hash, Metadata: s.Metadata, } } + +func makeChannel(ID p2p.ChannelID, name string) (p2p.Channel, chan p2p.Envelope, chan p2p.Envelope, chan p2p.PeerError) { + inCh := make(chan p2p.Envelope, 1) + outCh := make(chan p2p.Envelope, 1) + errCh := make(chan p2p.PeerError, 1) + return p2p.NewChannel(ID, name, inCh, outCh, errCh), inCh, outCh, errCh +} diff --git a/libs/ds/ordered_map.go b/libs/ds/ordered_map.go new file mode 100644 index 0000000000..a9325a5590 --- /dev/null +++ b/libs/ds/ordered_map.go @@ -0,0 +1,77 @@ +package ds + +// OrderedMap is a map with a deterministic iteration order. +// This data structure is not thread-safe. +type OrderedMap[T comparable, V any] struct { + keys map[T]int + values []V +} + +// NewOrderedMap returns a new OrderedMap +func NewOrderedMap[T comparable, V any]() *OrderedMap[T, V] { + return &OrderedMap[T, V]{ + keys: make(map[T]int), + } +} + +// Put adds a key-value pair to the map +func (m *OrderedMap[T, V]) Put(key T, val V) { + i, ok := m.keys[key] + if ok { + m.values[i] = val + return + } + m.keys[key] = len(m.values) + m.values = append(m.values, val) +} + +// Get returns the value for a given key +func (m *OrderedMap[T, V]) Get(key T) (V, bool) { + i, ok := m.keys[key] + if !ok { + var v V + return v, false + } + return m.values[i], true +} + +// Has returns true if the map contains the given key +func (m *OrderedMap[T, V]) Has(key T) bool { + _, ok := m.keys[key] + return ok +} + +// Delete removes a key-value pair from the map and re-indexes the keys +// inserted after it, so the positions stored in the key map stay consistent +func (m *OrderedMap[T, V]) Delete(key T) { + i, ok := m.keys[key] + if !ok { + return + } + delete(m.keys, key) + m.values = append(m.values[:i], m.values[i+1:]...) + for k, idx := range m.keys { + if idx > i { + m.keys[k] = idx - 1 + } + } +} + +// Values returns all values in the map +func (m *OrderedMap[T, V]) Values() []V { + return append([]V{}, m.values...)
+} + +// Keys returns all keys in the map +func (m *OrderedMap[T, V]) Keys() []T { + keys := make([]T, len(m.keys)) + for k, v := range m.keys { + keys[v] = k + } + return keys +} + +// Len returns the number of entries in the map +func (m *OrderedMap[T, V]) Len() int { + return len(m.keys) +} diff --git a/libs/ds/ordered_map_test.go b/libs/ds/ordered_map_test.go new file mode 100644 index 0000000000..15a96f0252 --- /dev/null +++ b/libs/ds/ordered_map_test.go @@ -0,0 +1,42 @@ +package ds + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestOrderedMap(t *testing.T) { + om := NewOrderedMap[string, int]() + require.Equal(t, 0, om.Len()) + _, ok := om.Get("a") + require.False(t, ok) + require.False(t, om.Has("a")) + om.Put("a", 1) + require.True(t, om.Has("a")) + require.Equal(t, 1, om.Len()) + val, ok := om.Get("a") + require.Equal(t, 1, val) + require.True(t, ok) + require.Equal(t, 1, om.Len()) + om.Put("a", 2) + val, ok = om.Get("a") + require.Equal(t, 2, val) + require.True(t, ok) + require.Equal(t, 1, om.Len()) + om.Put("b", 3) + val, ok = om.Get("b") + require.Equal(t, 3, val) + require.True(t, ok) + require.Equal(t, 2, om.Len()) + + require.Equal(t, []int{2, 3}, om.Values()) + require.Equal(t, []string{"a", "b"}, om.Keys()) + + om.Delete("b") + require.Equal(t, []int{2}, om.Values()) + require.Equal(t, []string{"a"}, om.Keys()) + + // delete unknown key + om.Delete("c") +} diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 6fffa43032..0c72d7d4c9 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -159,9 +159,9 @@ message RequestOfferSnapshot { // Used during state sync to retrieve snapshot chunks from peers. message RequestLoadSnapshotChunk { - uint64 height = 1; // The height of the snapshot the chunks belongs to. - uint32 format = 2; // The application-specific format of the snapshot the chunk belongs to. - uint32 chunk = 3; // The chunk index, starting from 0 for the initial chunk. + uint64 height = 1; // The height of the snapshot the chunk belongs to. + uint32 version = 2; // The application-specific version of the snapshot the chunk belongs to. + bytes chunk_id = 3; // The chunk ID is a hash of a node of the snapshot's subtree. } // Applies a snapshot chunk. @@ -177,9 +177,9 @@ message RequestLoadSnapshotChunk { // it will reject the snapshot and try a different one via OfferSnapshot. The application should be prepared to reset // and accept it or abort as appropriate. message RequestApplySnapshotChunk { - uint32 index = 1; // The chunk index, starting from 0. Tenderdash applies chunks sequentially. - bytes chunk = 2; // The binary chunk contents, as returned by LoadSnapshotChunk. - string sender = 3; // The P2P ID of the node who sent this chunk. + bytes chunk_id = 1; // The ID of the chunk being applied. + bytes chunk = 2; // The binary chunk contents, as returned by LoadSnapshotChunk. + string sender = 3; // The P2P ID of the node who sent this chunk. } // Prepare new block proposal, potentially altering list of transactions. @@ -605,19 +605,22 @@ message ResponseApplySnapshotChunk { Result result = 1; // The result of applying this chunk. // Refetch and reapply the given chunks, regardless of `result`. // Only the listed chunks will be refetched, and reapplied in sequential order. - repeated uint32 refetch_chunks = 2; + repeated bytes refetch_chunks = 2; // Reject the given P2P senders, regardless of `Result`.
Any chunks already applied will not be refetched // unless explicitly requested, but queued chunks from these senders will be discarded, and new chunks // or other snapshots rejected. repeated string reject_senders = 3; + // Next chunks provides the list of chunks that should be requested next, if any. + repeated bytes next_chunks = 4; enum Result { - UNKNOWN = 0; // Unknown result, abort all snapshot restoration - ACCEPT = 1; // Chunk successfully accepted - ABORT = 2; // Abort all snapshot restoration - RETRY = 3; // Retry chunk (combine with refetch and reject) - RETRY_SNAPSHOT = 4; // Retry snapshot (combine with refetch and reject) - REJECT_SNAPSHOT = 5; // Reject this snapshot, try others + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // Chunk successfully accepted + ABORT = 2; // Abort all snapshot restoration + RETRY = 3; // Retry chunk (combine with refetch and reject) + RETRY_SNAPSHOT = 4; // Retry snapshot (combine with refetch and reject) + REJECT_SNAPSHOT = 5; // Reject this snapshot, try others + COMPLETE_SNAPSHOT = 6; // Complete this snapshot, no more chunks } } @@ -825,8 +828,7 @@ message Misbehavior { message Snapshot { uint64 height = 1; // The height at which the snapshot was taken - uint32 format = 2; // The application-specific snapshot format - uint32 chunks = 3; // Number of chunks in the snapshot + uint32 version = 2; // The application-specific snapshot version bytes hash = 4; // Arbitrary snapshot hash, equal only if identical bytes metadata = 5; // Arbitrary application metadata } diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go index 6f52e1dbb1..16e03447ec 100644 --- a/proto/tendermint/statesync/types.pb.go +++ b/proto/tendermint/statesync/types.pb.go @@ -62,10 +62,9 @@ var xxx_messageInfo_SnapshotsRequest proto.InternalMessageInfo type SnapshotsResponse struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` - Chunks uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"` - Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` - Metadata []byte `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + Hash []byte `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"` + Metadata []byte `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` } func (m *SnapshotsResponse) Reset() { *m = SnapshotsResponse{} } @@ -108,16 +107,9 @@ func (m *SnapshotsResponse) GetHeight() uint64 { return 0 } -func (m *SnapshotsResponse) GetFormat() uint32 { +func (m *SnapshotsResponse) GetVersion() uint32 { if m != nil { - return m.Format - } - return 0 -} - -func (m *SnapshotsResponse) GetChunks() uint32 { - if m != nil { - return m.Chunks + return m.Version } return 0 } @@ -137,9 +129,9 @@ func (m *SnapshotsResponse) GetMetadata() []byte { } type ChunkRequest struct { - Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` - Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + ChunkId []byte 
`protobuf:"bytes,3,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"` } func (m *ChunkRequest) Reset() { *m = ChunkRequest{} } @@ -182,24 +174,24 @@ func (m *ChunkRequest) GetHeight() uint64 { return 0 } -func (m *ChunkRequest) GetFormat() uint32 { +func (m *ChunkRequest) GetVersion() uint32 { if m != nil { - return m.Format + return m.Version } return 0 } -func (m *ChunkRequest) GetIndex() uint32 { +func (m *ChunkRequest) GetChunkId() []byte { if m != nil { - return m.Index + return m.ChunkId } - return 0 + return nil } type ChunkResponse struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` - Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + ChunkId []byte `protobuf:"bytes,3,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"` Chunk []byte `protobuf:"bytes,4,opt,name=chunk,proto3" json:"chunk,omitempty"` Missing bool `protobuf:"varint,5,opt,name=missing,proto3" json:"missing,omitempty"` } @@ -244,18 +236,18 @@ func (m *ChunkResponse) GetHeight() uint64 { return 0 } -func (m *ChunkResponse) GetFormat() uint32 { +func (m *ChunkResponse) GetVersion() uint32 { if m != nil { - return m.Format + return m.Version } return 0 } -func (m *ChunkResponse) GetIndex() uint32 { +func (m *ChunkResponse) GetChunkId() []byte { if m != nil { - return m.Index + return m.ChunkId } - return 0 + return nil } func (m *ChunkResponse) GetChunk() []byte { @@ -470,34 +462,34 @@ func init() { func init() { proto.RegisterFile("tendermint/statesync/types.proto", fileDescriptor_a1c2869546ca7914) } var fileDescriptor_a1c2869546ca7914 = []byte{ - // 420 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xcf, 0x8b, 0xd3, 0x40, - 0x14, 0xce, 0x68, 0x76, 0x5d, 0x5e, 0xb7, 0xba, 0x3b, 0x14, 0x09, 0x61, 0x8d, 0x31, 0x17, 0x0b, - 0x42, 0x02, 0xeb, 0x51, 0xbc, 0x6c, 0xaf, 0x1e, 0x24, 0xd5, 0x8b, 0x97, 0x32, 0x4d, 0xc7, 0x24, - 0xd8, 0xcc, 0xc4, 0xcc, 0x04, 0x2c, 0x78, 0xf5, 0xe4, 0xc5, 0x3f, 0xab, 0xc7, 0x1e, 0x3d, 0x89, - 0xb4, 0xff, 0x88, 0xcc, 0x8f, 0xb6, 0xc1, 0x16, 0x8a, 0xde, 0xde, 0xf7, 0xcd, 0xf7, 0xbe, 0x79, - 0x6f, 0xf8, 0x06, 0x42, 0x49, 0xd9, 0x8c, 0x36, 0x55, 0xc9, 0x64, 0x22, 0x24, 0x91, 0x54, 0x2c, - 0x58, 0x96, 0xc8, 0x45, 0x4d, 0x45, 0x5c, 0x37, 0x5c, 0x72, 0x3c, 0xd8, 0x2b, 0xe2, 0x9d, 0xc2, - 0x1f, 0xe4, 0x3c, 0xe7, 0x5a, 0x90, 0xa8, 0xca, 0x68, 0xfd, 0x9b, 0x8e, 0x9b, 0xf6, 0xe8, 0x3a, - 0xf9, 0x4f, 0x0e, 0x4e, 0x6b, 0xd2, 0x90, 0xca, 0x1e, 0x47, 0x18, 0xae, 0xc6, 0x8c, 0xd4, 0xa2, - 0xe0, 0x52, 0xa4, 0xf4, 0x73, 0x4b, 0x85, 0x8c, 0xbe, 0x23, 0xb8, 0xee, 0x90, 0xa2, 0xe6, 0x4c, - 0x50, 0xfc, 0x18, 0xce, 0x0b, 0x5a, 0xe6, 0x85, 0xf4, 0x50, 0x88, 0x86, 0x6e, 0x6a, 0x91, 0xe2, - 0x3f, 0xf2, 0xa6, 0x22, 0xd2, 0xbb, 0x17, 0xa2, 0x61, 0x3f, 0xb5, 0x48, 0xf1, 0x59, 0xd1, 0xb2, - 0x4f, 0xc2, 0xbb, 0x6f, 0x78, 0x83, 0x30, 0x06, 0xb7, 0x20, 0xa2, 0xf0, 0xdc, 0x10, 0x0d, 0x2f, - 0x53, 0x5d, 0x63, 0x1f, 0x2e, 0x2a, 0x2a, 0xc9, 0x8c, 0x48, 0xe2, 0x9d, 0x69, 0x7e, 0x87, 0xa3, - 0x77, 0x70, 0x39, 0x52, 0x9d, 0x76, 0xba, 0x7f, 0x9e, 0x63, 0x00, 0x67, 0x25, 0x9b, 0xd1, 0x2f, - 0x76, 0x0c, 0x03, 0xa2, 0x6f, 0x08, 0xfa, 0xd6, 0xf6, 0x3f, 0xf7, 0x3b, 0xea, 0xab, 0x58, 0xbd, - 0xa7, 0x5d, 0xcf, 0x00, 0xec, 0xc1, 0x83, 0xaa, 0x14, 0xa2, 0x64, 0xb9, 0x5e, 0xef, 0x22, 0xdd, - 0xc2, 0xe8, 0x05, 0x5c, 
0xbf, 0x51, 0xd7, 0xdc, 0xcd, 0x79, 0x76, 0x6a, 0xc5, 0x68, 0x0c, 0xb8, - 0x2b, 0xb6, 0x83, 0xbf, 0x86, 0xde, 0x5c, 0xb1, 0x93, 0xa9, 0xa2, 0x75, 0x4b, 0xef, 0xf6, 0x26, - 0xee, 0x24, 0xc8, 0xe4, 0xa1, 0xd3, 0x0a, 0xf3, 0x5d, 0x1d, 0x3d, 0x87, 0xfe, 0x5b, 0x9d, 0x88, - 0x53, 0xb7, 0x7f, 0x85, 0x87, 0x5b, 0xe1, 0x89, 0x27, 0x4b, 0xe1, 0x2a, 0x53, 0x02, 0x26, 0x5a, - 0x31, 0x31, 0x71, 0xd3, 0x8f, 0xd7, 0xbb, 0x7d, 0x76, 0x38, 0xd6, 0x68, 0xab, 0x34, 0xe6, 0x77, - 0xee, 0xf2, 0xd7, 0x53, 0x27, 0x7d, 0x94, 0xfd, 0x45, 0xbf, 0x5f, 0xae, 0x03, 0xb4, 0x5a, 0x07, - 0xe8, 0xf7, 0x3a, 0x40, 0x3f, 0x36, 0x81, 0xb3, 0xda, 0x04, 0xce, 0xcf, 0x4d, 0xe0, 0x7c, 0x78, - 0x95, 0x97, 0xb2, 0x68, 0xa7, 0x71, 0xc6, 0xab, 0xa4, 0x1b, 0xf6, 0x7d, 0x69, 0xbe, 0xcc, 0xb1, - 0x4f, 0x37, 0x3d, 0xd7, 0x67, 0x2f, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x04, 0xd6, 0xcd, 0xf3, - 0x93, 0x03, 0x00, 0x00, + // 418 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xc1, 0xaa, 0xd3, 0x40, + 0x14, 0xcd, 0x68, 0xde, 0x7b, 0xe5, 0xf6, 0x55, 0x5f, 0x87, 0x22, 0x31, 0xd4, 0x18, 0xb3, 0x31, + 0x20, 0x24, 0x50, 0x97, 0xe2, 0xa6, 0x5d, 0x09, 0x2e, 0x24, 0xc5, 0x8d, 0x2e, 0xca, 0x34, 0x19, + 0x92, 0x60, 0x33, 0x13, 0x33, 0x13, 0xa1, 0xe0, 0x27, 0xb8, 0xf0, 0xb3, 0xba, 0xec, 0xd2, 0x95, + 0x48, 0xfb, 0x23, 0x92, 0x49, 0xd2, 0x06, 0xeb, 0xa3, 0xd0, 0xdd, 0x3d, 0x67, 0xce, 0x9c, 0x7b, + 0xef, 0x70, 0x06, 0x6c, 0x49, 0x59, 0x44, 0x8b, 0x2c, 0x65, 0xd2, 0x17, 0x92, 0x48, 0x2a, 0xd6, + 0x2c, 0xf4, 0xe5, 0x3a, 0xa7, 0xc2, 0xcb, 0x0b, 0x2e, 0x39, 0x1e, 0x1d, 0x15, 0xde, 0x41, 0x61, + 0x8e, 0x62, 0x1e, 0x73, 0x25, 0xf0, 0xab, 0xaa, 0xd6, 0x9a, 0xe3, 0x8e, 0x9b, 0xf2, 0xe8, 0x3a, + 0x99, 0xcf, 0x4e, 0x4e, 0x73, 0x52, 0x90, 0xac, 0x39, 0x76, 0x30, 0xdc, 0xcd, 0x19, 0xc9, 0x45, + 0xc2, 0xa5, 0x08, 0xe8, 0xd7, 0x92, 0x0a, 0xe9, 0x94, 0x30, 0xec, 0x70, 0x22, 0xe7, 0x4c, 0x50, + 0xfc, 0x04, 0xae, 0x13, 0x9a, 0xc6, 0x89, 0x34, 0x90, 0x8d, 0x5c, 0x3d, 0x68, 0x10, 0x36, 0xe0, + 0xe6, 0x1b, 0x2d, 0x44, 0xca, 0x99, 0xf1, 0xc0, 0x46, 0xee, 0x20, 0x68, 0x21, 0xc6, 0xa0, 0x27, + 0x44, 0x24, 0xc6, 0x43, 0x1b, 0xb9, 0xb7, 0x81, 0xaa, 0xb1, 0x09, 0xbd, 0x8c, 0x4a, 0x12, 0x11, + 0x49, 0x0c, 0x5d, 0xf1, 0x07, 0xec, 0x7c, 0x86, 0xdb, 0x59, 0x52, 0xb2, 0x2f, 0xcd, 0x18, 0x17, + 0x74, 0x7c, 0x0a, 0xbd, 0xb0, 0x72, 0x58, 0xa4, 0x51, 0xd3, 0xf5, 0x46, 0xe1, 0x77, 0x91, 0xf3, + 0x03, 0xc1, 0xa0, 0x71, 0xbf, 0x78, 0xa1, 0xfb, 0xed, 0xf1, 0x08, 0xae, 0x54, 0xd9, 0x2c, 0x55, + 0x83, 0xca, 0x2a, 0x4b, 0x85, 0x48, 0x59, 0x6c, 0x5c, 0xd9, 0xc8, 0xed, 0x05, 0x2d, 0x74, 0x5e, + 0xc1, 0xf0, 0x7d, 0xd5, 0x6d, 0xba, 0xe2, 0xe1, 0xb9, 0x85, 0x9d, 0x39, 0xe0, 0xae, 0xb8, 0x99, + 0xff, 0x2d, 0xf4, 0x57, 0x15, 0xbb, 0x58, 0x56, 0xb4, 0xba, 0xd2, 0x9f, 0x8c, 0xbd, 0x4e, 0x70, + 0xea, 0x18, 0x74, 0xae, 0xc2, 0xea, 0x50, 0x3b, 0x2f, 0x61, 0xf0, 0x41, 0x05, 0xe1, 0x5c, 0xf7, + 0xef, 0xf0, 0xa8, 0x15, 0x9e, 0x79, 0xb9, 0x00, 0xee, 0xc2, 0x4a, 0xc0, 0x44, 0x29, 0x16, 0x75, + 0xca, 0xd4, 0x13, 0xf6, 0x27, 0x2f, 0x4e, 0xc7, 0x9a, 0xb5, 0xca, 0xda, 0x7c, 0xaa, 0x6f, 0x7e, + 0x3f, 0xd7, 0x82, 0xc7, 0xe1, 0x3f, 0xf4, 0xc7, 0xcd, 0xce, 0x42, 0xdb, 0x9d, 0x85, 0xfe, 0xec, + 0x2c, 0xf4, 0x73, 0x6f, 0x69, 0xdb, 0xbd, 0xa5, 0xfd, 0xda, 0x5b, 0xda, 0xa7, 0x37, 0x71, 0x2a, + 0x93, 0x72, 0xe9, 0x85, 0x3c, 0xf3, 0xbb, 0x19, 0x3f, 0x96, 0xf5, 0x4f, 0xf9, 0xdf, 0x5f, 0x5b, + 0x5e, 0xab, 0xb3, 0xd7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc2, 0xc9, 0x7f, 0x18, 0x8a, 0x03, + 0x00, 0x00, } func (m *SnapshotsRequest) Marshal() (dAtA []byte, err 
error) { @@ -548,22 +540,17 @@ func (m *SnapshotsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.Metadata) i = encodeVarintTypes(dAtA, i, uint64(len(m.Metadata))) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x22 } if len(m.Hash) > 0 { i -= len(m.Hash) copy(dAtA[i:], m.Hash) i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x1a } - if m.Chunks != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Chunks)) - i-- - dAtA[i] = 0x18 - } - if m.Format != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + if m.Version != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Version)) i-- dAtA[i] = 0x10 } @@ -595,13 +582,15 @@ func (m *ChunkRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Index != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + if len(m.ChunkId) > 0 { + i -= len(m.ChunkId) + copy(dAtA[i:], m.ChunkId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChunkId))) i-- - dAtA[i] = 0x18 + dAtA[i] = 0x1a } - if m.Format != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + if m.Version != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Version)) i-- dAtA[i] = 0x10 } @@ -650,13 +639,15 @@ func (m *ChunkResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if m.Index != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + if len(m.ChunkId) > 0 { + i -= len(m.ChunkId) + copy(dAtA[i:], m.ChunkId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChunkId))) i-- - dAtA[i] = 0x18 + dAtA[i] = 0x1a } - if m.Format != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + if m.Version != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Version)) i-- dAtA[i] = 0x10 } @@ -826,11 +817,8 @@ func (m *SnapshotsResponse) Size() (n int) { if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } - if m.Format != 0 { - n += 1 + sovTypes(uint64(m.Format)) - } - if m.Chunks != 0 { - n += 1 + sovTypes(uint64(m.Chunks)) + if m.Version != 0 { + n += 1 + sovTypes(uint64(m.Version)) } l = len(m.Hash) if l > 0 { @@ -852,11 +840,12 @@ func (m *ChunkRequest) Size() (n int) { if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } - if m.Format != 0 { - n += 1 + sovTypes(uint64(m.Format)) + if m.Version != 0 { + n += 1 + sovTypes(uint64(m.Version)) } - if m.Index != 0 { - n += 1 + sovTypes(uint64(m.Index)) + l = len(m.ChunkId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } return n } @@ -870,11 +859,12 @@ func (m *ChunkResponse) Size() (n int) { if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } - if m.Format != 0 { - n += 1 + sovTypes(uint64(m.Format)) + if m.Version != 0 { + n += 1 + sovTypes(uint64(m.Version)) } - if m.Index != 0 { - n += 1 + sovTypes(uint64(m.Index)) + l = len(m.ChunkId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } l = len(m.Chunk) if l > 0 { @@ -1043,9 +1033,9 @@ func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - m.Format = 0 + m.Version = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1055,31 +1045,12 @@ func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Format |= uint32(b&0x7F) << shift + m.Version |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - m.Chunks = 0 - for shift := uint(0); 
; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Chunks |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } @@ -1113,7 +1084,7 @@ func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { m.Hash = []byte{} } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } @@ -1218,9 +1189,9 @@ func (m *ChunkRequest) Unmarshal(dAtA []byte) error { } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - m.Format = 0 + m.Version = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1230,16 +1201,16 @@ func (m *ChunkRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Format |= uint32(b&0x7F) << shift + m.Version |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkId", wireType) } - m.Index = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1249,11 +1220,26 @@ func (m *ChunkRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint32(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkId = append(m.ChunkId[:0], dAtA[iNdEx:postIndex]...) + if m.ChunkId == nil { + m.ChunkId = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -1325,9 +1311,9 @@ func (m *ChunkResponse) Unmarshal(dAtA []byte) error { } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - m.Format = 0 + m.Version = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1337,16 +1323,16 @@ func (m *ChunkResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Format |= uint32(b&0x7F) << shift + m.Version |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkId", wireType) } - m.Index = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -1356,11 +1342,26 @@ func (m *ChunkResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint32(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkId = append(m.ChunkId[:0], dAtA[iNdEx:postIndex]...) 
+			if m.ChunkId == nil {
+				m.ChunkId = []byte{}
+			}
+			iNdEx = postIndex
 		case 4:
 			if wireType != 2 {
 				return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType)
 			}
diff --git a/proto/tendermint/statesync/types.proto b/proto/tendermint/statesync/types.proto
index 8b8bf1ccbf..764595cc2a 100644
--- a/proto/tendermint/statesync/types.proto
+++ b/proto/tendermint/statesync/types.proto
@@ -11,24 +11,23 @@ message SnapshotsRequest {}
 
 message SnapshotsResponse {
   uint64 height   = 1;
-  uint32 format   = 2;
-  uint32 chunks   = 3;
-  bytes  hash     = 4;
-  bytes  metadata = 5;
+  uint32 version  = 2;
+  bytes  hash     = 3;
+  bytes  metadata = 4;
 }
 
 message ChunkRequest {
-  uint64 height = 1;
-  uint32 format = 2;
-  uint32 index  = 3;
+  uint64 height   = 1;
+  uint32 version  = 2;
+  bytes  chunk_id = 3;
 }
 
 message ChunkResponse {
-  uint64 height  = 1;
-  uint32 format  = 2;
-  uint32 index   = 3;
-  bytes  chunk   = 4;
-  bool   missing = 5;
+  uint64 height   = 1;
+  uint32 version  = 2;
+  bytes  chunk_id = 3;
+  bytes  chunk    = 4;
+  bool   missing  = 5;
 }
 
 message LightBlockRequest {
diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go
index fb70ca9d93..9f250c53d4 100644
--- a/rpc/client/mock/status_test.go
+++ b/rpc/client/mock/status_test.go
@@ -31,7 +31,6 @@ func TestStatus(t *testing.T) {
 			ChunkProcessAvgTime: time.Duration(10),
 			SnapshotHeight:      10,
 			SnapshotChunksCount: 9,
-			SnapshotChunksTotal: 10,
 			BackFilledBlocks:    9,
 			BackFillBlocksTotal: 10,
 		},
@@ -69,7 +68,6 @@ func TestStatus(t *testing.T) {
 	assert.EqualValues(t, time.Duration(10), st.SyncInfo.ChunkProcessAvgTime)
 	assert.EqualValues(t, 10, st.SyncInfo.SnapshotHeight)
 	assert.EqualValues(t, 9, status.SyncInfo.SnapshotChunksCount)
-	assert.EqualValues(t, 10, status.SyncInfo.SnapshotChunksTotal)
 	assert.EqualValues(t, 9, status.SyncInfo.BackFilledBlocks)
 	assert.EqualValues(t, 10, status.SyncInfo.BackFillBlocksTotal)
 }
diff --git a/rpc/coretypes/responses.go b/rpc/coretypes/responses.go
index 565394362d..04652b3c65 100644
--- a/rpc/coretypes/responses.go
+++ b/rpc/coretypes/responses.go
@@ -111,7 +111,6 @@ type SyncInfo struct {
 	ChunkProcessAvgTime time.Duration `json:"chunk_process_avg_time,string"`
 	SnapshotHeight      int64         `json:"snapshot_height,string"`
 	SnapshotChunksCount int64         `json:"snapshot_chunks_count,string"`
-	SnapshotChunksTotal int64         `json:"snapshot_chunks_total,string"`
 	BackFilledBlocks    int64         `json:"backfilled_blocks,string"`
 	BackFillBlocksTotal int64         `json:"backfill_blocks_total,string"`
 }
diff --git a/spec/abci++/api.md b/spec/abci++/api.md
index 43a1d3e13e..eebd8feef0 100644
--- a/spec/abci++/api.md
+++ b/spec/abci++/api.md
@@ -267,7 +267,7 @@ Applies a snapshot chunk.
 | Field | Type | Label | Description |
 | ----- | ---- | ----- | ----------- |
-| index | [uint32](#uint32) | | The chunk index, starting from 0. Tenderdash applies chunks sequentially. |
+| chunk_id | [bytes](#bytes) | | The ID of the chunk to apply; a hash of a node of the snapshot subtree. |
 | chunk | [bytes](#bytes) | | The binary chunk contents, as returned by LoadSnapshotChunk. |
 | sender | [string](#string) | | The P2P ID of the node who sent this chunk. |
@@ -499,8 +499,8 @@ Used during state sync to retrieve snapshot chunks from peers.
 | Field | Type | Label | Description |
 | ----- | ---- | ----- | ----------- |
 | height | [uint64](#uint64) | | The height of the snapshot the chunks belongs to. |
-| format | [uint32](#uint32) | | The application-specific format of the snapshot the chunk belongs to. |
-| chunk | [uint32](#uint32) | | The chunk index, starting from 0 for the initial chunk. |
+| version | [uint32](#uint32) | | The application-specific version of the snapshot the chunk belongs to. |
+| chunk_id | [bytes](#bytes) | | The chunk ID: a hash of a node of the snapshot subtree. |
@@ -819,8 +819,9 @@ from this condition, but not sure), and _p_ receives a Precommit message for rou
 | Field | Type | Label | Description |
 | ----- | ---- | ----- | ----------- |
 | result | [ResponseApplySnapshotChunk.Result](#tendermint-abci-ResponseApplySnapshotChunk-Result) | | The result of applying this chunk. |
-| refetch_chunks | [uint32](#uint32) | repeated | Refetch and reapply the given chunks, regardless of `result`. Only the listed chunks will be refetched, and reapplied in sequential order. |
+| refetch_chunks | [bytes](#bytes) | repeated | Refetch and reapply the given chunks, regardless of `result`. Only the listed chunks will be refetched, and reapplied in the order listed. |
 | reject_senders | [string](#string) | repeated | Reject the given P2P senders, regardless of `Result`. Any chunks already applied will not be refetched unless explicitly requested, but queued chunks from these senders will be discarded, and new chunks or other snapshots rejected. |
+| next_chunks | [bytes](#bytes) | repeated | The list of chunk IDs that should be requested next, if any. |
@@ -1088,8 +1089,7 @@ nondeterministic
 | Field | Type | Label | Description |
 | ----- | ---- | ----- | ----------- |
 | height | [uint64](#uint64) | | The height at which the snapshot was taken |
-| format | [uint32](#uint32) | | The application-specific snapshot format |
-| chunks | [uint32](#uint32) | | Number of chunks in the snapshot |
+| version | [uint32](#uint32) | | The application-specific snapshot version |
 | hash | [bytes](#bytes) | | Arbitrary snapshot hash, equal only if identical |
 | metadata | [bytes](#bytes) | | Arbitrary application metadata |
@@ -1256,6 +1256,7 @@ Type of transaction check
 | RETRY | 3 | Retry chunk (combine with refetch and reject) |
 | RETRY_SNAPSHOT | 4 | Retry snapshot (combine with refetch and reject) |
 | REJECT_SNAPSHOT | 5 | Reject this snapshot, try others |
+| COMPLETE_SNAPSHOT | 6 | Complete this snapshot, no more chunks |
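
Editor's note (illustration only, not part of the diff): taken together, these changes replace index-addressed snapshot chunks (`format`, `chunks`, `index`) with hash-addressed ones (`version`, `chunk_id`, `next_chunks`). A restore no longer iterates over indices `0..chunks-1`; it starts from the snapshot hash, which doubles as the root chunk ID, and follows the chunk IDs each `ApplySnapshotChunk` response returns in `next_chunks`, until the application answers `COMPLETE_SNAPSHOT`. The sketch below shows that loop against the new ABCI surface. `restoreFromSnapshot`, `src`, `dst`, and the `"peer"` sender are hypothetical names; a real state-sync client fetches chunks from peers via `ChunkRequest`/`ChunkResponse` rather than calling `LoadSnapshotChunk` locally, and also honors `refetch_chunks` and `reject_senders`.

```go
package example

import (
	"context"
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// restoreFromSnapshot sketches the hash-addressed chunk protocol introduced
// in this diff: the snapshot hash is the root chunk ID, each response may
// list further chunk IDs in NextChunks, and COMPLETE_SNAPSHOT ends the loop.
func restoreFromSnapshot(ctx context.Context, src, dst abci.Application, snapshot *abci.Snapshot) error {
	queue := [][]byte{snapshot.Hash} // the snapshot hash doubles as the root chunk ID
	for len(queue) > 0 {
		chunkID := queue[0]
		queue = queue[1:]

		// Chunks are requested by ID and snapshot version, not by index.
		loaded, err := src.LoadSnapshotChunk(ctx, &abci.RequestLoadSnapshotChunk{
			Height:  snapshot.Height,
			Version: snapshot.Version,
			ChunkId: chunkID,
		})
		if err != nil {
			return fmt.Errorf("load chunk %X: %w", chunkID, err)
		}

		applied, err := dst.ApplySnapshotChunk(ctx, &abci.RequestApplySnapshotChunk{
			ChunkId: chunkID,
			Chunk:   loaded.Chunk,
			Sender:  "peer", // placeholder P2P ID
		})
		if err != nil {
			return fmt.Errorf("apply chunk %X: %w", chunkID, err)
		}

		switch applied.Result {
		case abci.ResponseApplySnapshotChunk_COMPLETE_SNAPSHOT:
			return nil // the application has reconstructed the full state
		case abci.ResponseApplySnapshotChunk_ACCEPT:
			// Follow the chunk graph advertised by the application.
			queue = append(queue, applied.NextChunks...)
		default:
			return fmt.Errorf("unexpected result %v for chunk %X", applied.Result, chunkID)
		}
	}
	return fmt.Errorf("chunk queue drained before COMPLETE_SNAPSHOT")
}
```

For the kvstore example application in this patch the whole snapshot is a single chunk, so the first `ApplySnapshotChunk` already returns `COMPLETE_SNAPSHOT` with an empty `NextChunks` (this is exactly what the updated `TestSnapshots` asserts); an application that splits its state into a tree of chunks would instead keep returning child IDs until every subtree node has been applied.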