chore!: cherry pick revert dynamic commits (#2081)
evan-forbes authored Jul 12, 2023
Merge commit ebfc416 (parents: 4bbcbcb, 7a176b2)
Showing 22 changed files with 100 additions and 77 deletions.
app/prepare_proposal.go (9 additions, 1 deletion)
@@ -56,7 +56,15 @@ func (app *App) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePr

// create the new data root by creating the data availability header (merkle
// roots of each row and col of the erasure data).
dah := da.NewDataAvailabilityHeader(eds)
dah, err := da.NewDataAvailabilityHeader(eds)
if err != nil {
app.Logger().Error(
"failure to create new data availability header",
"error",
err.Error(),
)
panic(err)
}

// tendermint doesn't need to use any of the erasure data, as only the
// protobuf encoded version of the block data is gossiped.
app/process_proposal.go (5 additions, 1 deletion)
@@ -122,7 +122,11 @@ func (app *App) ProcessProposal(req abci.RequestProcessProposal) (resp abci.Resp
return reject()
}

dah := da.NewDataAvailabilityHeader(eds)
dah, err := da.NewDataAvailabilityHeader(eds)
if err != nil {
logInvalidPropBlockError(app.Logger(), req.Header, "failure to create new data availability header", err)
return reject()
}
// by comparing the hashes we know the computed IndexWrappers (with the share indexes of the PFB's blobs)
// are identical and that square layout is consistent. This also means that the share commitment rules
// have been followed and thus each blobs share commitment should be valid
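
For callers outside the proposal handlers, the pattern above generalizes: build the data availability header, handle the error, then compare its hash against the expected data root. A minimal sketch under the import paths in this go.mod; the helper name verifyDataRoot is illustrative and not part of the change.

package sketch

import (
	"bytes"
	"fmt"

	"github.com/celestiaorg/celestia-app/pkg/da"
	"github.com/celestiaorg/rsmt2d"
)

// verifyDataRoot builds the DAH for an extended data square and checks that
// its hash matches the data root carried in a block header.
func verifyDataRoot(eds *rsmt2d.ExtendedDataSquare, expectedDataRoot []byte) error {
	// NewDataAvailabilityHeader now returns an error instead of assuming the
	// underlying root computation cannot fail, so the caller decides how to reject.
	dah, err := da.NewDataAvailabilityHeader(eds)
	if err != nil {
		return fmt.Errorf("creating data availability header: %w", err)
	}
	if !bytes.Equal(dah.Hash(), expectedDataRoot) {
		return fmt.Errorf("data root mismatch: got %X, want %X", dah.Hash(), expectedDataRoot)
	}
	return nil
}
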
app/test/integration_test.go (2 additions, 1 deletion)
@@ -358,7 +358,8 @@ func (s *IntegrationTestSuite) TestShareInclusionProof() {
func ExtendBlobTest(t *testing.T, block *coretypes.Block) {
eds, err := app.ExtendBlock(block.Data, block.Header.Version.App)
require.NoError(t, err)
dah := da.NewDataAvailabilityHeader(eds)
dah, err := da.NewDataAvailabilityHeader(eds)
require.NoError(t, err)
if !assert.Equal(t, dah.Hash(), block.DataHash.Bytes()) {
// save block to json file for further debugging if this occurs
b, err := json.MarshalIndent(block, "", " ")
app/test/process_proposal_test.go (4 additions, 2 deletions)
@@ -288,7 +288,8 @@ func TestProcessProposal(t *testing.T) {
eds, err := da.ExtendShares(shares.ToBytes(dataSquare))
require.NoError(t, err)

dah := da.NewDataAvailabilityHeader(eds)
dah, err := da.NewDataAvailabilityHeader(eds)
require.NoError(t, err)
// replace the hash of the prepare proposal response with the hash of a data
// square with a tampered sequence start indicator
d.Hash = dah.Hash()
@@ -323,6 +324,7 @@ func calculateNewDataHash(t *testing.T, txs [][]byte) []byte {
require.NoError(t, err)
eds, err := da.ExtendShares(shares.ToBytes(dataSquare))
require.NoError(t, err)
dah := da.NewDataAvailabilityHeader(eds)
dah, err := da.NewDataAvailabilityHeader(eds)
require.NoError(t, err)
return dah.Hash()
}
app/test/qgb_rpc_test.go (1 addition, 1 deletion)
@@ -16,7 +16,7 @@ func TestQGBRPCQueries(t *testing.T) {
t.Skip("skipping QGB integration test in short mode.")
}
tmCfg := testnode.DefaultTendermintConfig()
tmCfg.Consensus.TargetHeightDuration = time.Millisecond
tmCfg.Consensus.TimeoutCommit = time.Millisecond

cctx, _, _ := testnode.NewNetwork(
t,
cmd/celestia-appd/cmd/overrides.go (0 additions, 11 deletions)
@@ -2,21 +2,10 @@ package cmd

import (
"github.com/celestiaorg/celestia-app/app"
"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/cosmos/cosmos-sdk/server"
"github.com/spf13/cobra"
)

// overrideServerConfig applies overrides to the embedded server context's
// configurations.
func overrideServerConfig(command *cobra.Command) error {
ctx := server.GetServerContextFromCmd(command)
ctx.Config.Consensus.TimeoutPropose = appconsts.TimeoutPropose
ctx.Config.Consensus.TargetHeightDuration = appconsts.TargetHeightDuration
ctx.Config.Consensus.SkipTimeoutCommit = false
return server.SetCmdServerContext(command, ctx)
}

// setDefaultConsensusParams sets the default consensus parameters for the
// embedded server context.
func setDefaultConsensusParams(command *cobra.Command) error {
cmd/celestia-appd/cmd/root.go (2 additions, 7 deletions)
@@ -93,7 +93,7 @@ func NewRootCmd() *cobra.Command {
tmCfg.Mempool.MaxTxBytes = 2 * 1024 * 1024 // 2 MiB
tmCfg.Mempool.Version = "v1" // prioritized mempool
tmCfg.Consensus.TimeoutPropose = appconsts.TimeoutPropose
tmCfg.Consensus.TargetHeightDuration = appconsts.TargetHeightDuration
tmCfg.Consensus.TimeoutCommit = appconsts.TimeoutCommit
tmCfg.Consensus.SkipTimeoutCommit = false

customAppTemplate, customAppConfig := initAppConfig()
@@ -109,12 +109,7 @@
return err
}

err = setDefaultConsensusParams(cmd)
if err != nil {
return err
}

return overrideServerConfig(cmd)
return setDefaultConsensusParams(cmd)
},
SilenceUsage: true,
}
go.mod (3 additions, 3 deletions)
@@ -27,7 +27,7 @@ require (
require (
cosmossdk.io/errors v1.0.0-beta.7
cosmossdk.io/math v1.0.0-rc.0
github.com/celestiaorg/rsmt2d v0.9.0
github.com/celestiaorg/rsmt2d v0.10.0
github.com/cosmos/cosmos-proto v1.0.0-alpha8
github.com/cosmos/cosmos-sdk v0.46.13
github.com/cosmos/gogoproto v1.4.10
@@ -198,8 +198,8 @@ require (
)

replace (
github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.15.0-sdk-v0.46.13
github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.16.0-sdk-v0.46.13
github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1
github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.23.0-tm-v0.34.28
github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.24.0-tm-v0.34.28
)
go.sum (6 additions, 6 deletions)
@@ -170,18 +170,18 @@ github.com/bufbuild/protocompile v0.1.0 h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r4
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/celestiaorg/celestia-core v1.23.0-tm-v0.34.28 h1:G7/rq6xTnuFf3XsVZEcl/Sa6vtagm9NQNhaUaSgjvy0=
github.com/celestiaorg/celestia-core v1.23.0-tm-v0.34.28/go.mod h1:J/GsBjoTZaFz71VeyrLZbG8rV+Rzi6oFEUZUipQ97hQ=
github.com/celestiaorg/cosmos-sdk v1.15.0-sdk-v0.46.13 h1:vaQKgaOm0w58JAvOgn2iDohqjH7kvvRqVKiMcBDWifA=
github.com/celestiaorg/cosmos-sdk v1.15.0-sdk-v0.46.13/go.mod h1:G9XkhOJZde36FH0kt/1ayg4ZaioZEQmmRfMa/zQig0I=
github.com/celestiaorg/celestia-core v1.24.0-tm-v0.34.28 h1:eXS3v26nob8Xs2+flKHVxcTzhzQW44KgTcooR3OxnK4=
github.com/celestiaorg/celestia-core v1.24.0-tm-v0.34.28/go.mod h1:J/GsBjoTZaFz71VeyrLZbG8rV+Rzi6oFEUZUipQ97hQ=
github.com/celestiaorg/cosmos-sdk v1.16.0-sdk-v0.46.13 h1:N1PrCWcYkaODeIQyyVBmDKDTwiQWZ31bgtTEYIGeby8=
github.com/celestiaorg/cosmos-sdk v1.16.0-sdk-v0.46.13/go.mod h1:xpBZc/OYZ736hp0IZlBGNUhEgCD9C+bKs8yNLZibyv0=
github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc=
github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA=
github.com/celestiaorg/nmt v0.17.0 h1:/k8YLwJvuHgT/jQ435zXKaDX811+sYEMXL4B/vYdSLU=
github.com/celestiaorg/nmt v0.17.0/go.mod h1:ZndCeAR4l9lxm7W51ouoyTo1cxhtFgK+4DpEIkxRA3A=
github.com/celestiaorg/quantum-gravity-bridge v1.3.0 h1:9zPIp7w1FWfkPnn16y3S4FpFLnQtS7rm81CUVcHEts0=
github.com/celestiaorg/quantum-gravity-bridge v1.3.0/go.mod h1:6WOajINTDEUXpSj5UZzod16UZ96ZVB/rFNKyM+Mt1gI=
github.com/celestiaorg/rsmt2d v0.9.0 h1:kon78I748ZqjNzI8OAqPN+2EImuZuanj/6gTh8brX3o=
github.com/celestiaorg/rsmt2d v0.9.0/go.mod h1:E06nDxfoeBDltWRvTR9dLviiUZI5/6mLXAuhSJzz3Iw=
github.com/celestiaorg/rsmt2d v0.10.0 h1:8dprr6CW5mCk5YPnbiLdirojw9YsJOE+XB+GORb8sT0=
github.com/celestiaorg/rsmt2d v0.10.0/go.mod h1:BiCZkCJfhDHUEOJKXUeu+CudjluecKvRTqHcuxKvodc=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
pkg/appconsts/consensus_consts.go (1 addition, 4 deletions)
@@ -4,8 +4,5 @@ import "time"

const (
TimeoutPropose = time.Second * 10
// TargetHeightDuration is the intended block interval duration (i.e. the
// time between blocks). Note that this is a target because CometBFT does
// not guarantee a fixed block interval.
TargetHeightDuration = time.Second * 15
TimeoutCommit = time.Second * 11
)
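
These constants are wired into the CometBFT consensus config by cmd/celestia-appd/cmd/root.go above, now that overrides.go is removed. A small sketch of that wiring, assuming the tendermint import path that go.mod replaces with celestiaorg/celestia-core; the helper name consensusConfig is illustrative.

package sketch

import (
	"github.com/celestiaorg/celestia-app/pkg/appconsts"
	"github.com/tendermint/tendermint/config"
)

// consensusConfig returns a CometBFT config with the app's fixed timeouts applied.
func consensusConfig() *config.Config {
	cfg := config.DefaultConfig()
	cfg.Consensus.TimeoutPropose = appconsts.TimeoutPropose // 10s
	cfg.Consensus.TimeoutCommit = appconsts.TimeoutCommit   // 11s
	cfg.Consensus.SkipTimeoutCommit = false
	return cfg
}
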
pkg/da/data_availability_header.go (20 additions, 8 deletions)
@@ -38,18 +38,27 @@ type DataAvailabilityHeader struct {
hash []byte
}

// NewDataAvailabilityHeader generates a DataAvailability header using the provided square size and shares
func NewDataAvailabilityHeader(eds *rsmt2d.ExtendedDataSquare) DataAvailabilityHeader {
// generate the row and col roots using the EDS
// NewDataAvailabilityHeader generates a DataAvailability header using the
// provided extended data square.
func NewDataAvailabilityHeader(eds *rsmt2d.ExtendedDataSquare) (DataAvailabilityHeader, error) {
rowRoots, err := eds.RowRoots()
if err != nil {
return DataAvailabilityHeader{}, err
}
colRoots, err := eds.ColRoots()
if err != nil {
return DataAvailabilityHeader{}, err
}

dah := DataAvailabilityHeader{
RowRoots: eds.RowRoots(),
ColumnRoots: eds.ColRoots(),
RowRoots: rowRoots,
ColumnRoots: colRoots,
}

// generate the hash of the data using the new roots
// Generate the hash of the data using the new roots
dah.Hash()

return dah
return dah, nil
}

func ExtendShares(s [][]byte) (*rsmt2d.ExtendedDataSquare, error) {
@@ -167,7 +176,10 @@ func MinDataAvailabilityHeader() DataAvailabilityHeader {
if err != nil {
panic(err)
}
dah := NewDataAvailabilityHeader(eds)
dah, err := NewDataAvailabilityHeader(eds)
if err != nil {
panic(err)
}
return dah
}

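
Taken together, the pkg/da changes turn header construction into a two-step, error-returning flow: extend the shares, then build the header. A sketch of how a downstream caller might go from padded share bytes to a data root, using only the functions shown in this diff; the helper name dataRoot is illustrative.

package sketch

import (
	"github.com/celestiaorg/celestia-app/pkg/da"
)

// dataRoot erasure-codes a padded square of share bytes and returns the hash
// of the resulting data availability header.
func dataRoot(shareBytes [][]byte) ([]byte, error) {
	eds, err := da.ExtendShares(shareBytes)
	if err != nil {
		return nil, err
	}
	// The constructor now surfaces RowRoots/ColRoots errors from rsmt2d v0.10.0
	// rather than treating root computation as infallible.
	dah, err := da.NewDataAvailabilityHeader(eds)
	if err != nil {
		return nil, err
	}
	return dah.Hash(), nil
}
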
pkg/da/data_availability_header_test.go (9 additions, 6 deletions)
@@ -58,10 +58,11 @@ func TestNewDataAvailabilityHeader(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
eds, err := ExtendShares(tt.shares)
require.NoError(t, err)
resdah := NewDataAvailabilityHeader(eds)
require.Equal(t, tt.squareSize*2, uint64(len(resdah.ColumnRoots)), tt.name)
require.Equal(t, tt.squareSize*2, uint64(len(resdah.RowRoots)), tt.name)
require.Equal(t, tt.expectedHash, resdah.hash, tt.name)
got, err := NewDataAvailabilityHeader(eds)
require.NoError(t, err)
require.Equal(t, tt.squareSize*2, uint64(len(got.ColumnRoots)), tt.name)
require.Equal(t, tt.squareSize*2, uint64(len(got.RowRoots)), tt.name)
require.Equal(t, tt.expectedHash, got.hash, tt.name)
})
}
}
@@ -106,7 +107,8 @@ func TestDataAvailabilityHeaderProtoConversion(t *testing.T) {
shares := generateShares(appconsts.DefaultSquareSizeUpperBound * appconsts.DefaultSquareSizeUpperBound)
eds, err := ExtendShares(shares)
require.NoError(t, err)
bigdah := NewDataAvailabilityHeader(eds)
bigdah, err := NewDataAvailabilityHeader(eds)
require.NoError(t, err)

tests := []test{
{
@@ -143,7 +145,8 @@ func Test_DAHValidateBasic(t *testing.T) {
shares := generateShares(maxSize)
eds, err := ExtendShares(shares)
require.NoError(t, err)
bigdah := NewDataAvailabilityHeader(eds)
bigdah, err := NewDataAvailabilityHeader(eds)
require.NoError(t, err)

// make a mutant dah that has too many roots
var tooBigDah DataAvailabilityHeader
pkg/inclusion/nmt_caching_test.go (2 additions, 1 deletion)
@@ -122,7 +122,8 @@ func TestEDSSubRootCacher(t *testing.T) {
eds, err := rsmt2d.ComputeExtendedDataSquare(d, appconsts.DefaultCodec(), stc.Constructor)
require.NoError(t, err)

dah := da.NewDataAvailabilityHeader(eds)
dah, err := da.NewDataAvailabilityHeader(eds)
require.NoError(t, err)

for i := range dah.RowRoots[:squareSize] {
expectedSubTreeRoots := calculateSubTreeRoots(t, eds.Row(uint(i))[:squareSize], 2)
pkg/proof/proof.go (10 additions, 2 deletions)
@@ -70,10 +70,18 @@ func NewShareInclusionProof(
return types.ShareProof{}, err
}

edsRowRoots := eds.RowRoots()
edsRowRoots, err := eds.RowRoots()
if err != nil {
return types.ShareProof{}, err
}

edsColRoots, err := eds.ColRoots()
if err != nil {
return types.ShareProof{}, err
}

// create the binary merkle inclusion proof for all the square rows to the data root
_, allProofs := merkle.ProofsFromByteSlices(append(edsRowRoots, eds.ColRoots()...))
_, allProofs := merkle.ProofsFromByteSlices(append(edsRowRoots, edsColRoots...))
rowProofs := make([]*merkle.Proof, endRow-startRow+1)
rowRoots := make([]tmbytes.HexBytes, endRow-startRow+1)
for i := startRow; i <= endRow; i++ {
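
The same rsmt2d v0.10.0 API change drives this hunk: RowRoots and ColRoots now return errors. A sketch of collecting both sets of roots before building the binary Merkle proofs, using the tendermint crypto/merkle helper already used above; the helper name rowAndColProofs is illustrative.

package sketch

import (
	"github.com/celestiaorg/rsmt2d"
	"github.com/tendermint/tendermint/crypto/merkle"
)

// rowAndColProofs returns one Merkle proof per row and column root of the
// extended data square, relative to the data root.
func rowAndColProofs(eds *rsmt2d.ExtendedDataSquare) ([]*merkle.Proof, error) {
	rowRoots, err := eds.RowRoots()
	if err != nil {
		return nil, err
	}
	colRoots, err := eds.ColRoots()
	if err != nil {
		return nil, err
	}
	// Row roots first, then column roots, matching the ordering in proof.go.
	_, proofs := merkle.ProofsFromByteSlices(append(rowRoots, colRoots...))
	return proofs, nil
}
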
pkg/proof/proof_test.go (2 additions, 1 deletion)
@@ -111,7 +111,8 @@ func TestNewShareInclusionProof(t *testing.T) {

// create the new data root by creating the data availability header (merkle
// roots of each row and col of the erasure data).
dah := da.NewDataAvailabilityHeader(eds)
dah, err := da.NewDataAvailabilityHeader(eds)
require.NoError(t, err)
dataRoot := dah.Hash()

type test struct {
pkg/square/square_fuzz_test.go (2 additions, 1 deletion)
@@ -57,7 +57,8 @@ func FuzzSquare(f *testing.F) {
cacher := inclusion.NewSubtreeCacher(uint64(s.Size()))
eds, err := rsmt2d.ComputeExtendedDataSquare(shares.ToBytes(s), appconsts.DefaultCodec(), cacher.Constructor)
require.NoError(t, err)
dah := da.NewDataAvailabilityHeader(eds)
dah, err := da.NewDataAvailabilityHeader(eds)
require.NoError(t, err)

decoder := encoding.MakeConfig(app.ModuleEncodingRegisters...).TxConfig.TxDecoder()

pkg/square/square_test.go (2 additions, 1 deletion)
@@ -224,7 +224,8 @@ func TestSquareShareCommitments(t *testing.T) {
cacher := inclusion.NewSubtreeCacher(uint64(dataSquare.Size()))
eds, err := rsmt2d.ComputeExtendedDataSquare(shares.ToBytes(dataSquare), appconsts.DefaultCodec(), cacher.Constructor)
require.NoError(t, err)
dah := da.NewDataAvailabilityHeader(eds)
dah, err := da.NewDataAvailabilityHeader(eds)
require.NoError(t, err)
decoder := encoding.MakeConfig(app.ModuleEncodingRegisters...).TxConfig.TxDecoder()

for pfbIndex := 0; pfbIndex < numTxs; pfbIndex++ {
test/e2e/pkg/setup.go (1 addition, 1 deletion)
@@ -230,7 +230,7 @@ func MakeConfig(node *Node) (*config.Config, error) {
// FIXME: This values get overridden by the timeout consts in the app package.
// We should modify this if we want to quicken the time of the blocks.
cfg.Consensus.TimeoutPropose = 1000 * time.Millisecond
cfg.Consensus.TargetHeightDuration = 300 * time.Millisecond
cfg.Consensus.TimeoutCommit = 100 * time.Millisecond
return cfg, nil
}

test/util/network/network.go (14 additions, 14 deletions)
@@ -87,20 +87,20 @@ func DefaultConfig() network.Config {
},
)
},
GenesisState: app.ModuleBasics.DefaultGenesis(encCfg.Codec),
TargetHeightDuration: 2 * time.Second,
ChainID: "chain-" + tmrand.Str(6),
NumValidators: 1,
BondDenom: app.BondDenom,
MinGasPrices: fmt.Sprintf("0.000006%s", app.BondDenom),
AccountTokens: sdk.TokensFromConsensusPower(1000, sdk.DefaultPowerReduction),
StakingTokens: sdk.TokensFromConsensusPower(500, sdk.DefaultPowerReduction),
BondedTokens: sdk.TokensFromConsensusPower(100, sdk.DefaultPowerReduction),
PruningStrategy: pruningtypes.PruningOptionNothing,
CleanupDir: true,
SigningAlgo: string(hd.Secp256k1Type),
KeyringOptions: []keyring.Option{},
PrintMnemonic: false,
GenesisState: app.ModuleBasics.DefaultGenesis(encCfg.Codec),
TimeoutCommit: 2 * time.Second,
ChainID: "chain-" + tmrand.Str(6),
NumValidators: 1,
BondDenom: app.BondDenom,
MinGasPrices: fmt.Sprintf("0.000006%s", app.BondDenom),
AccountTokens: sdk.TokensFromConsensusPower(1000, sdk.DefaultPowerReduction),
StakingTokens: sdk.TokensFromConsensusPower(500, sdk.DefaultPowerReduction),
BondedTokens: sdk.TokensFromConsensusPower(100, sdk.DefaultPowerReduction),
PruningStrategy: pruningtypes.PruningOptionNothing,
CleanupDir: true,
SigningAlgo: string(hd.Secp256k1Type),
KeyringOptions: []keyring.Option{},
PrintMnemonic: false,
}
}

test/util/testnode/full_node.go (2 additions, 2 deletions)
@@ -150,7 +150,7 @@ func DefaultTendermintConfig() *config.Config {
tmCfg := config.DefaultConfig()
// Reduce the target height duration so that blocks are produced faster
// during tests.
tmCfg.Consensus.TargetHeightDuration = 300 * time.Millisecond
tmCfg.Consensus.TimeoutCommit = 100 * time.Millisecond
tmCfg.Consensus.TimeoutPropose = 200 * time.Millisecond

// set the mempool's MaxTxBytes to allow the testnode to accept a
@@ -266,7 +266,7 @@ func DefaultNetwork(t *testing.T) (accounts []string, cctx Context) {
}

tmCfg := DefaultTendermintConfig()
tmCfg.Consensus.TargetHeightDuration = time.Millisecond * 1
tmCfg.Consensus.TimeoutCommit = time.Millisecond * 1
appConf := DefaultAppConfig()

cctx, _, _ = NewNetwork(t, DefaultParams(), tmCfg, appConf, accounts)
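
Tests that need fast blocks now shorten TimeoutCommit rather than TargetHeightDuration. A sketch of a test helper in that style, assuming the testnode helpers shown in this diff (DefaultTendermintConfig, DefaultAppConfig, DefaultParams, NewNetwork); the helper name fastNetwork is illustrative.

package sketch

import (
	"testing"
	"time"

	"github.com/celestiaorg/celestia-app/test/util/testnode"
)

// fastNetwork starts a single-node test network whose blocks are produced
// almost immediately by shrinking the commit timeout.
func fastNetwork(t *testing.T, accounts []string) testnode.Context {
	tmCfg := testnode.DefaultTendermintConfig()
	tmCfg.Consensus.TimeoutCommit = time.Millisecond
	appCfg := testnode.DefaultAppConfig()
	cctx, _, _ := testnode.NewNetwork(t, testnode.DefaultParams(), tmCfg, appCfg, accounts)
	return cctx
}
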
x/qgb/client/suite_test.go (1 addition, 1 deletion)
@@ -28,7 +28,7 @@ func (s *CLITestSuite) SetupSuite() {
cfg.EnableTMLogging = false
cfg.MinGasPrices = "0utia"
cfg.NumValidators = 1
cfg.TargetHeightDuration = time.Millisecond
cfg.TimeoutCommit = time.Millisecond
s.cfg = cfg

numAccounts := 120
x/upgrade/test/integration_test.go (2 additions, 2 deletions)
@@ -54,9 +54,9 @@ func (s *UpgradeTestSuite) SetupSuite() {
for i := 0; i < len(accounts); i++ {
accounts[i] = tmrand.Str(9)
}
blockTime := time.Millisecond * 300
blockTime := 3 * time.Second
tmCfg := config.DefaultConfig()
tmCfg.Consensus.TargetHeightDuration = blockTime
tmCfg.Consensus.TimeoutCommit = blockTime
tmCfg.RPC.ListenAddress = fmt.Sprintf("tcp://127.0.0.1:%d", getFreePort())
tmCfg.P2P.ListenAddress = fmt.Sprintf("tcp://127.0.0.1:%d", getFreePort())
tmCfg.RPC.GRPCListenAddress = fmt.Sprintf("tcp://127.0.0.1:%d", getFreePort())
