fix(mempool): remove TxRecvRatePunishPeer option
lklimek committed May 13, 2024
1 parent 29b4ef9 commit de541c9
Showing 5 changed files with 3 additions and 18 deletions.
6 changes: 0 additions & 6 deletions config/config.go
@@ -901,12 +901,6 @@ type MempoolConfig struct {
// Default: 0
TxRecvRateLimit float64 `mapstructure:"tx-recv-rate-limit"`

- // TxRecvRatePunishPeer set to true means that when the rate limit set in TxRecvRateLimit is reached, the
- // peer will be punished (disconnected). If set to false, the peer will be throttled (messages will be dropped).
- //
- // Default: false
- TxRecvRatePunishPeer bool `mapstructure:"tx-recv-rate-punish-peer"`

// TxEnqueueTimeout defines how long new mempool transaction will wait when internal
// processing queue is full (most likely due to busy CheckTx execution).
// Once the timeout is reached, the transaction will be silently dropped.
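For orientation, the mempool rate-limit surface that remains after this removal is just the send/receive limits plus the enqueue timeout. A condensed sketch of those fields follows; the names and keys are taken from the diff, the real struct in config/config.go has many more options, and the exact type and tag of TxEnqueueTimeout are assumed here.

package config

import "time"

// MempoolConfig (abridged): only the rate-limit related fields are shown.
type MempoolConfig struct {
	// TxSendRateLimit is the rate limit for sending transactions to peers,
	// in transactions per second, shared across all peers. Zero disables it.
	TxSendRateLimit float64 `mapstructure:"tx-send-rate-limit"`

	// TxRecvRateLimit is the rate limit for receiving transactions from peers,
	// in transactions per second, shared across all peers. Zero disables it.
	TxRecvRateLimit float64 `mapstructure:"tx-recv-rate-limit"`

	// TxEnqueueTimeout is how long a new transaction may wait when the internal
	// processing queue is full; once exceeded, the transaction is silently dropped.
	TxEnqueueTimeout time.Duration `mapstructure:"tx-enqueue-timeout"`
}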
8 changes: 2 additions & 6 deletions config/toml.go
@@ -459,23 +459,19 @@ ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
# tx-send-rate-limit is the rate limit for sending transactions to peers, in transactions per second.
# This rate limit is shared across all peers.
# If zero, the rate limiter is disabled.
#
# Default: 0
tx-send-rate-limit = {{ .Mempool.TxSendRateLimit }}
# tx-recv-rate-limit is the rate limit for receiving transactions from peers, in transactions per second.
# This rate limit is shared across all peers.
# If zero, the rate limiter is disabled.
#
# Default: 0
tx-recv-rate-limit = {{ .Mempool.TxRecvRateLimit }}
- # tx-recv-rate-punish-peer set to true means that when tx-recv-rate-limit is reached, the peer will be punished
- # (disconnected). If set to false, the peer will be throttled (messages will be dropped).
- #
- # Default: false
- tx-recv-rate-punish-peer = {{ .Mempool.TxRecvRatePunishPeer }}
# TxEnqueueTimeout defines how many nanoseconds new mempool transaction (received
# from other nodes) will wait when internal processing queue is full
# (most likely due to busy CheckTx execution).Once the timeout is reached, the transaction
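The keys above come from a Go text/template that is rendered against the node's config when the TOML file is written. A self-contained sketch of that mechanism; the structs and template fragment here are stand-ins, not the real config package.

package main

import (
	"os"
	"text/template"
)

// Stand-ins for the real config structs.
type MempoolConfig struct {
	TxSendRateLimit float64
	TxRecvRateLimit float64
}

type Config struct {
	Mempool MempoolConfig
}

// Fragment in the same style as config/toml.go; after this commit it no
// longer emits a tx-recv-rate-punish-peer key.
const mempoolTemplate = `# If zero, the rate limiter is disabled.
tx-send-rate-limit = {{ .Mempool.TxSendRateLimit }}
tx-recv-rate-limit = {{ .Mempool.TxRecvRateLimit }}
`

func main() {
	tmpl := template.Must(template.New("config").Parse(mempoolTemplate))
	// Defaults are zero, i.e. both limiters disabled.
	if err := tmpl.Execute(os.Stdout, Config{}); err != nil {
		panic(err)
	}
}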
1 change: 0 additions & 1 deletion internal/p2p/channel_params.go
@@ -84,7 +84,6 @@ func ChannelDescriptors(cfg *config.Config) map[ChannelID]*ChannelDescriptor {
SendRateBurst: int(5 * cfg.Mempool.TxSendRateLimit),
RecvRateLimit: rate.Limit(cfg.Mempool.TxRecvRateLimit),
RecvRateBurst: int(10 * cfg.Mempool.TxRecvRateLimit), // twice as big as send, to avoid false punishment
- RecvRateShouldErr: cfg.Mempool.TxRecvRatePunishPeer,
EnqueueTimeout: cfg.Mempool.TxEnqueueTimeout,
},
}
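The limit and burst above feed a token-bucket limiter from golang.org/x/time/rate. A standalone sketch of how those two numbers interact, using a hypothetical tx-recv-rate-limit of 100 tx/s (so burst = 1000, per the 10x factor in the descriptor):

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Hypothetical config value; 0 would disable the limiter entirely.
	txRecvRateLimit := 100.0

	// Mirrors the descriptor above: receive burst is 10x the per-second limit,
	// deliberately larger than the send burst to avoid penalizing honest peers.
	limiter := rate.NewLimiter(rate.Limit(txRecvRateLimit), int(10*txRecvRateLimit))

	// The full burst of 1000 messages is admitted immediately...
	fmt.Println(limiter.AllowN(time.Now(), 1000)) // true
	// ...but the very next message is rejected until tokens refill at 100/s.
	fmt.Println(limiter.Allow()) // false
}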
4 changes: 0 additions & 4 deletions internal/p2p/conn/connection.go
@@ -618,10 +618,6 @@ type ChannelDescriptor struct {
/// RecvRateLimit is used to limit the rate of receiving messages, per second.
RecvRateLimit rate.Limit
RecvRateBurst int
- // RecvRateShouldErr is used to determine if the rate limiter should
- // report an error whenever recv rate limit is exceeded, most likely
- // causing the peer to disconnect.
- RecvRateShouldErr bool

// RecvBufferCapacity defines the max number of inbound messages for a
// given p2p Channel queue.
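For reference, the removed flag chose between two reactions when the receive limiter ran dry: error out (and likely disconnect the peer) or silently drop. A schematic sketch of that decision follows; handleRecv and process are illustrative names, not functions from this repository, and with the flag gone only the throttle branch remains reachable.

package main

import (
	"errors"

	"golang.org/x/time/rate"
)

// process stands in for handing the message to the mempool reactor.
func process(msg []byte) error { return nil }

// handleRecv sketches the behavior RecvRateShouldErr used to select.
func handleRecv(limiter *rate.Limiter, shouldErr bool, msg []byte) error {
	if limiter.Allow() {
		return process(msg)
	}
	if shouldErr {
		// "Punish": surface an error, typically leading to the peer being disconnected.
		return errors.New("receive rate limit exceeded")
	}
	// "Throttle": silently drop the message and keep the peer connected.
	return nil
}

func main() {
	lim := rate.NewLimiter(10, 10) // hypothetical: 10 msg/s with a burst of 10
	_ = handleRecv(lim, false, []byte("tx"))
}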
2 changes: 1 addition & 1 deletion internal/p2p/router.go
@@ -266,7 +266,7 @@ func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (Ch
if chDesc.SendRateLimit > 0 || chDesc.RecvRateLimit > 0 {
channel = NewThrottledChannel(channel,
chDesc.SendRateLimit, chDesc.SendRateBurst,
- chDesc.RecvRateLimit, chDesc.RecvRateBurst, chDesc.RecvRateShouldErr,
+ chDesc.RecvRateLimit, chDesc.RecvRateBurst, false,
r.logger)
}

