From dc976f8b113dbae3bdb51133215147e862eecc2a Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 21 Aug 2024 12:16:24 -0500 Subject: [PATCH 1/2] Re-enable G115 lint --- .golangci.yml | 1 - arbcompress/compress_common.go | 4 +++- arbnode/batch_poster.go | 20 +++++++++++++------- arbnode/dataposter/data_poster.go | 7 +++++++ arbnode/dataposter/dbstorage/storage.go | 2 +- arbnode/dataposter/slice/slicestorage.go | 4 ++-- arbnode/dataposter/storage/time.go | 2 ++ arbnode/dataposter/storage_test.go | 1 + arbnode/inbox_reader.go | 4 ++-- arbnode/inbox_tracker.go | 3 +++ arbnode/node.go | 2 ++ arbnode/transaction_streamer.go | 1 + arbos/addressSet/addressSet_test.go | 1 + arbos/addressTable/addressTable.go | 1 + arbos/arbosState/initialization_test.go | 1 + arbos/l1pricing/l1pricing.go | 4 ++-- arbos/l1pricing_test.go | 6 +++--- arbos/l2pricing/l2pricing_test.go | 3 +++ arbos/l2pricing/model.go | 12 ++++++++---- arbos/storage/storage.go | 8 +++----- arbstate/inbox.go | 6 +++--- arbutil/block_message_relation.go | 1 + arbutil/correspondingl1blocknumber.go | 1 + blocks_reexecutor/blocks_reexecutor.go | 3 ++- broadcaster/backlog/backlog.go | 9 +++++++-- broadcaster/backlog/backlog_test.go | 4 ++-- broadcaster/broadcaster.go | 1 + cmd/nitro/nitro.go | 3 ++- cmd/staterecovery/staterecovery.go | 1 + das/aggregator.go | 1 + das/dasRpcClient.go | 1 + das/dasRpcServer.go | 2 ++ das/dastree/dastree.go | 15 +++++++++------ das/db_storage_service.go | 4 +++- das/local_file_storage_service.go | 6 ++++++ das/local_file_storage_service_test.go | 1 + das/s3_storage_service.go | 4 +++- das/sign_after_store_das_writer.go | 1 + das/simple_das_reader_aggregator.go | 12 ++++++------ das/util.go | 2 ++ execution/gethexec/api.go | 1 + execution/gethexec/sequencer.go | 5 +++++ execution/gethexec/tx_pre_checker.go | 1 + execution/nodeInterface/NodeInterface.go | 7 +++++++ precompiles/ArbAddressTable.go | 8 ++++---- precompiles/ArbRetryableTx.go | 7 ++++--- precompiles/ArbSys.go | 4 ++-- relay/relay_stress_test.go | 2 +- staker/block_challenge_backend.go | 2 +- staker/block_validator.go | 7 +++++++ staker/challenge-cache/cache.go | 4 ++-- staker/challenge_manager.go | 2 +- staker/challenge_test.go | 2 +- staker/l1_validator.go | 2 ++ staker/rollup_watcher.go | 2 +- staker/staker.go | 4 ++++ system_tests/block_validator_test.go | 1 + system_tests/forwarder_test.go | 1 + system_tests/initialization_test.go | 1 + system_tests/outbox_test.go | 1 + system_tests/program_recursive_test.go | 1 + system_tests/program_test.go | 5 +++++ system_tests/recreatestate_rpc_test.go | 7 +++++++ system_tests/seq_nonce_test.go | 1 + system_tests/seqinbox_test.go | 6 +++++- system_tests/snap_sync_test.go | 2 ++ system_tests/twonodeslong_test.go | 1 + system_tests/unsupported_txtypes_test.go | 4 ++-- util/arbmath/bips.go | 4 ++-- util/arbmath/math.go | 14 ++++++++++++++ util/arbmath/math_test.go | 3 +++ util/arbmath/uint24.go | 8 +++++--- util/headerreader/header_reader.go | 1 + util/merkletree/merkleTree.go | 4 ++-- util/rpcclient/rpcclient.go | 16 +++++++++------- util/sharedmetrics/sharedmetrics.go | 2 ++ util/testhelpers/testhelpers.go | 1 + validator/client/validation_client.go | 1 + validator/server_arb/execution_run_test.go | 2 +- validator/server_arb/machine_cache.go | 1 + validator/server_arb/validator_spawner.go | 3 +++ validator/server_jit/jit_machine.go | 15 ++++++++++++--- wavmio/stub.go | 6 +++--- wsbroadcastserver/clientconnection.go | 1 + 84 files changed, 247 insertions(+), 91 deletions(-) diff --git a/.golangci.yml 
b/.golangci.yml index e788eca6c0..0594670137 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -44,7 +44,6 @@ linters-settings: gosec: excludes: - G404 # checks that random numbers are securely generated - - G115 # Potential integer overflow when converting between integer types govet: enable-all: true diff --git a/arbcompress/compress_common.go b/arbcompress/compress_common.go index a61dd9a171..997232e7cc 100644 --- a/arbcompress/compress_common.go +++ b/arbcompress/compress_common.go @@ -17,6 +17,8 @@ func compressedBufferSizeFor(length int) int { return length + (length>>10)*8 + 64 // actual limit is: length + (length >> 14) * 4 + 6 } -func CompressLevel(input []byte, level int) ([]byte, error) { +func CompressLevel(input []byte, level uint64) ([]byte, error) { + // level is trusted and shouldn't be anything crazy + // #nosec G115 return Compress(input, uint32(level), EmptyDictionary) } diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 71239efdbb..d0764cc783 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -121,7 +121,7 @@ type BatchPoster struct { nextRevertCheckBlock int64 // the last parent block scanned for reverting batches postedFirstBatch bool // indicates if batch poster has posted the first batch - accessList func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList + accessList func(SequencerInboxAccs, AfterDelayedMessagesRead uint64) types.AccessList } type l1BlockBound int @@ -374,7 +374,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e } // Dataposter sender may be external signer address, so we should initialize // access list after initializing dataposter. - b.accessList = func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList { + b.accessList = func(SequencerInboxAccs, AfterDelayedMessagesRead uint64) types.AccessList { if !b.config().UseAccessLists || opts.L1Reader.IsParentChainArbitrum() { // Access lists cost gas instead of saving gas when posting to L2s, // because data is expensive in comparison to computation. @@ -433,8 +433,8 @@ type AccessListOpts struct { BridgeAddr common.Address DataPosterAddr common.Address GasRefunderAddr common.Address - SequencerInboxAccs int - AfterDelayedMessagesRead int + SequencerInboxAccs uint64 + AfterDelayedMessagesRead uint64 } // AccessList returns access list (contracts, storage slots) for batchposter. 
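Note on the pattern: throughout this patch, G115 findings are resolved in one of two ways. Either the narrower type is widened so no conversion is needed at all (as with CompressLevel now taking a uint64), or the conversion is kept, justified in a comment, and suppressed with `// #nosec G115` on the line above it. A minimal, self-contained sketch of both options follows; the function names are illustrative, not from the codebase:

package main

import (
	"fmt"
	"math"
)

// narrowChecked refuses values that do not fit, so the conversion
// that actually executes can never overflow.
func narrowChecked(u uint64) (uint32, error) {
	if u > math.MaxUint32 {
		return 0, fmt.Errorf("value %d out of range for uint32", u)
	}
	// bounds checked above
	// #nosec G115
	return uint32(u), nil
}

// narrowTrusted documents why the input cannot overflow and
// silences the linter for that single line.
func narrowTrusted(u uint64) uint32 {
	// u is produced by trusted code and always fits in 32 bits
	// #nosec G115
	return uint32(u)
}

func main() {
	v, err := narrowChecked(42)
	fmt.Println(v, err, narrowTrusted(7))
}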
@@ -476,12 +476,12 @@ func AccessList(opts *AccessListOpts) types.AccessList { }, } - for _, v := range []struct{ slotIdx, val int }{ + for _, v := range []struct{ slotIdx, val uint64 }{ {7, opts.SequencerInboxAccs - 1}, // - sequencerInboxAccs[sequencerInboxAccs.length - 1]; (keccak256(7, sequencerInboxAccs.length - 1)) {7, opts.SequencerInboxAccs}, // - sequencerInboxAccs.push(...); (keccak256(7, sequencerInboxAccs.length)) {6, opts.AfterDelayedMessagesRead - 1}, // - delayedInboxAccs[afterDelayedMessagesRead - 1]; (keccak256(6, afterDelayedMessagesRead - 1)) } { - sb := arbutil.SumBytes(arbutil.PaddedKeccak256([]byte{byte(v.slotIdx)}), big.NewInt(int64(v.val)).Bytes()) + sb := arbutil.SumBytes(arbutil.PaddedKeccak256([]byte{byte(v.slotIdx)}), new(big.Int).SetUint64(v.val).Bytes()) l[1].StorageKeys = append(l[1].StorageKeys, common.Hash(sb)) } @@ -603,9 +603,12 @@ func (b *BatchPoster) pollForL1PriceData(ctx context.Context) { l1GasPrice = blobFeePerByte.Uint64() / 16 } } + // #nosec G115 blobGasUsedGauge.Update(int64(*h.BlobGasUsed)) } + // #nosec G115 blockGasUsedGauge.Update(int64(h.GasUsed)) + // #nosec G115 blockGasLimitGauge.Update(int64(h.GasLimit)) suggestedTipCap, err := b.l1Reader.Client().SuggestGasTipCap(ctx) if err != nil { @@ -613,6 +616,7 @@ func (b *BatchPoster) pollForL1PriceData(ctx context.Context) { } else { suggestedTipCapGauge.Update(suggestedTipCap.Int64()) } + // #nosec G115 l1GasPriceGauge.Update(int64(l1GasPrice)) case <-ctx.Done(): return @@ -1176,6 +1180,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } + // #nosec G115 firstMsgTime := time.Unix(int64(firstMsg.Message.Header.Timestamp), 0) lastPotentialMsg, err := b.streamer.GetMessage(msgCount - 1) @@ -1403,7 +1408,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if len(kzgBlobs)*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { return false, fmt.Errorf("produced %v blobs for batch but a block can only hold %v (compressed batch was %v bytes long)", len(kzgBlobs), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob, len(sequencerMsg)) } - accessList := b.accessList(int(batchPosition.NextSeqNum), int(b.building.segments.delayedMsg)) + accessList := b.accessList(batchPosition.NextSeqNum, b.building.segments.delayedMsg) // On restart, we may be trying to estimate gas for a batch whose successor has // already made it into pending state, if not latest state. // In that case, we might get a revert with `DelayedBackwards()`. 
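The `big.NewInt(int64(x))` to `new(big.Int).SetUint64(x)` rewrites in this hunk matter because the former wraps to a negative number once the uint64 passes math.MaxInt64, while SetUint64 is lossless over the full range. A small demonstration, with the value chosen only to show the wrap:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := uint64(1) << 63 // one past math.MaxInt64

	wrapped := big.NewInt(int64(x))    // -9223372036854775808
	exact := new(big.Int).SetUint64(x) //  9223372036854775808

	fmt.Println(wrapped, exact)
}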
@@ -1505,6 +1510,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) messagesPerBatch = 1 } backlog := uint64(unpostedMessages) / messagesPerBatch + // #nosec G115 batchPosterEstimatedBatchBacklogGauge.Update(int64(backlog)) if backlog > 10 { logLevel := log.Warn diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 5630a52947..6a483929b2 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -359,6 +359,7 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64, thi if err != nil { return fmt.Errorf("getting nonce of a dataposter sender: %w", err) } + // #nosec G115 latestUnconfirmedNonceGauge.Update(int64(unconfirmedNonce)) if nextNonce >= cfg.MaxMempoolTransactions+unconfirmedNonce { return fmt.Errorf("%w: transaction nonce: %d, unconfirmed nonce: %d, max mempool size: %d", ErrExceedsMaxMempoolSize, nextNonce, unconfirmedNonce, cfg.MaxMempoolTransactions) @@ -371,6 +372,7 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64, thi if err != nil { return fmt.Errorf("getting nonce of a dataposter sender: %w", err) } + // #nosec G115 latestUnconfirmedNonceGauge.Update(int64(unconfirmedNonce)) if unconfirmedNonce > nextNonce { return fmt.Errorf("latest on-chain nonce %v is greater than to next nonce %v", unconfirmedNonce, nextNonce) @@ -525,6 +527,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u if err != nil { return nil, nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) } + // #nosec G115 latestSoftConfirmedNonceGauge.Update(int64(softConfNonce)) suggestedTip, err := p.client.SuggestGasTipCap(ctx) @@ -1052,6 +1055,7 @@ func (p *DataPoster) updateNonce(ctx context.Context) error { } return nil } + // #nosec G115 latestFinalizedNonceGauge.Update(int64(nonce)) log.Info("Data poster transactions confirmed", "previousNonce", p.nonce, "newNonce", nonce, "previousL1Block", p.lastBlock, "newL1Block", header.Number) if len(p.errorCount) > 0 { @@ -1132,6 +1136,7 @@ func (p *DataPoster) Start(ctxIn context.Context) { log.Warn("Failed to get latest nonce", "err", err) return minWait } + // #nosec G115 latestUnconfirmedNonceGauge.Update(int64(unconfirmedNonce)) // We use unconfirmedNonce here to replace-by-fee transactions that aren't in a block, // excluding those that are in an unconfirmed block. 
If a reorg occurs, we'll continue @@ -1154,7 +1159,9 @@ func (p *DataPoster) Start(ctxIn context.Context) { confirmedNonce := unconfirmedNonce - 1 confirmedMeta, err := p.queue.Get(ctx, confirmedNonce) if err == nil && confirmedMeta != nil { + // #nosec G115 totalQueueWeightGauge.Update(int64(arbmath.SaturatingUSub(latestCumulativeWeight, confirmedMeta.CumulativeWeight()))) + // #nosec G115 totalQueueLengthGauge.Update(int64(arbmath.SaturatingUSub(latestNonce, confirmedNonce))) } else { log.Error("Failed to fetch latest confirmed tx from queue", "confirmedNonce", confirmedNonce, "err", err, "confirmedMeta", confirmedMeta) diff --git a/arbnode/dataposter/dbstorage/storage.go b/arbnode/dataposter/dbstorage/storage.go index 97055193a6..37ebfa5099 100644 --- a/arbnode/dataposter/dbstorage/storage.go +++ b/arbnode/dataposter/dbstorage/storage.go @@ -42,7 +42,7 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu var res []*storage.QueuedTransaction it := s.db.NewIterator([]byte(""), idxToKey(startingIndex)) defer it.Release() - for i := 0; i < int(maxResults); i++ { + for i := uint64(0); i < maxResults; i++ { if !it.Next() { break } diff --git a/arbnode/dataposter/slice/slicestorage.go b/arbnode/dataposter/slice/slicestorage.go index 69de7564a3..8685ed6f54 100644 --- a/arbnode/dataposter/slice/slicestorage.go +++ b/arbnode/dataposter/slice/slicestorage.go @@ -89,8 +89,8 @@ func (s *Storage) Put(_ context.Context, index uint64, prev, new *storage.Queued } s.queue = append(s.queue, newEnc) } else if index >= s.firstNonce { - queueIdx := int(index - s.firstNonce) - if queueIdx > len(s.queue) { + queueIdx := index - s.firstNonce + if queueIdx > uint64(len(s.queue)) { return fmt.Errorf("attempted to set out-of-bounds index %v in queue starting at %v of length %v", index, s.firstNonce, len(s.queue)) } prevEnc, err := s.encDec().Encode(prev) diff --git a/arbnode/dataposter/storage/time.go b/arbnode/dataposter/storage/time.go index aa15f29170..82f8a3dbf5 100644 --- a/arbnode/dataposter/storage/time.go +++ b/arbnode/dataposter/storage/time.go @@ -34,11 +34,13 @@ func (b *RlpTime) DecodeRLP(s *rlp.Stream) error { if err != nil { return err } + // #nosec G115 *b = RlpTime(time.Unix(int64(enc.Seconds), int64(enc.Nanos))) return nil } func (b RlpTime) EncodeRLP(w io.Writer) error { + // #nosec G115 return rlp.Encode(w, rlpTimeEncoding{ Seconds: uint64(time.Time(b).Unix()), Nanos: uint64(time.Time(b).Nanosecond()), diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index e2aa321e0d..8934d92b45 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -362,6 +362,7 @@ func TestLength(t *testing.T) { if err != nil { t.Fatalf("Length() unexpected error: %v", err) } + // #nosec G115 if want := arbmath.MaxInt(0, 20-int(tc.pruneFrom)); got != want { t.Errorf("Length() = %d want %d", got, want) } diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 77a0b6e7a2..fd050b5f67 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -437,8 +437,8 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } delayedMessages, err := r.delayedBridge.LookupMessagesInRange(ctx, from, to, func(batchNum uint64) ([]byte, error) { if len(sequencerBatches) > 0 && batchNum >= sequencerBatches[0].SequenceNumber { - idx := int(batchNum - sequencerBatches[0].SequenceNumber) - if idx < len(sequencerBatches) { + idx := batchNum - sequencerBatches[0].SequenceNumber + if idx < 
uint64(len(sequencerBatches)) { return sequencerBatches[idx].Serialize(ctx, r.l1Reader.Client()) } log.Warn("missing mentioned batch in L1 message lookup", "batch", batchNum) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 23b81bde62..fe4149c80e 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -804,6 +804,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L if len(messages) > 0 { latestTimestamp = messages[len(messages)-1].Message.Header.Timestamp } + // #nosec G115 log.Info( "InboxTracker", "sequencerBatchCount", pos, @@ -811,7 +812,9 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L "l1Block", latestL1Block, "l1Timestamp", time.Unix(int64(latestTimestamp), 0), ) + // #nosec G115 inboxLatestBatchGauge.Update(int64(pos)) + // #nosec G115 inboxLatestBatchMessageGauge.Update(int64(newMessageCount)) if t.validator != nil { diff --git a/arbnode/node.go b/arbnode/node.go index c66598618f..93b58e800f 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -515,6 +515,7 @@ func createNodeImpl( if err != nil { return nil, err } + // #nosec G115 sequencerInbox, err := NewSequencerInbox(l1client, deployInfo.SequencerInbox, int64(deployInfo.DeployedAt)) if err != nil { return nil, err @@ -639,6 +640,7 @@ func createNodeImpl( tmpAddress := common.HexToAddress(config.Staker.ContractWalletAddress) existingWalletAddress = &tmpAddress } + // #nosec G115 wallet, err = validatorwallet.NewContract(dp, existingWalletAddress, deployInfo.ValidatorWalletCreator, deployInfo.Rollup, l1Reader, txOptsValidator, int64(deployInfo.DeployedAt), func(common.Address) {}, getExtraGas) if err != nil { return nil, err diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 90e7feddc6..a5bab8342f 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -840,6 +840,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil // Active broadcast reorg and L1 messages at or before start of broadcast messages // Or no active broadcast reorg and broadcast messages start before or immediately after last L1 message if messagesAfterPos >= broadcastStartPos { + // #nosec G115 broadcastSliceIndex := int(messagesAfterPos - broadcastStartPos) messagesOldLen := len(messages) if broadcastSliceIndex < len(s.broadcasterQueuedMessages) { diff --git a/arbos/addressSet/addressSet_test.go b/arbos/addressSet/addressSet_test.go index 7d06c74f0b..d32e07a546 100644 --- a/arbos/addressSet/addressSet_test.go +++ b/arbos/addressSet/addressSet_test.go @@ -316,6 +316,7 @@ func checkIfRectifyMappingWorks(t *testing.T, aset *AddressSet, owners []common. 
Fail(t, "RectifyMapping did not fix the mismatch") } + // #nosec G115 if clearList && int(size(t, aset)) != index+1 { Fail(t, "RectifyMapping did not fix the mismatch") } diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index 3fbb7b3782..566c71b689 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -118,6 +118,7 @@ func (atab *AddressTable) Decompress(buf []byte) (common.Address, uint64, error) if !exists { return common.Address{}, 0, errors.New("invalid index in compressed address") } + // #nosec G115 numBytesRead := uint64(rd.Size() - int64(rd.Len())) return addr, numBytesRead, nil } diff --git a/arbos/arbosState/initialization_test.go b/arbos/arbosState/initialization_test.go index 34802392fe..b0fe1d0dac 100644 --- a/arbos/arbosState/initialization_test.go +++ b/arbos/arbosState/initialization_test.go @@ -109,6 +109,7 @@ func pseudorandomAccountInitInfoForTesting(prand *testhelpers.PseudoRandomDataSo } func pseudorandomHashHashMapForTesting(prand *testhelpers.PseudoRandomDataSource, maxItems uint64) map[common.Hash]common.Hash { + // #nosec G115 size := int(prand.GetUint64() % maxItems) ret := make(map[common.Hash]common.Hash) for i := 0; i < size; i++ { diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 9e00eeb581..392bf36d37 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -509,7 +509,7 @@ func (ps *L1PricingState) getPosterUnitsWithoutCache(tx *types.Transaction, post return 0 } - l1Bytes, err := byteCountAfterBrotliLevel(txBytes, int(brotliCompressionLevel)) + l1Bytes, err := byteCountAfterBrotliLevel(txBytes, brotliCompressionLevel) if err != nil { panic(fmt.Sprintf("failed to compress tx: %v", err)) } @@ -594,7 +594,7 @@ func (ps *L1PricingState) PosterDataCost(message *core.Message, poster common.Ad return am.BigMulByUint(pricePerUnit, units), units } -func byteCountAfterBrotliLevel(input []byte, level int) (uint64, error) { +func byteCountAfterBrotliLevel(input []byte, level uint64) (uint64, error) { compressed, err := arbcompress.CompressLevel(input, level) if err != nil { return 0, err diff --git a/arbos/l1pricing_test.go b/arbos/l1pricing_test.go index 6e2b1b7eec..1cda4b3d82 100644 --- a/arbos/l1pricing_test.go +++ b/arbos/l1pricing_test.go @@ -100,7 +100,7 @@ func expectedResultsForL1Test(input *l1PricingTest) *l1TestExpectedResults { availableFunds = availableFundsCap } } - fundsWantedForRewards := big.NewInt(int64(input.unitReward * input.unitsPerSecond)) + fundsWantedForRewards := new(big.Int).SetUint64(input.unitReward * input.unitsPerSecond) unitsAllocated := arbmath.UintToBig(input.unitsPerSecond) if arbmath.BigLessThan(availableFunds, fundsWantedForRewards) { ret.rewardRecipientBalance = availableFunds @@ -111,7 +111,7 @@ func expectedResultsForL1Test(input *l1PricingTest) *l1TestExpectedResults { uncappedAvailableFunds = arbmath.BigSub(uncappedAvailableFunds, ret.rewardRecipientBalance) ret.unitsRemaining = (3 * input.unitsPerSecond) - unitsAllocated.Uint64() - maxCollectable := big.NewInt(int64(input.fundsSpent)) + maxCollectable := new(big.Int).SetUint64(input.fundsSpent) if arbmath.BigLessThan(availableFunds, maxCollectable) { maxCollectable = availableFunds } @@ -170,7 +170,7 @@ func _testL1PricingFundsDue(t *testing.T, testParams *l1PricingTest, expectedRes Require(t, err) // create some fake collection - balanceAdded := big.NewInt(int64(testParams.fundsCollectedPerSecond * 3)) + balanceAdded := 
new(big.Int).SetUint64(testParams.fundsCollectedPerSecond * 3) unitsAdded := testParams.unitsPerSecond * 3 evm.StateDB.AddBalance(l1pricing.L1PricerFundsPoolAddress, uint256.MustFromBig(balanceAdded)) err = l1p.SetL1FeesAvailable(balanceAdded) diff --git a/arbos/l2pricing/l2pricing_test.go b/arbos/l2pricing/l2pricing_test.go index 57759d7f82..aa1e785f70 100644 --- a/arbos/l2pricing/l2pricing_test.go +++ b/arbos/l2pricing/l2pricing_test.go @@ -40,6 +40,7 @@ func TestPricingModelExp(t *testing.T) { // show that running at the speed limit with a full pool is a steady-state colors.PrintBlue("full pool & speed limit") for seconds := 0; seconds < 4; seconds++ { + // #nosec G115 fakeBlockUpdate(t, pricing, int64(seconds)*int64(limit), uint64(seconds)) if getPrice(t, pricing) != minPrice { Fail(t, "price changed when it shouldn't have") @@ -50,6 +51,7 @@ func TestPricingModelExp(t *testing.T) { // note that for large enough spans of time the price will rise a miniscule amount due to the pool's avg colors.PrintBlue("pool target & speed limit") for seconds := 0; seconds < 4; seconds++ { + // #nosec G115 fakeBlockUpdate(t, pricing, int64(seconds)*int64(limit), uint64(seconds)) if getPrice(t, pricing) != minPrice { Fail(t, "price changed when it shouldn't have") @@ -59,6 +61,7 @@ func TestPricingModelExp(t *testing.T) { // show that running over the speed limit escalates the price before the pool drains colors.PrintBlue("exceeding the speed limit") for { + // #nosec G115 fakeBlockUpdate(t, pricing, 8*int64(limit), 1) newPrice := getPrice(t, pricing) if newPrice < price { diff --git a/arbos/l2pricing/model.go b/arbos/l2pricing/model.go index 131af2c2cf..476effa8aa 100644 --- a/arbos/l2pricing/model.go +++ b/arbos/l2pricing/model.go @@ -30,22 +30,26 @@ func (ps *L2PricingState) AddToGasPool(gas int64) error { return err } // pay off some of the backlog with the added gas, stopping at 0 - backlog = arbmath.SaturatingUCast[uint64](arbmath.SaturatingSub(int64(backlog), gas)) + if gas > 0 { + backlog = arbmath.SaturatingUSub(backlog, uint64(gas)) + } else { + backlog = arbmath.SaturatingUAdd(backlog, uint64(-gas)) + } return ps.SetGasBacklog(backlog) } // UpdatePricingModel updates the pricing model with info from the last block func (ps *L2PricingState) UpdatePricingModel(l2BaseFee *big.Int, timePassed uint64, debug bool) { speedLimit, _ := ps.SpeedLimitPerSecond() - _ = ps.AddToGasPool(int64(timePassed * speedLimit)) + _ = ps.AddToGasPool(arbmath.SaturatingCast[int64](arbmath.SaturatingUMul(timePassed, speedLimit))) inertia, _ := ps.PricingInertia() tolerance, _ := ps.BacklogTolerance() backlog, _ := ps.GasBacklog() minBaseFee, _ := ps.MinBaseFeeWei() baseFee := minBaseFee if backlog > tolerance*speedLimit { - excess := int64(backlog - tolerance*speedLimit) - exponentBips := arbmath.NaturalToBips(excess) / arbmath.Bips(inertia*speedLimit) + excess := arbmath.SaturatingCast[int64](backlog - tolerance*speedLimit) + exponentBips := arbmath.NaturalToBips(excess) / arbmath.SaturatingCast[arbmath.Bips](inertia*speedLimit) baseFee = arbmath.BigMulByBips(minBaseFee, arbmath.ApproxExpBasisPoints(exponentBips, 4)) } _ = ps.SetBaseFeeWei(baseFee) diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 6e6c976644..352726778d 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -156,11 +156,6 @@ func (s *Storage) GetUint64ByUint64(key uint64) (uint64, error) { return s.GetUint64(util.UintToHash(key)) } -func (s *Storage) GetUint32(key common.Hash) (uint32, error) { - value, err := 
s.Get(key) - return uint32(value.Big().Uint64()), err -} - func (s *Storage) Set(key common.Hash, value common.Hash) error { if s.burner.ReadOnly() { log.Error("Read-only burner attempted to mutate state", "key", key, "value", value) @@ -420,6 +415,7 @@ func (sbu *StorageBackedInt64) Get() (int64, error) { if !raw.Big().IsUint64() { panic("invalid value found in StorageBackedInt64 storage") } + // #nosec G115 return int64(raw.Big().Uint64()), err // see implementation note above } @@ -477,6 +473,7 @@ func (sbu *StorageBackedUint16) Get() (uint16, error) { if !big.IsUint64() || big.Uint64() > math.MaxUint16 { panic("expected uint16 compatible value in storage") } + // #nosec G115 return uint16(big.Uint64()), err } @@ -517,6 +514,7 @@ func (sbu *StorageBackedUint32) Get() (uint32, error) { if !big.IsUint64() || big.Uint64() > math.MaxUint32 { panic("expected uint32 compatible value in storage") } + // #nosec G115 return uint32(big.Uint64()), err } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 753ca19cd6..b58a7420b7 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -246,7 +246,7 @@ func (r *inboxMultiplexer) IsCachedSegementLast() bool { if r.delayedMessagesRead < seqMsg.afterDelayedMessages { return false } - for segmentNum := int(r.cachedSegmentNum) + 1; segmentNum < len(seqMsg.segments); segmentNum++ { + for segmentNum := r.cachedSegmentNum + 1; segmentNum < uint64(len(seqMsg.segments)); segmentNum++ { segment := seqMsg.segments[segmentNum] if len(segment) == 0 { continue @@ -276,7 +276,7 @@ func (r *inboxMultiplexer) getNextMsg() (*arbostypes.MessageWithMetadata, error) if segmentNum >= uint64(len(seqMsg.segments)) { break } - segment = seqMsg.segments[int(segmentNum)] + segment = seqMsg.segments[segmentNum] if len(segment) == 0 { segmentNum++ continue @@ -322,7 +322,7 @@ func (r *inboxMultiplexer) getNextMsg() (*arbostypes.MessageWithMetadata, error) log.Warn("reading virtual delayed message segment", "delayedMessagesRead", r.delayedMessagesRead, "afterDelayedMessages", seqMsg.afterDelayedMessages) segment = []byte{BatchSegmentKindDelayedMessages} } else { - segment = seqMsg.segments[int(segmentNum)] + segment = seqMsg.segments[segmentNum] } if len(segment) == 0 { log.Error("empty sequencer message segment", "sequence", r.cachedSegmentNum, "segmentNum", segmentNum) diff --git a/arbutil/block_message_relation.go b/arbutil/block_message_relation.go index a69f9079ee..e164cf2619 100644 --- a/arbutil/block_message_relation.go +++ b/arbutil/block_message_relation.go @@ -15,5 +15,6 @@ func SignedBlockNumberToMessageCount(blockNumber int64, genesisBlockNumber uint6 } func MessageCountToBlockNumber(messageCount MessageIndex, genesisBlockNumber uint64) int64 { + // #nosec G115 return int64(uint64(messageCount)+genesisBlockNumber) - 1 } diff --git a/arbutil/correspondingl1blocknumber.go b/arbutil/correspondingl1blocknumber.go index 05323ed183..d654e471e2 100644 --- a/arbutil/correspondingl1blocknumber.go +++ b/arbutil/correspondingl1blocknumber.go @@ -20,6 +20,7 @@ func ParentHeaderToL1BlockNumber(header *types.Header) uint64 { } func CorrespondingL1BlockNumber(ctx context.Context, client L1Interface, parentBlockNumber uint64) (uint64, error) { + // #nosec G115 header, err := client.HeaderByNumber(ctx, big.NewInt(int64(parentBlockNumber))) if err != nil { return 0, fmt.Errorf("error getting L1 block number %d header : %w", parentBlockNumber, err) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index 1e4a06fe90..f7cc0d8c72 100644 --- 
a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -102,7 +102,8 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block if rng > end-start { rng = end - start } - start += uint64(rand.Intn(int(end - start - rng + 1))) + // #nosec G115 + start += uint64(rand.Int63n(int64(end - start - rng + 1))) end = start + rng } // Inclusive of block reexecution [start, end] diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index f6501105c2..b7b935fb7a 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -97,6 +97,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { if err != nil { log.Warn("error calculating backlogSizeInBytes", "err", err) } else { + // #nosec G115 backlogSizeInBytesGauge.Update(int64(size)) } } @@ -108,6 +109,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { segment = newBacklogSegment() b.head.Store(segment) b.tail.Store(segment) + // #nosec G115 confirmedSequenceNumberGauge.Update(int64(msg.SequenceNumber)) } @@ -143,9 +145,11 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { } lookupByIndex.Store(uint64(msg.SequenceNumber), segment) b.messageCount.Add(1) + // #nosec G115 backlogSizeInBytesGauge.Inc(int64(msg.Size())) } + // #nosec G115 backlogSizeGauge.Update(int64(b.Count())) return nil } @@ -174,7 +178,7 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { } bm := &m.BroadcastMessage{Version: 1} - required := int(end-start) + 1 + required := end - start + 1 for { segMsgs, err := segment.Get(arbmath.MaxInt(start, segment.Start()), arbmath.MinInt(end, segment.End())) if err != nil { @@ -183,7 +187,7 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { bm.Messages = append(bm.Messages, segMsgs...) 
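The blocks_reexecutor fix above swaps `rand.Intn(int(...))` for `rand.Int63n(int64(...))`, removing the unchecked uint64-to-int conversion that G115 flags; Int63n takes an int64 directly, and since its result is non-negative the cast back to uint64 is safe. A sketch of the pattern, assuming the span of valid starting points fits in an int64 (true for realistic block ranges):

package main

import (
	"fmt"
	"math/rand"
)

// randomStart picks a uniformly random starting block in
// [start, end-rng], keeping all arithmetic in 64-bit types.
// Assumes rng <= end-start, so the span is at least 1.
func randomStart(start, end, rng uint64) uint64 {
	span := end - start - rng + 1 // number of valid starting points
	// span is assumed to fit in int64 for realistic block ranges
	// #nosec G115
	return start + uint64(rand.Int63n(int64(span)))
}

func main() {
	fmt.Println(randomStart(100, 200, 10))
}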
segment = segment.Next() - if len(bm.Messages) == required { + if uint64(len(bm.Messages)) == required { break } else if segment == nil { return nil, errOutOfBounds @@ -213,6 +217,7 @@ func (b *backlog) delete(confirmed uint64) { return } + // #nosec G115 confirmedSequenceNumberGauge.Update(int64(confirmed)) // find the segment containing the confirmed message diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index ee712de9ed..d74389f692 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -33,8 +33,8 @@ func validateBacklog(t *testing.T, b *backlog, count, start, end uint64, lookupK } } - expLen := len(lookupKeys) - actualLen := int(b.Count()) + expLen := uint64(len(lookupKeys)) + actualLen := b.Count() if expLen != actualLen { t.Errorf("expected length of lookupByIndex map (%d) does not equal actual length (%d)", expLen, actualLen) } diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index ba95f2d8af..397698635a 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -145,6 +145,7 @@ func (b *Broadcaster) ListenerAddr() net.Addr { } func (b *Broadcaster) GetCachedMessageCount() int { + // #nosec G115 return int(b.backlog.Count()) } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index a052c146d1..39f204980d 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -371,6 +371,7 @@ func mainImpl() int { if err != nil { log.Crit("error getting rollup addresses config", "err", err) } + // #nosec G115 addr, err := validatorwallet.GetValidatorWalletContract(ctx, deployInfo.ValidatorWalletCreator, int64(deployInfo.DeployedAt), l1TransactionOptsValidator, l1Reader, true) if err != nil { log.Crit("error creating validator wallet contract", "error", err, "address", l1TransactionOptsValidator.From.Hex()) @@ -582,7 +583,7 @@ func mainImpl() int { l1TransactionOptsBatchPoster, dataSigner, fatalErrChan, - big.NewInt(int64(nodeConfig.ParentChain.ID)), + new(big.Int).SetUint64(nodeConfig.ParentChain.ID), blobReader, ) if err != nil { diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index bb01477414..5486ba3726 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -60,6 +60,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon break } if time.Since(logged) > 1*time.Minute { + // #nosec G115 log.Info("Recreating missing states", "block", current, "target", target, "remaining", int64(target)-int64(current), "elapsed", time.Since(start), "recreated", recreated) logged = time.Now() } diff --git a/das/aggregator.go b/das/aggregator.go index d944f8d48a..e8972447ad 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -166,6 +166,7 @@ type storeResponse struct { // If Store gets not enough successful responses by the time its context is canceled // (eg via TimeoutWrapper) then it also returns an error. 
func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) { + // #nosec G115 log.Trace("das.Aggregator.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0)) allBackendsSucceeded := false diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go index ca2ee8e7d4..635696bdab 100644 --- a/das/dasRpcClient.go +++ b/das/dasRpcClient.go @@ -138,6 +138,7 @@ func (c *DASRPCClient) sendChunk(ctx context.Context, batchId, i uint64, chunk [ } func (c *DASRPCClient) legacyStore(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) { + // #nosec G115 log.Trace("das.DASRPCClient.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "this", *c) reqSig, err := applyDasSigner(c.signer, message, timeout) diff --git a/das/dasRpcServer.go b/das/dasRpcServer.go index 9e6228ca5d..d14766cc7e 100644 --- a/das/dasRpcServer.go +++ b/das/dasRpcServer.go @@ -108,6 +108,7 @@ type StoreResult struct { } func (s *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, timeout hexutil.Uint64, sig hexutil.Bytes) (*StoreResult, error) { + // #nosec G115 log.Trace("dasRpc.DASRPCServer.Store", "message", pretty.FirstFewBytes(message), "message length", len(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", s) rpcStoreRequestGauge.Inc(1) start := time.Now() @@ -277,6 +278,7 @@ func (s *DASRPCServer) StartChunkedStore(ctx context.Context, timestamp, nChunks } // Prevent replay of old messages + // #nosec G115 if time.Since(time.Unix(int64(timestamp), 0)).Abs() > time.Minute { return nil, errors.New("too much time has elapsed since request was signed") } diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index d873f0568d..2bcbccaae3 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -61,12 +61,13 @@ func RecordHash(record func(bytes32, []byte, arbutil.PreimageType), preimage ... 
return arbmath.FlipBit(keccord(prepend(LeafByte, keccord([]byte{}).Bytes())), 0) } - length := uint32(len(unrolled)) + length := len(unrolled) leaves := []node{} - for bin := uint32(0); bin < length; bin += BinSize { + for bin := 0; bin < length; bin += BinSize { end := arbmath.MinInt(bin+BinSize, length) hash := keccord(prepend(LeafByte, keccord(unrolled[bin:end]).Bytes())) - leaves = append(leaves, node{hash, end - bin}) + // #nosec G115 + leaves = append(leaves, node{hash, uint32(end - bin)}) } layer := leaves @@ -186,7 +187,9 @@ func Content(root bytes32, oracle func(bytes32) ([]byte, error)) ([]byte, error) leaves = append(leaves, leaf) case NodeByte: count := binary.BigEndian.Uint32(data[64:]) - power := uint32(arbmath.NextOrCurrentPowerOf2(uint64(count))) + power := arbmath.NextOrCurrentPowerOf2(uint64(count)) + // #nosec G115 + halfPower := uint32(power / 2) if place.size != count { return nil, fmt.Errorf("invalid size data: %v vs %v for %v", count, place.size, data) @@ -194,11 +197,11 @@ func Content(root bytes32, oracle func(bytes32) ([]byte, error)) ([]byte, error) prior := node{ hash: common.BytesToHash(data[:32]), - size: power / 2, + size: halfPower, } after := node{ hash: common.BytesToHash(data[32:64]), - size: count - power/2, + size: count - halfPower, } // we want to expand leftward so we reverse their order diff --git a/das/db_storage_service.go b/das/db_storage_service.go index e3b6183c37..1d9e5348d4 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -8,6 +8,7 @@ import ( "context" "errors" "fmt" + "math" "os" "path/filepath" "time" @@ -172,7 +173,8 @@ func (dbs *DBStorageService) Put(ctx context.Context, data []byte, timeout uint6 return dbs.db.Update(func(txn *badger.Txn) error { e := badger.NewEntry(dastree.HashBytes(data), data) - if dbs.discardAfterTimeout { + if dbs.discardAfterTimeout && timeout <= math.MaxInt64 { + // #nosec G115 e = e.WithTTL(time.Until(time.Unix(int64(timeout), 0))) } return txn.SetEntry(e) diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index 65ca6fe15c..ce86786718 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "math" "os" "path" "path/filepath" @@ -133,6 +134,10 @@ func (s *LocalFileStorageService) GetByHash(ctx context.Context, key common.Hash func (s *LocalFileStorageService) Put(ctx context.Context, data []byte, expiry uint64) error { logPut("das.LocalFileStorageService.Store", data, expiry, s) + if expiry > math.MaxInt64 { + return fmt.Errorf("request expiry time (%v) exceeds max int64", expiry) + } + // #nosec G115 expiryTime := time.Unix(int64(expiry), 0) currentTimePlusRetention := time.Now().Add(s.config.MaxRetention) if expiryTime.After(currentTimePlusRetention) { @@ -182,6 +187,7 @@ func (s *LocalFileStorageService) Put(ctx context.Context, data []byte, expiry u // new flat layout files, set their modification time accordingly. 
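Both the badger-backed store and the local file store now guard against timeouts above math.MaxInt64 before calling time.Unix (badger skips the TTL, the file store returns an error), since `time.Unix(int64(timeout), 0)` would otherwise receive a wrapped negative value. A sketch of the guard, with the error text paraphrased:

package main

import (
	"fmt"
	"math"
	"time"
)

// expiryTime converts an untrusted uint64 unix timestamp to a
// time.Time, rejecting values that do not fit in an int64.
func expiryTime(expiry uint64) (time.Time, error) {
	if expiry > math.MaxInt64 {
		return time.Time{}, fmt.Errorf("expiry %d exceeds max int64", expiry)
	}
	// bounds checked above
	// #nosec G115
	return time.Unix(int64(expiry), 0), nil
}

func main() {
	t, err := expiryTime(1724258184)
	fmt.Println(t, err)
}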
if s.enableLegacyLayout { tv := syscall.Timeval{ + // #nosec G115 Sec: int64(expiry - uint64(s.legacyLayout.retention.Seconds())), Usec: 0, } diff --git a/das/local_file_storage_service_test.go b/das/local_file_storage_service_test.go index cc27e293e3..01b999f356 100644 --- a/das/local_file_storage_service_test.go +++ b/das/local_file_storage_service_test.go @@ -99,6 +99,7 @@ func TestMigrationNoExpiry(t *testing.T) { getByHashAndCheck(t, s, "a", "b", "c", "d") // Can still iterate by timestamp even if expiry disabled + // #nosec G115 countTimestampEntries(t, &s.layout, time.Unix(int64(now+11), 0), 4) } diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go index a1de200c52..f9baa96547 100644 --- a/das/s3_storage_service.go +++ b/das/s3_storage_service.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "io" + "math" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -110,7 +111,8 @@ func (s3s *S3StorageService) Put(ctx context.Context, value []byte, timeout uint Bucket: aws.String(s3s.bucket), Key: aws.String(s3s.objectPrefix + EncodeStorageServiceKey(dastree.Hash(value))), Body: bytes.NewReader(value)} - if !s3s.discardAfterTimeout { + if !s3s.discardAfterTimeout && timeout <= math.MaxInt64 { + // #nosec G115 expires := time.Unix(int64(timeout), 0) putObjectInput.Expires = &expires } diff --git a/das/sign_after_store_das_writer.go b/das/sign_after_store_das_writer.go index 0e31d30ae9..40b03847d8 100644 --- a/das/sign_after_store_das_writer.go +++ b/das/sign_after_store_das_writer.go @@ -105,6 +105,7 @@ func NewSignAfterStoreDASWriter(ctx context.Context, config DataAvailabilityConf } func (d *SignAfterStoreDASWriter) Store(ctx context.Context, message []byte, timeout uint64) (c *daprovider.DataAvailabilityCertificate, err error) { + // #nosec G115 log.Trace("das.SignAfterStoreDASWriter.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "this", d) c = &daprovider.DataAvailabilityCertificate{ Timeout: timeout, diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go index dc6147a7e4..f45c56afe0 100644 --- a/das/simple_das_reader_aggregator.go +++ b/das/simple_das_reader_aggregator.go @@ -50,8 +50,8 @@ var DefaultRestfulClientAggregatorConfig = RestfulClientAggregatorConfig{ } type SimpleExploreExploitStrategyConfig struct { - ExploreIterations int `koanf:"explore-iterations"` - ExploitIterations int `koanf:"exploit-iterations"` + ExploreIterations uint32 `koanf:"explore-iterations"` + ExploitIterations uint32 `koanf:"exploit-iterations"` } var DefaultSimpleExploreExploitStrategyConfig = SimpleExploreExploitStrategyConfig{ @@ -73,8 +73,8 @@ func RestfulClientAggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { } func SimpleExploreExploitStrategyConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Int(prefix+".explore-iterations", DefaultSimpleExploreExploitStrategyConfig.ExploreIterations, "number of consecutive GetByHash calls to the aggregator where each call will cause it to randomly select from REST endpoints until one returns successfully, before switching to exploit mode") - f.Int(prefix+".exploit-iterations", DefaultSimpleExploreExploitStrategyConfig.ExploitIterations, "number of consecutive GetByHash calls to the aggregator where each call will cause it to select from REST endpoints in order of best latency and success rate, before switching to explore mode") + f.Uint32(prefix+".explore-iterations", DefaultSimpleExploreExploitStrategyConfig.ExploreIterations, "number of consecutive GetByHash calls to the 
aggregator where each call will cause it to randomly select from REST endpoints until one returns successfully, before switching to exploit mode") + f.Uint32(prefix+".exploit-iterations", DefaultSimpleExploreExploitStrategyConfig.ExploitIterations, "number of consecutive GetByHash calls to the aggregator where each call will cause it to select from REST endpoints in order of best latency and success rate, before switching to explore mode") } func NewRestfulClientAggregator(ctx context.Context, config *RestfulClientAggregatorConfig) (*SimpleDASReaderAggregator, error) { @@ -120,8 +120,8 @@ func NewRestfulClientAggregator(ctx context.Context, config *RestfulClientAggreg switch strings.ToLower(config.Strategy) { case "simple-explore-exploit": a.strategy = &simpleExploreExploitStrategy{ - exploreIterations: uint32(config.SimpleExploreExploitStrategy.ExploreIterations), - exploitIterations: uint32(config.SimpleExploreExploitStrategy.ExploitIterations), + exploreIterations: config.SimpleExploreExploitStrategy.ExploreIterations, + exploitIterations: config.SimpleExploreExploitStrategy.ExploitIterations, } case "testing-sequential": a.strategy = &testingSequentialStrategy{} diff --git a/das/util.go b/das/util.go index de266c433f..114e075e79 100644 --- a/das/util.go +++ b/das/util.go @@ -13,11 +13,13 @@ import ( func logPut(store string, data []byte, timeout uint64, reader daprovider.DASReader, more ...interface{}) { if len(more) == 0 { + // #nosec G115 log.Trace( store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), "this", reader, ) } else { + // #nosec G115 log.Trace( store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), "this", reader, more, diff --git a/execution/gethexec/api.go b/execution/gethexec/api.go index c19072ae77..2bff8026c2 100644 --- a/execution/gethexec/api.go +++ b/execution/gethexec/api.go @@ -78,6 +78,7 @@ func (api *ArbDebugAPI) evenlySpaceBlocks(start, end rpc.BlockNumber) (uint64, u end, _ = api.blockchain.ClipToPostNitroGenesis(end) blocks := end.Int64() - start.Int64() + 1 + // #nosec G115 bound := int64(api.blockRangeBound) step := int64(1) if blocks > bound { diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 90e3082062..819cd10500 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -887,6 +887,7 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { for _, queueItem := range queueItems { s.txRetryQueue.Push(queueItem) } + // #nosec G115 log.Error( "cannot sequence: unknown L1 block or L1 timestamp too far from local clock time", "l1Block", l1Block, @@ -1037,10 +1038,14 @@ func (s *Sequencer) updateExpectedSurplus(ctx context.Context) (int64, error) { if err != nil { return 0, fmt.Errorf("error encountered getting l1 pricing surplus while updating expectedSurplus: %w", err) } + // #nosec G115 backlogL1GasCharged := int64(s.execEngine.backlogL1GasCharged()) + // #nosec G115 backlogCallDataUnits := int64(s.execEngine.backlogCallDataUnits()) + // #nosec G115 expectedSurplus := int64(surplus) + backlogL1GasCharged - backlogCallDataUnits*int64(l1GasPrice) // update metrics + // #nosec G115 l1GasPriceGauge.Update(int64(l1GasPrice)) callDataUnitsBacklogGauge.Update(backlogCallDataUnits) unusedL1GasChargeGauge.Update(backlogL1GasCharged) diff --git a/execution/gethexec/tx_pre_checker.go b/execution/gethexec/tx_pre_checker.go index 191331b48a..e0ae330148 100644 --- a/execution/gethexec/tx_pre_checker.go +++ 
b/execution/gethexec/tx_pre_checker.go @@ -161,6 +161,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty oldHeader := header blocksTraversed := uint(0) // find a block that's old enough + // #nosec G115 for now-int64(oldHeader.Time) < config.RequiredStateAge && (config.RequiredStateMaxBlocks <= 0 || blocksTraversed < config.RequiredStateMaxBlocks) && oldHeader.Number.Uint64() > 0 { diff --git a/execution/nodeInterface/NodeInterface.go b/execution/nodeInterface/NodeInterface.go index 9179a52718..45fcebcdfa 100644 --- a/execution/nodeInterface/NodeInterface.go +++ b/execution/nodeInterface/NodeInterface.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "math" "math/big" "sort" @@ -234,6 +235,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64) } balanced := size == arbmath.NextPowerOf2(size)/2 + // #nosec G115 treeLevels := int(arbmath.Log2ceil(size)) // the # of levels in the tree proofLevels := treeLevels - 1 // the # of levels where a hash is needed (all but root) walkLevels := treeLevels // the # of levels we need to consider when building walks @@ -297,6 +299,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64) mid := (lo + hi) / 2 + // #nosec G115 block, err := n.backend.BlockByNumber(n.context, rpc.BlockNumber(mid)) if err != nil { searchErr = err @@ -643,6 +646,10 @@ func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum h // L2BlockRangeForL1 fetches the L1 block number of a given l2 block number. // c ctx and evm mech arguments are not used but supplied to match the precompile function type in NodeInterface contract func (n NodeInterface) BlockL1Num(c ctx, evm mech, l2BlockNum uint64) (uint64, error) { + if l2BlockNum > math.MaxInt64 { + return 0, fmt.Errorf("requested l2 block number %d out of range for int64", l2BlockNum) + } + // #nosec G115 blockHeader, err := n.backend.HeaderByNumber(n.context, rpc.BlockNumber(l2BlockNum)) if err != nil { return 0, err diff --git a/precompiles/ArbAddressTable.go b/precompiles/ArbAddressTable.go index 05f2275fd7..102fd55c3b 100644 --- a/precompiles/ArbAddressTable.go +++ b/precompiles/ArbAddressTable.go @@ -33,7 +33,7 @@ func (con ArbAddressTable) Decompress(c ctx, evm mech, buf []uint8, offset huge) return addr{}, nil, errors.New("invalid offset in ArbAddressTable.Decompress") } result, nbytes, err := c.State.AddressTable().Decompress(buf[ioffset:]) - return result, big.NewInt(int64(nbytes)), err + return result, new(big.Int).SetUint64(nbytes), err } // Lookup the index of an address in the table @@ -45,7 +45,7 @@ func (con ArbAddressTable) Lookup(c ctx, evm mech, addr addr) (huge, error) { if !exists { return nil, errors.New("address does not exist in AddressTable") } - return big.NewInt(int64(result)), nil + return new(big.Int).SetUint64(result), nil } // LookupIndex for an address in the table by index @@ -66,11 +66,11 @@ func (con ArbAddressTable) LookupIndex(c ctx, evm mech, index huge) (addr, error // Register adds an account to the table, shrinking its compressed representation func (con ArbAddressTable) Register(c ctx, evm mech, addr addr) (huge, error) { slot, err := c.State.AddressTable().Register(addr) - return big.NewInt(int64(slot)), err + return new(big.Int).SetUint64(slot), err } // Size gets the number of addresses in the table func (con ArbAddressTable) Size(c ctx, evm mech) (huge, error) { size, err := c.State.AddressTable().Size() - return big.NewInt(int64(size)), err + return 
new(big.Int).SetUint64(size), err } diff --git a/precompiles/ArbRetryableTx.go b/precompiles/ArbRetryableTx.go index d508d75752..93e8023603 100644 --- a/precompiles/ArbRetryableTx.go +++ b/precompiles/ArbRetryableTx.go @@ -149,7 +149,7 @@ func (con ArbRetryableTx) GetTimeout(c ctx, evm mech, ticketId bytes32) (huge, e if err != nil { return nil, err } - return big.NewInt(int64(timeout)), nil + return new(big.Int).SetUint64(timeout), nil } // Keepalive adds one lifetime period to the ticket's expiry @@ -176,8 +176,9 @@ func (con ArbRetryableTx) Keepalive(c ctx, evm mech, ticketId bytes32) (huge, er return big.NewInt(0), err } - err = con.LifetimeExtended(c, evm, ticketId, big.NewInt(int64(newTimeout))) - return big.NewInt(int64(newTimeout)), err + bigNewTimeout := new(big.Int).SetUint64(newTimeout) + err = con.LifetimeExtended(c, evm, ticketId, bigNewTimeout) + return bigNewTimeout, err } // GetBeneficiary gets the beneficiary of the ticket diff --git a/precompiles/ArbSys.go b/precompiles/ArbSys.go index 13f56d3b8e..d55067a09c 100644 --- a/precompiles/ArbSys.go +++ b/precompiles/ArbSys.go @@ -162,7 +162,7 @@ func (con *ArbSys) SendTxToL1(c ctx, evm mech, value huge, destination addr, cal } } - leafNum := big.NewInt(int64(size - 1)) + leafNum := new(big.Int).SetUint64(size - 1) var blockTime big.Int blockTime.SetUint64(evm.Context.Time) @@ -199,7 +199,7 @@ func (con ArbSys) SendMerkleTreeState(c ctx, evm mech) (huge, bytes32, []bytes32 for i, par := range rawPartials { partials[i] = par } - return big.NewInt(int64(size)), rootHash, partials, nil + return new(big.Int).SetUint64(size), rootHash, partials, nil } // WithdrawEth send paid eth to the destination on L1 diff --git a/relay/relay_stress_test.go b/relay/relay_stress_test.go index 9a8875a429..9d5c415056 100644 --- a/relay/relay_stress_test.go +++ b/relay/relay_stress_test.go @@ -160,7 +160,7 @@ func largeBacklogRelayTestImpl(t *testing.T, numClients, backlogSize, l2MsgSize connected++ } } - if int32(connected) != int32(numClients) { + if connected != numClients { t.Fail() } log.Info("number of clients connected", "expected", numClients, "got", connected) diff --git a/staker/block_challenge_backend.go b/staker/block_challenge_backend.go index 42351789ba..0dd89865bd 100644 --- a/staker/block_challenge_backend.go +++ b/staker/block_challenge_backend.go @@ -219,6 +219,6 @@ func (b *BlockChallengeBackend) IssueExecChallenge( }, machineStatuses, globalStateHashes, - big.NewInt(int64(numsteps)), + new(big.Int).SetUint64(numsteps), ) } diff --git a/staker/block_validator.go b/staker/block_validator.go index 8f5724beac..2239952b37 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -315,6 +315,7 @@ func NewBlockValidator( func atomicStorePos(addr *atomic.Uint64, val arbutil.MessageIndex, metr metrics.Gauge) { addr.Store(uint64(val)) + // #nosec G115 metr.Update(int64(val)) } @@ -573,6 +574,7 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e v.nextCreateBatch = batch v.nextCreateBatchBlockHash = batchBlockHash v.nextCreateBatchMsgCount = count + // #nosec G115 validatorMsgCountCurrentBatch.Update(int64(count)) v.nextCreateBatchReread = false } @@ -723,6 +725,7 @@ func (v *BlockValidator) iterativeValidationPrint(ctx context.Context) time.Dura if err != nil { printedCount = -1 } else { + // #nosec G115 printedCount = int64(batchMsgs) + int64(validated.GlobalState.PosInBatch) } log.Info("validated execution", "messageCount", printedCount, "globalstate", validated.GlobalState, "WasmRoots", 
validated.WasmRoots) @@ -992,8 +995,10 @@ func (v *BlockValidator) UpdateLatestStaked(count arbutil.MessageIndex, globalSt if v.recordSentA.Load() < countUint64 { v.recordSentA.Store(countUint64) } + // #nosec G115 v.validatedA.Store(countUint64) v.valLoopPos = count + // #nosec G115 validatorMsgCountValidatedGauge.Update(int64(countUint64)) err = v.writeLastValidated(globalState, nil) // we don't know which wasm roots were validated if err != nil { @@ -1058,6 +1063,7 @@ func (v *BlockValidator) Reorg(ctx context.Context, count arbutil.MessageIndex) } if v.validatedA.Load() > countUint64 { v.validatedA.Store(countUint64) + // #nosec G115 validatorMsgCountValidatedGauge.Update(int64(countUint64)) err := v.writeLastValidated(v.nextCreateStartGS, nil) // we don't know which wasm roots were validated if err != nil { @@ -1249,6 +1255,7 @@ func (v *BlockValidator) checkValidatedGSCaughtUp() (bool, error) { atomicStorePos(&v.createdA, count, validatorMsgCountCreatedGauge) atomicStorePos(&v.recordSentA, count, validatorMsgCountRecordSentGauge) atomicStorePos(&v.validatedA, count, validatorMsgCountValidatedGauge) + // #nosec G115 validatorMsgCountValidatedGauge.Update(int64(count)) v.chainCaughtUp = true return true, nil diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go index ed4fad6450..5dca2764e8 100644 --- a/staker/challenge-cache/cache.go +++ b/staker/challenge-cache/cache.go @@ -187,12 +187,12 @@ func (c *Cache) Prune(ctx context.Context, messageNumber uint64) error { if info.IsDir() { matches := pattern.FindStringSubmatch(info.Name()) if len(matches) > 1 { - dirNameMessageNum, err := strconv.Atoi(matches[1]) + dirNameMessageNum, err := strconv.ParseUint(matches[1], 10, 64) if err != nil { return err } // Collect the directory path if the message number is <= the specified value. 
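The challenge-cache change just above is the cleanest fix of the lot: instead of strconv.Atoi followed by a converted comparison, strconv.ParseUint parses the directory name straight into the uint64 the comparison needs, so no cast and no annotation is required. For reference:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Bit size 64 yields a full-range uint64; base 10 matches the
	// decimal message numbers embedded in the directory names.
	dirNameMessageNum, err := strconv.ParseUint("12345", 10, 64)
	if err != nil {
		panic(err)
	}
	messageNumber := uint64(20000)
	fmt.Println(dirNameMessageNum <= messageNumber) // true
}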
- if dirNameMessageNum <= int(messageNumber) { + if dirNameMessageNum <= messageNumber { pathsToDelete = append(pathsToDelete, path) } } diff --git a/staker/challenge_manager.go b/staker/challenge_manager.go index b1421d7e41..ef431d3c79 100644 --- a/staker/challenge_manager.go +++ b/staker/challenge_manager.go @@ -294,7 +294,7 @@ func (m *ChallengeManager) bisect(ctx context.Context, backend ChallengeBackend, if newChallengeLength < bisectionDegree { bisectionDegree = newChallengeLength } - newSegments := make([][32]byte, int(bisectionDegree+1)) + newSegments := make([][32]byte, bisectionDegree+1) position := startSegmentPosition normalSegmentLength := newChallengeLength / bisectionDegree for i := range newSegments { diff --git a/staker/challenge_test.go b/staker/challenge_test.go index 4534b04a25..33f1644c63 100644 --- a/staker/challenge_test.go +++ b/staker/challenge_test.go @@ -77,7 +77,7 @@ func CreateChallenge( resultReceiverAddr, maxInboxMessage, [2][32]byte{startHashBytes, endHashBytes}, - big.NewInt(int64(endMachineSteps)), + new(big.Int).SetUint64(endMachineSteps), asserter, challenger, big.NewInt(100), diff --git a/staker/l1_validator.go b/staker/l1_validator.go index dd9673ee0b..6ea9fd8ded 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -247,6 +247,7 @@ func (v *L1Validator) generateNodeAction( startStateProposedParentChain, err, ) } + // #nosec G115 startStateProposedTime := time.Unix(int64(startStateProposedHeader.Time), 0) v.txStreamer.PauseReorgs() @@ -375,6 +376,7 @@ func (v *L1Validator) generateNodeAction( return nil, false, fmt.Errorf("error getting rollup minimum assertion period: %w", err) } + // #nosec G115 timeSinceProposed := big.NewInt(int64(l1BlockNumber) - int64(startStateProposedL1)) if timeSinceProposed.Cmp(minAssertionPeriod) < 0 { // Too soon to assert diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index b35bebd1c6..5ef28a49dc 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -196,7 +196,7 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, if logQueryRangeSize == 0 { query.ToBlock = toBlock } else { - query.ToBlock = new(big.Int).Add(fromBlock, big.NewInt(int64(logQueryRangeSize))) + query.ToBlock = new(big.Int).Add(fromBlock, new(big.Int).SetUint64(logQueryRangeSize)) } if query.ToBlock.Cmp(toBlock) > 0 { query.ToBlock = toBlock diff --git a/staker/staker.go b/staker/staker.go index c54e74be37..6e93d27311 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -352,6 +352,7 @@ func (s *Staker) Initialize(ctx context.Context) error { if err != nil { return err } + // #nosec G115 stakerLatestStakedNodeGauge.Update(int64(latestStaked)) if latestStaked == 0 { return nil @@ -570,6 +571,7 @@ func (s *Staker) Start(ctxIn context.Context) { if err != nil && ctx.Err() == nil { log.Error("staker: error checking latest staked", "err", err) } + // #nosec G115 stakerLatestStakedNodeGauge.Update(int64(staked)) if stakedGlobalState != nil { for _, notifier := range s.stakedNotifiers { @@ -585,6 +587,7 @@ func (s *Staker) Start(ctxIn context.Context) { log.Error("staker: error checking latest confirmed", "err", err) } } + // #nosec G115 stakerLatestConfirmedNodeGauge.Update(int64(confirmed)) if confirmedGlobalState != nil { for _, notifier := range s.confirmedNotifiers { @@ -726,6 +729,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { if err != nil { return nil, fmt.Errorf("error getting latest staked node of own wallet %v: %w", walletAddressOrZero, err) } 
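In l1_validator.go the elapsed-block computation keeps its `int64(l1BlockNumber) - int64(startStateProposedL1)` form and is annotated rather than restructured; block numbers sit far below math.MaxInt64, so the conversions cannot overflow in practice. A sketch of that annotated form alongside a conversion-free big.Int alternative (helper names are illustrative):

package main

import (
	"fmt"
	"math/big"
)

// blocksSince returns how many blocks have elapsed, as a signed
// value (negative if since is ahead of now).
func blocksSince(now, since uint64) *big.Int {
	// block numbers are far below math.MaxInt64 in practice
	// #nosec G115
	return big.NewInt(int64(now) - int64(since))
}

// blocksSinceBig computes the same difference with no integer
// conversions, at the cost of two extra allocations.
func blocksSinceBig(now, since uint64) *big.Int {
	return new(big.Int).Sub(new(big.Int).SetUint64(now), new(big.Int).SetUint64(since))
}

func main() {
	fmt.Println(blocksSince(1000, 900), blocksSinceBig(900, 1000))
}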
diff --git a/staker/staker.go b/staker/staker.go
index c54e74be37..6e93d27311 100644
--- a/staker/staker.go
+++ b/staker/staker.go
@@ -352,6 +352,7 @@ func (s *Staker) Initialize(ctx context.Context) error {
 		if err != nil {
 			return err
 		}
+		// #nosec G115
 		stakerLatestStakedNodeGauge.Update(int64(latestStaked))
 		if latestStaked == 0 {
 			return nil
@@ -570,6 +571,7 @@ func (s *Staker) Start(ctxIn context.Context) {
 		if err != nil && ctx.Err() == nil {
 			log.Error("staker: error checking latest staked", "err", err)
 		}
+		// #nosec G115
 		stakerLatestStakedNodeGauge.Update(int64(staked))
 		if stakedGlobalState != nil {
 			for _, notifier := range s.stakedNotifiers {
@@ -585,6 +587,7 @@ func (s *Staker) Start(ctxIn context.Context) {
 				log.Error("staker: error checking latest confirmed", "err", err)
 			}
 		}
+		// #nosec G115
 		stakerLatestConfirmedNodeGauge.Update(int64(confirmed))
 		if confirmedGlobalState != nil {
 			for _, notifier := range s.confirmedNotifiers {
@@ -726,6 +729,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) {
 	if err != nil {
 		return nil, fmt.Errorf("error getting latest staked node of own wallet %v: %w", walletAddressOrZero, err)
 	}
+	// #nosec G115
 	stakerLatestStakedNodeGauge.Update(int64(latestStakedNodeNum))
 	if rawInfo != nil {
 		rawInfo.LatestStakedNode = latestStakedNodeNum
diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go
index bd0a1f3336..eef6c29b7a 100644
--- a/system_tests/block_validator_test.go
+++ b/system_tests/block_validator_test.go
@@ -259,6 +259,7 @@ func testBlockValidatorSimple(t *testing.T, opts Options) {
 	Require(t, err)
 	// up to 3 extra references: awaiting validation, recently valid, lastValidatedHeader
 	largestRefCount := lastBlockNow.NumberU64() - lastBlock.NumberU64() + 3
+	// #nosec G115
 	if finalRefCount < 0 || finalRefCount > int64(largestRefCount) {
 		Fatal(t, "unexpected refcount:", finalRefCount)
 	}
diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go
index 9fe419593e..f87283432e 100644
--- a/system_tests/forwarder_test.go
+++ b/system_tests/forwarder_test.go
@@ -246,6 +246,7 @@ func TestRedisForwarder(t *testing.T) {
 	for i := range seqClients {
 		userA := user("A", i)
 		builder.L2Info.GenerateAccount(userA)
+		// #nosec G115
 		tx := builder.L2Info.PrepareTx("Owner", userA, builder.L2Info.TransferGas, big.NewInt(1e12+int64(builder.L2Info.TransferGas)*builder.L2Info.GasPrice.Int64()), nil)
 		err := fallbackClient.SendTransaction(ctx, tx)
 		Require(t, err)
diff --git a/system_tests/initialization_test.go b/system_tests/initialization_test.go
index f0797404a9..17e020e6ab 100644
--- a/system_tests/initialization_test.go
+++ b/system_tests/initialization_test.go
@@ -21,6 +21,7 @@ func InitOneContract(prand *testhelpers.PseudoRandomDataSource) (*statetransfer.
 	storageMap := make(map[common.Hash]common.Hash)
 	code := []byte{0x60, 0x0} // PUSH1 0
 	sum := big.NewInt(0)
+	// #nosec G115
 	numCells := int(prand.GetUint64() % 1000)
 	for i := 0; i < numCells; i++ {
 		storageAddr := prand.GetHash()
diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go
index 739d756a31..c68df6ea22 100644
--- a/system_tests/outbox_test.go
+++ b/system_tests/outbox_test.go
@@ -146,6 +146,7 @@ func TestOutboxProofs(t *testing.T) {
 			treeSize := root.size
 			balanced := treeSize == arbmath.NextPowerOf2(treeSize)/2
+			// #nosec G115
 			treeLevels := int(arbmath.Log2ceil(treeSize)) // the # of levels in the tree
 			proofLevels := treeLevels - 1                 // the # of levels where a hash is needed (all but root)
 			walkLevels := treeLevels                      // the # of levels we need to consider when building walks
diff --git a/system_tests/program_recursive_test.go b/system_tests/program_recursive_test.go
index dbf527a293..e928f9f3aa 100644
--- a/system_tests/program_recursive_test.go
+++ b/system_tests/program_recursive_test.go
@@ -154,6 +154,7 @@ func testProgramResursiveCalls(t *testing.T, tests [][]multiCallRecurse, jit boo
 	// execute transactions
 	blockNum := uint64(0)
 	for {
+		// #nosec G115
 		item := int(rander.GetUint64()/4) % len(tests)
 		blockNum = testProgramRecursiveCall(t, builder, slotVals, rander, tests[item])
 		tests[item] = tests[len(tests)-1]
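Most of these hunks change no behavior at all: where a conversion is known safe (node numbers, message counts, and similar values that stay far below the int64 ceiling), the lint is suppressed rather than worked around. gosec associates a #nosec comment with the statement it is written above, and naming the rule keeps every other check active there. A sketch of the pattern, assuming go-ethereum's metrics package as already used above (the gauge name is hypothetical):

package example

import "github.com/ethereum/go-ethereum/metrics"

var latestNodeGauge = metrics.NewRegisteredGauge("example/staker/latest", nil)

// recordLatestNode mirrors the gauge updates above: the uint64 -> int64
// conversion trips G115, and the directive suppresses only that rule,
// only for this statement.
func recordLatestNode(nodeNum uint64) {
	// node numbers cannot realistically approach math.MaxInt64
	// #nosec G115
	latestNodeGauge.Update(int64(nodeNum))
}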
diff --git a/system_tests/program_test.go b/system_tests/program_test.go
index e171f2a444..ed640809db 100644
--- a/system_tests/program_test.go
+++ b/system_tests/program_test.go
@@ -582,6 +582,7 @@ func testCalls(t *testing.T, jit bool) {
 		for i := 0; i < 2; i++ {
 			inner := nest(level - 1)
+			// #nosec G115
 			args = append(args, arbmath.Uint32ToBytes(uint32(len(inner)))...)
 			args = append(args, inner...)
 		}
@@ -637,6 +638,7 @@ func testCalls(t *testing.T, jit bool) {
 	colors.PrintBlue("Calling the ArbosTest precompile (Rust => precompile)")
 	testPrecompile := func(gas uint64) uint64 {
 		// Call the burnArbGas() precompile from Rust
+		// #nosec G115
 		burn := pack(burnArbGas(big.NewInt(int64(gas))))
 		args := argsForMulticall(vm.CALL, types.ArbosTestAddress, nil, burn)
 		tx := l2info.PrepareTxTo("Owner", &callsAddr, 1e9, nil, args)
@@ -650,6 +652,7 @@ func testCalls(t *testing.T, jit bool) {
 	large := testPrecompile(largeGas)
 
 	if !arbmath.Within(large-small, largeGas-smallGas, 2) {
+		// #nosec G115
 		ratio := float64(int64(large)-int64(small)) / float64(int64(largeGas)-int64(smallGas))
 		Fatal(t, "inconsistent burns", large, small, largeGas, smallGas, ratio)
 	}
@@ -1527,6 +1530,7 @@ func readWasmFile(t *testing.T, file string) ([]byte, []byte) {
 	Require(t, err)
 
 	// chose a random dictionary for testing, but keep the same files consistent
+	// #nosec G115
 	randDict := arbcompress.Dictionary((len(file) + len(t.Name())) % 2)
 
 	wasmSource, err := programs.Wat2Wasm(source)
@@ -1597,6 +1601,7 @@ func argsForMulticall(opcode vm.OpCode, address common.Address, value *big.Int,
 	if opcode == vm.CALL {
 		length += 32
 	}
+	// #nosec G115
 	args = append(args, arbmath.Uint32ToBytes(uint32(length))...)
 	args = append(args, kinds[opcode])
 	if opcode == vm.CALL {
diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go
index 09d53669ee..cd3904ca06 100644
--- a/system_tests/recreatestate_rpc_test.go
+++ b/system_tests/recreatestate_rpc_test.go
@@ -132,6 +132,7 @@ func TestRecreateStateForRPCNoDepthLimit(t *testing.T) {
 func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	// #nosec G115
 	depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei))
 	execConfig := ExecConfigDefaultTest()
 	execConfig.RPC.MaxRecreateStateDepth = depthGasLimit
@@ -407,6 +408,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig
 			gas = 0
 			blocks = 0
 		} else {
+			// #nosec G115
 			if int(i) >= int(lastBlock)-int(cacheConfig.BlockCount) {
 				// skipping nonexistence check - the state might have been saved on node shutdown
 				continue
@@ -471,6 +473,7 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) {
 	for _, skipGas := range skipGasValues {
 		for _, skipBlocks := range skipBlockValues[:len(skipBlockValues)-2] {
 			cacheConfig.MaxAmountOfGasToSkipStateSaving = skipGas
+			// #nosec G115
 			cacheConfig.MaxNumberOfBlocksToSkipStateSaving = uint32(skipBlocks)
 			testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 100)
 		}
@@ -495,6 +498,7 @@ func TestGettingStateForRPCFullNode(t *testing.T) {
 	if header == nil {
 		Fatal(t, "failed to get current block header")
 	}
+	// #nosec G115
 	state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64()))
 	Require(t, err)
 	addr := builder.L2Info.GetAddress("User2")
@@ -505,6 +509,7 @@ func TestGettingStateForRPCFullNode(t *testing.T) {
 		Fatal(t, "User2 address does not exist in the state")
 	}
 	// Get the state again to avoid caching
+	// #nosec G115
 	state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64()))
 	Require(t, err)
@@ -542,6 +547,7 @@ func TestGettingStateForRPCHybridArchiveNode(t *testing.T) {
 	if header == nil {
 		Fatal(t, "failed to get current block header")
 	}
+	// #nosec G115
 	state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64()))
 	Require(t, err)
 	addr := builder.L2Info.GetAddress("User2")
@@ -552,6 +558,7 @@ func TestGettingStateForRPCHybridArchiveNode(t *testing.T) {
 		Fatal(t, "User2 address does not exist in the state")
 	}
 	// Get the state again to avoid caching
+	// #nosec G115
 	state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64()))
 	Require(t, err)
diff --git a/system_tests/seq_nonce_test.go b/system_tests/seq_nonce_test.go
index 72629e1978..c099563e29 100644
--- a/system_tests/seq_nonce_test.go
+++ b/system_tests/seq_nonce_test.go
@@ -111,6 +111,7 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) {
 	}
 
 	for wait := 9; wait >= 0; wait-- {
+		// #nosec G115
 		got := int(completed.Load())
 		expected := count - builder.execConfig.Sequencer.NonceFailureCacheSize
 		if got == expected {
diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go
index 4dc8f4a664..6babe5833f 100644
--- a/system_tests/seqinbox_test.go
+++ b/system_tests/seqinbox_test.go
@@ -229,6 +229,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
 		reorgTargetNumber := blockStates[reorgTo].l1BlockNumber
 		currentHeader, err := builder.L1.Client.HeaderByNumber(ctx, nil)
 		Require(t, err)
+		// #nosec G115
 		if currentHeader.Number.Int64()-int64(reorgTargetNumber) < 65 {
 			Fatal(t, "Less than 65 blocks of difference between current block", currentHeader.Number, "and target", reorgTargetNumber)
 		}
@@ -346,7 +347,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
 				BridgeAddr:               builder.L1Info.GetAddress("Bridge"),
 				DataPosterAddr:           seqOpts.From,
 				GasRefunderAddr:          gasRefunderAddr,
-				SequencerInboxAccs:       len(blockStates),
+				SequencerInboxAccs:       uint64(len(blockStates)),
 				AfterDelayedMessagesRead: 1,
 			})
 			if diff := diffAccessList(accessed, *wantAL); diff != "" {
@@ -374,6 +375,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
 			t.Fatalf("BalanceAt(%v) unexpected error: %v", seqOpts.From, err)
 		}
 		txCost := txRes.EffectiveGasPrice.Uint64() * txRes.GasUsed
+		// #nosec G115
 		if diff := before.Int64() - after.Int64(); diff >= int64(txCost) {
 			t.Errorf("Transaction: %v was not refunded, balance diff: %v, cost: %v", tx.Hash(), diff, txCost)
 		}
@@ -424,11 +426,13 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
 	}
 
 	for _, state := range blockStates {
+		// #nosec G115
 		block, err := l2Backend.APIBackend().BlockByNumber(ctx, rpc.BlockNumber(state.l2BlockNumber))
 		Require(t, err)
 		if block == nil {
 			Fatal(t, "missing state block", state.l2BlockNumber)
 		}
+		// #nosec G115
 		stateDb, _, err := l2Backend.APIBackend().StateAndHeaderByNumber(ctx, rpc.BlockNumber(state.l2BlockNumber))
 		Require(t, err)
 		for acct, expectedBalance := range state.balances {
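Several of the test hunks above annotate casts into go-ethereum's rpc.BlockNumber, which is an int64 underneath. In test code the block numbers are tiny, so the suppression is harmless; for untrusted inputs a checked variant might look like this (hypothetical helper, assuming go-ethereum's rpc package):

package example

import (
	"fmt"
	"math"

	"github.com/ethereum/go-ethereum/rpc"
)

// blockNumberFromUint64 refuses values that cannot be represented instead
// of letting the cast wrap to a negative block number.
func blockNumberFromUint64(n uint64) (rpc.BlockNumber, error) {
	if n > math.MaxInt64 {
		return 0, fmt.Errorf("block number %d overflows rpc.BlockNumber", n)
	}
	// #nosec G115
	return rpc.BlockNumber(n), nil
}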
diff --git a/system_tests/snap_sync_test.go b/system_tests/snap_sync_test.go
index a04d9f5bf3..7462b5f5f0 100644
--- a/system_tests/snap_sync_test.go
+++ b/system_tests/snap_sync_test.go
@@ -92,8 +92,10 @@ func TestSnapSync(t *testing.T) {
 	waitForBlockToCatchupToMessageCount(ctx, t, nodeC.Client, finalMessageCount)
 	// Fetching message count - 1 instead on the latest block number as the latest block number might not be
 	// present in the snap sync node since it does not have the sequencer feed.
+	// #nosec G115
 	header, err := builder.L2.Client.HeaderByNumber(ctx, big.NewInt(int64(finalMessageCount)-1))
 	Require(t, err)
+	// #nosec G115
 	headerNodeC, err := nodeC.Client.HeaderByNumber(ctx, big.NewInt(int64(finalMessageCount)-1))
 	Require(t, err)
 	// Once the node is synced up, check if the block hash is the same for the last block
diff --git a/system_tests/twonodeslong_test.go b/system_tests/twonodeslong_test.go
index 83cd975dd8..60707b83fb 100644
--- a/system_tests/twonodeslong_test.go
+++ b/system_tests/twonodeslong_test.go
@@ -63,6 +63,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) {
 	builder.L2Info.GenerateAccount("ErrorTxSender")
 	builder.L2.SendWaitTestTransactions(t, []*types.Transaction{
+		// #nosec G115
 		builder.L2Info.PrepareTx("Faucet", "ErrorTxSender", builder.L2Info.TransferGas, big.NewInt(l2pricing.InitialBaseFeeWei*int64(builder.L2Info.TransferGas)), nil),
 	})
diff --git a/system_tests/unsupported_txtypes_test.go b/system_tests/unsupported_txtypes_test.go
index 4c3c8661c8..a228cb2454 100644
--- a/system_tests/unsupported_txtypes_test.go
+++ b/system_tests/unsupported_txtypes_test.go
@@ -112,8 +112,8 @@ func TestBlobAndInternalTxsAsDelayedMsgReject(t *testing.T) {
 	blocknum, err := builder.L2.Client.BlockNumber(ctx)
 	Require(t, err)
 
-	for i := int64(0); i <= int64(blocknum); i++ {
-		block, err := builder.L2.Client.BlockByNumber(ctx, big.NewInt(i))
+	for i := uint64(0); i <= blocknum; i++ {
+		block, err := builder.L2.Client.BlockByNumber(ctx, new(big.Int).SetUint64(i))
 		Require(t, err)
 		for _, tx := range block.Transactions() {
 			if _, ok := txAcceptStatus[tx.Hash()]; ok {
diff --git a/util/arbmath/bips.go b/util/arbmath/bips.go
index 8b7c47d82b..646dad3a92 100644
--- a/util/arbmath/bips.go
+++ b/util/arbmath/bips.go
@@ -20,7 +20,7 @@ func PercentToBips(percentage int64) Bips {
 }
 
 func BigToBips(natural *big.Int) Bips {
-	return Bips(natural.Uint64())
+	return Bips(natural.Int64())
 }
 
 func BigMulByBips(value *big.Int, bips Bips) *big.Int {
@@ -51,5 +51,5 @@ func (bips Bips) Uint64() uint64 {
 func BigDivToBips(dividend, divisor *big.Int) Bips {
 	value := BigMulByInt(dividend, int64(OneInBips))
 	value.Div(value, divisor)
-	return Bips(BigToUintSaturating(value))
+	return Bips(BigToIntSaturating(value))
 }
diff --git a/util/arbmath/math.go b/util/arbmath/math.go
index 62af1e26e0..e5bed67f6d 100644
--- a/util/arbmath/math.go
+++ b/util/arbmath/math.go
@@ -117,6 +117,18 @@ func BigToUintSaturating(value *big.Int) uint64 {
 	return value.Uint64()
 }
 
+// BigToIntSaturating casts a huge to an int64, saturating if out of bounds
+func BigToIntSaturating(value *big.Int) int64 {
+	if !value.IsInt64() {
+		if value.Sign() < 0 {
+			return math.MinInt64
+		} else {
+			return math.MaxInt64
+		}
+	}
+	return value.Int64()
+}
+
 // BigToUintOrPanic casts a huge to a uint, panicking if out of bounds
 func BigToUintOrPanic(value *big.Int) uint64 {
 	if value.Sign() < 0 {
@@ -260,10 +272,12 @@ func BigFloatMulByUint(multiplicand *big.Float, multiplier uint64) *big.Float {
 }
 
 func MaxSignedValue[T Signed]() T {
+	// #nosec G115
 	return T((uint64(1) << (8*unsafe.Sizeof(T(0)) - 1)) - 1)
}
 
 func MinSignedValue[T Signed]() T {
+	// #nosec G115
 	return T(uint64(1) << ((8 * unsafe.Sizeof(T(0))) - 1))
 }
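BigToIntSaturating mirrors the existing BigToUintSaturating but clamps to the int64 range in both directions, which is what BigDivToBips now relies on. A sketch of a test pinning down that behavior, assuming it sits alongside the helper in the arbmath package:

package arbmath

import (
	"math"
	"math/big"
	"testing"
)

func TestBigToIntSaturating(t *testing.T) {
	huge := new(big.Int).Lsh(big.NewInt(1), 70) // 2^70, well past int64
	if got := BigToIntSaturating(huge); got != math.MaxInt64 {
		t.Errorf("got %d, want math.MaxInt64", got)
	}
	if got := BigToIntSaturating(new(big.Int).Neg(huge)); got != math.MinInt64 {
		t.Errorf("got %d, want math.MinInt64", got)
	}
	if got := BigToIntSaturating(big.NewInt(-42)); got != -42 {
		t.Errorf("got %d, want -42", got)
	}
}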
diff --git a/util/arbmath/math_test.go b/util/arbmath/math_test.go
index 1be60dc58b..528666dc19 100644
--- a/util/arbmath/math_test.go
+++ b/util/arbmath/math_test.go
@@ -35,6 +35,7 @@ func TestMath(t *testing.T) {
 		input := rand.Uint64() / 256
 		approx := ApproxSquareRoot(input)
 		correct := math.Sqrt(float64(input))
+		// #nosec G115
 		diff := int(approx) - int(correct)
 		if diff < -1 || diff > 1 {
 			Fail(t, "sqrt approximation off by too much", diff, input, approx, correct)
@@ -46,6 +47,7 @@ func TestMath(t *testing.T) {
 		input := uint64(i)
 		approx := ApproxSquareRoot(input)
 		correct := math.Sqrt(float64(input))
+		// #nosec G115
 		diff := int(approx) - int(correct)
 		if diff < 0 || diff > 1 {
 			Fail(t, "sqrt approximation off by too much", diff, input, approx, correct)
@@ -57,6 +59,7 @@ func TestMath(t *testing.T) {
 		input := uint64(1 << i)
 		approx := ApproxSquareRoot(input)
 		correct := math.Sqrt(float64(input))
+		// #nosec G115
 		diff := int(approx) - int(correct)
 		if diff != 0 {
 			Fail(t, "incorrect", "2^", i, diff, approx, correct)
diff --git a/util/arbmath/uint24.go b/util/arbmath/uint24.go
index 818f871a23..a0c5aa27b7 100644
--- a/util/arbmath/uint24.go
+++ b/util/arbmath/uint24.go
@@ -9,10 +9,10 @@ import (
 	"math/big"
 )
 
-const MaxUint24 = 1<<24 - 1 // 16777215
-
 type Uint24 uint32
 
+const MaxUint24 = 1<<24 - 1 // 16777215
+
 func (value Uint24) ToBig() *big.Int {
 	return UintToBig(uint64(value))
 }
@@ -26,8 +26,9 @@ func (value Uint24) ToUint64() uint64 {
 }
 
 func IntToUint24[T uint32 | uint64](value T) (Uint24, error) {
+	// #nosec G115
 	if value > T(MaxUint24) {
-		return Uint24(MaxUint24), errors.New("value out of range")
+		return MaxUint24, errors.New("value out of range")
 	}
 	return Uint24(value), nil
 }
@@ -40,6 +41,7 @@ func BigToUint24OrPanic(value *big.Int) Uint24 {
 	if !value.IsUint64() || value.Uint64() > MaxUint24 {
 		panic("big.Int value exceeds the max Uint24")
 	}
+	// #nosec G115
 	return Uint24(value.Uint64())
 }
diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go
index 074d24338e..c8041dc871 100644
--- a/util/headerreader/header_reader.go
+++ b/util/headerreader/header_reader.go
@@ -340,6 +340,7 @@ func (s *HeaderReader) logIfHeaderIsOld() {
 	if storedHeader == nil {
 		return
 	}
+	// #nosec G115
 	l1Timetamp := time.Unix(int64(storedHeader.Time), 0)
 	headerTime := time.Since(l1Timetamp)
 	if headerTime >= s.config().OldHeaderTimeout {
diff --git a/util/merkletree/merkleTree.go b/util/merkletree/merkleTree.go
index 1b15d51d98..fffa9bcabc 100644
--- a/util/merkletree/merkleTree.go
+++ b/util/merkletree/merkleTree.go
@@ -43,8 +43,8 @@ func NewLevelAndLeaf(level, leaf uint64) LevelAndLeaf {
 
 func (place LevelAndLeaf) ToBigInt() *big.Int {
 	return new(big.Int).Add(
-		new(big.Int).Lsh(big.NewInt(int64(place.Level)), 192),
-		big.NewInt(int64(place.Leaf)),
+		new(big.Int).Lsh(new(big.Int).SetUint64(place.Level), 192),
+		new(big.Int).SetUint64(place.Leaf),
 	)
}
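ToBigInt packs a level and a leaf into one big.Int, with the level in bits 192 and up. SetUint64 matters here because a value above math.MaxInt64 would otherwise come out negative and corrupt the packed layout. A small round-trip sketch (unpack is a hypothetical inverse, written only to illustrate the layout):

package example

import "math/big"

// pack mirrors LevelAndLeaf.ToBigInt above.
func pack(level, leaf uint64) *big.Int {
	return new(big.Int).Add(
		new(big.Int).Lsh(new(big.Int).SetUint64(level), 192),
		new(big.Int).SetUint64(leaf),
	)
}

// unpack recovers both fields, relying on leaf occupying only the low 64 bits.
func unpack(place *big.Int) (level, leaf uint64) {
	level = new(big.Int).Rsh(place, 192).Uint64()
	leaf = new(big.Int).And(place, new(big.Int).SetUint64(^uint64(0))).Uint64()
	return
}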
diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go
index be5825a28d..a35d4b6665 100644
--- a/util/rpcclient/rpcclient.go
+++ b/util/rpcclient/rpcclient.go
@@ -101,7 +101,7 @@ func (c *RpcClient) Close() {
 }
 
 type limitedMarshal struct {
-	limit int
+	limit uint
 	value any
 }
 
@@ -113,16 +113,18 @@ func (m limitedMarshal) String() string {
 	} else {
 		str = string(marshalled)
 	}
-	if m.limit == 0 || len(str) <= m.limit {
+	// #nosec G115
+	limit := int(m.limit)
+	if m.limit <= 0 || len(str) <= limit {
 		return str
 	}
-	prefix := str[:m.limit/2-1]
-	postfix := str[len(str)-m.limit/2+1:]
+	prefix := str[:limit/2-1]
+	postfix := str[len(str)-limit/2+1:]
 	return fmt.Sprintf("%v..%v", prefix, postfix)
 }
 
 type limitedArgumentsMarshal struct {
-	limit int
+	limit uint
 	args  []any
 }
 
@@ -162,9 +164,9 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth
 		return errors.New("not connected")
 	}
 	logId := c.logId.Add(1)
-	log.Trace("sending RPC request", "method", method, "logId", logId, "args", limitedArgumentsMarshal{int(c.config().ArgLogLimit), args})
+	log.Trace("sending RPC request", "method", method, "logId", logId, "args", limitedArgumentsMarshal{c.config().ArgLogLimit, args})
 	var err error
-	for i := 0; i < int(c.config().Retries)+1; i++ {
+	for i := uint(0); i < c.config().Retries+1; i++ {
 		retryDelay := c.config().RetryDelay
 		if i > 0 && retryDelay > 0 {
 			select {
@@ -188,7 +190,7 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth
 	cancelCtx()
 
 	logger := log.Trace
-	limit := int(c.config().ArgLogLimit)
+	limit := c.config().ArgLogLimit
 	if err != nil && !IsAlreadyKnownError(err) {
 		logger = log.Info
 	}
diff --git a/util/sharedmetrics/sharedmetrics.go b/util/sharedmetrics/sharedmetrics.go
index 377eef5352..9b4b3609bc 100644
--- a/util/sharedmetrics/sharedmetrics.go
+++ b/util/sharedmetrics/sharedmetrics.go
@@ -11,8 +11,10 @@ var (
 )
 
 func UpdateSequenceNumberGauge(sequenceNumber arbutil.MessageIndex) {
+	// #nosec G115
 	latestSequenceNumberGauge.Update(int64(sequenceNumber))
 }
 
 func UpdateSequenceNumberInBlockGauge(sequenceNumber arbutil.MessageIndex) {
+	// #nosec G115
 	sequenceNumberInBlockGauge.Update(int64(sequenceNumber))
 }
diff --git a/util/testhelpers/testhelpers.go b/util/testhelpers/testhelpers.go
index b1b08708e7..d681b422bf 100644
--- a/util/testhelpers/testhelpers.go
+++ b/util/testhelpers/testhelpers.go
@@ -65,6 +65,7 @@ func RandomCallValue(limit int64) *big.Int {
 
 // Computes a psuedo-random uint64 on the interval [min, max]
 func RandomUint32(min, max uint32) uint32 {
+	//#nosec G115
 	return uint32(RandomUint64(uint64(min), uint64(max)))
 }
diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go
index 80cff66675..00bd992f46 100644
--- a/validator/client/validation_client.go
+++ b/validator/client/validation_client.go
@@ -102,6 +102,7 @@ func (c *ValidationClient) Start(ctx context.Context) error {
 	} else {
 		log.Info("connected to validation server", "name", name, "room", room)
 	}
+	// #nosec G115
 	c.room.Store(int32(room))
 	c.wasmModuleRoots = moduleRoots
 	c.name = name
diff --git a/validator/server_arb/execution_run_test.go b/validator/server_arb/execution_run_test.go
index bdc1eefc4d..479db58515 100644
--- a/validator/server_arb/execution_run_test.go
+++ b/validator/server_arb/execution_run_test.go
@@ -194,7 +194,7 @@ func Test_machineHashesWithStep(t *testing.T) {
 			Batch:      1,
 			PosInBatch: mm.totalSteps - 1,
 		}))
-		if len(hashes) >= int(maxIterations) {
+		if uint64(len(hashes)) >= maxIterations {
 			t.Fatal("Wanted fewer hashes than the max iterations")
 		}
 		for i := range hashes {
diff --git a/validator/server_arb/machine_cache.go b/validator/server_arb/machine_cache.go
index 23fcdef6d6..55ef61cf11 100644
--- a/validator/server_arb/machine_cache.go
+++ b/validator/server_arb/machine_cache.go
@@ -239,6 +239,7 @@ func (c *MachineCache) getClosestMachine(stepCount uint64) (int, MachineInterfac
 	if c.machineStepInterval == 0 || stepsFromStart > c.machineStepInterval*uint64(len(c.machines)-1) {
 		index = len(c.machines) - 1
 	} else {
+		// #nosec G115
 		index = int(stepsFromStart / c.machineStepInterval)
 	}
 	return index, c.machines[index]
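The execution_run_test change above is the other standard remedy: instead of narrowing the uint64 side with int(maxIterations), it widens the int side, which is always lossless because len is non-negative. In sketch form (standard library only):

package example

// lenReaches widens the int side of the comparison; uint64(len(xs)) cannot
// lose information, while int(limit) would wrap for limits above math.MaxInt.
func lenReaches(xs []int, limit uint64) bool {
	return uint64(len(xs)) >= limit
}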
diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go
index 844a988d28..eb53070303 100644
--- a/validator/server_arb/validator_spawner.go
+++ b/validator/server_arb/validator_spawner.go
@@ -179,7 +179,10 @@ func (v *ArbitratorSpawner) execute(
 		}
 		steps += count
 	}
+
+	// #nosec G115
 	arbitratorValidationSteps.Update(int64(mach.GetStepCount()))
+
 	if mach.IsErrored() {
 		log.Error("machine entered errored state during attempted validation", "block", entry.Id)
 		return validator.GoGlobalState{}, errors.New("machine entered errored state during attempted validation")
diff --git a/validator/server_jit/jit_machine.go b/validator/server_jit/jit_machine.go
index 23a75bba83..e7753748ab 100644
--- a/validator/server_jit/jit_machine.go
+++ b/validator/server_jit/jit_machine.go
@@ -9,6 +9,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"net"
 	"os"
 	"os/exec"
@@ -125,6 +126,13 @@ func (machine *JitMachine) prove(
 	writeUint32 := func(data uint32) error {
 		return writeExact(arbmath.Uint32ToBytes(data))
 	}
+	writeIntAsUint32 := func(data int) error {
+		if data < 0 || data > math.MaxUint32 {
+			return fmt.Errorf("attempted to write out-of-bounds int %v as uint32", data)
+		}
+		// #nosec G115
+		return writeUint32(uint32(data))
+	}
 	writeUint64 := func(data uint64) error {
 		return writeExact(arbmath.UintToBytes(data))
 	}
@@ -192,14 +200,14 @@ func (machine *JitMachine) prove(
 
 	// send known preimages
 	preimageTypes := entry.Preimages
-	if err := writeUint32(uint32(len(preimageTypes))); err != nil {
+	if err := writeIntAsUint32(len(preimageTypes)); err != nil {
 		return state, err
 	}
 	for ty, preimages := range preimageTypes {
 		if err := writeUint8(uint8(ty)); err != nil {
 			return state, err
 		}
-		if err := writeUint32(uint32(len(preimages))); err != nil {
+		if err := writeIntAsUint32(len(preimages)); err != nil {
 			return state, err
 		}
 		for hash, preimage := range preimages {
@@ -224,7 +232,7 @@ func (machine *JitMachine) prove(
 		}
 	}
 
-	if err := writeUint32(uint32(len(userWasms))); err != nil {
+	if err := writeIntAsUint32(len(userWasms)); err != nil {
 		return state, err
 	}
 	for moduleHash, program := range userWasms {
@@ -301,6 +309,7 @@ func (machine *JitMachine) prove(
 			if memoryUsed > uint64(machine.wasmMemoryUsageLimit) {
 				log.Warn("memory used by jit wasm exceeds the wasm memory usage limit", "limit", machine.wasmMemoryUsageLimit, "memoryUsed", memoryUsed)
 			}
+			// #nosec G115
 			jitWasmMemoryUsage.Update(int64(memoryUsed))
 			return state, nil
 		default:
diff --git a/wavmio/stub.go b/wavmio/stub.go
index 7fd29e2062..1395fb4235 100644
--- a/wavmio/stub.go
+++ b/wavmio/stub.go
@@ -60,13 +60,13 @@ func parsePreimageBytes(path string) {
 		if read != len(lenBuf) {
 			panic(fmt.Sprintf("missing bytes reading len got %d", read))
 		}
-		fieldSize := int(binary.LittleEndian.Uint64(lenBuf))
+		fieldSize := binary.LittleEndian.Uint64(lenBuf)
 		dataBuf := make([]byte, fieldSize)
 		read, err = file.Read(dataBuf)
 		if err != nil {
 			panic(err)
 		}
-		if read != fieldSize {
+		if uint64(read) != fieldSize {
 			panic("missing bytes reading data")
 		}
 		hash := crypto.Keccak256Hash(dataBuf)
@@ -125,7 +125,7 @@ func ReadInboxMessage(msgNum uint64) []byte {
 }
 
 func ReadDelayedInboxMessage(seqNum uint64) []byte {
-	if seqNum < delayedMsgFirstPos || (int(seqNum-delayedMsgFirstPos) > len(delayedMsgs)) {
+	if seqNum < delayedMsgFirstPos || (seqNum-delayedMsgFirstPos > uint64(len(delayedMsgs))) {
 		panic(fmt.Sprintf("trying to read bad delayed msg %d", seqNum))
 	}
 	return delayedMsgs[seqNum-delayedMsgFirstPos]
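The writeIntAsUint32 helper added above is the checked counterpart to the suppressions elsewhere in this patch: the lengths come from len(), so they are non-negative, but the helper still refuses anything that will not fit in the wire format rather than truncating it. The same pattern in a standalone form (w and the big-endian framing are illustrative choices, not the JIT protocol):

package example

import (
	"encoding/binary"
	"fmt"
	"io"
	"math"
)

// writeLen validates before narrowing so an oversized length fails loudly
// instead of being silently truncated on the wire.
func writeLen(w io.Writer, n int) error {
	if n < 0 || n > math.MaxUint32 {
		return fmt.Errorf("length %v does not fit in uint32", n)
	}
	var buf [4]byte
	// #nosec G115
	binary.BigEndian.PutUint32(buf[:], uint32(n))
	_, err := w.Write(buf[:])
	return err
}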
diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go
index 16a8f64daf..00ae0f0dcf 100644
--- a/wsbroadcastserver/clientconnection.go
+++ b/wsbroadcastserver/clientconnection.go
@@ -135,6 +135,7 @@ func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.Ba
 		msgs := prevSegment.Messages()
 		if isFirstSegment && prevSegment.Contains(uint64(cc.requestedSeqNum)) {
+			// #nosec G115
 			requestedIdx := int(cc.requestedSeqNum) - int(prevSegment.Start())
 			// This might be false if messages were added after we fetched the segment's messages
 			if len(msgs) >= requestedIdx {

From b4fa9bbd97abaa2b83d8499cc769fff629566f39 Mon Sep 17 00:00:00 2001
From: Lee Bousfield
Date: Thu, 22 Aug 2024 09:39:41 -0500
Subject: [PATCH 2/2] Lower a couple of info logs down to debug

---
 arbnode/batch_poster.go               | 2 +-
 execution/gethexec/executionengine.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go
index 71239efdbb..b7eee5cc47 100644
--- a/arbnode/batch_poster.go
+++ b/arbnode/batch_poster.go
@@ -1439,7 +1439,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 		b.building.muxBackend.delayedInboxStart = batchPosition.DelayedMessageCount
 		b.building.muxBackend.SetPositionWithinMessage(0)
 		simMux := arbstate.NewInboxMultiplexer(b.building.muxBackend, batchPosition.DelayedMessageCount, dapReaders, daprovider.KeysetValidate)
-		log.Info("Begin checking the correctness of batch against inbox multiplexer", "startMsgSeqNum", batchPosition.MessageCount, "endMsgSeqNum", b.building.msgCount-1)
+		log.Debug("Begin checking the correctness of batch against inbox multiplexer", "startMsgSeqNum", batchPosition.MessageCount, "endMsgSeqNum", b.building.msgCount-1)
 		for i := batchPosition.MessageCount; i < b.building.msgCount; i++ {
 			msg, err := simMux.Pop(ctx)
 			if err != nil {
diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go
index 806355b2c6..19d77fc38f 100644
--- a/execution/gethexec/executionengine.go
+++ b/execution/gethexec/executionengine.go
@@ -138,7 +138,7 @@ func (s *ExecutionEngine) MarkFeedStart(to arbutil.MessageIndex) {
 	defer s.cachedL1PriceData.mutex.Unlock()
 
 	if to < s.cachedL1PriceData.startOfL1PriceDataCache {
-		log.Info("trying to trim older cache which doesnt exist anymore")
+		log.Debug("trying to trim older L1 price data cache which doesn't exist anymore")
 	} else if to >= s.cachedL1PriceData.endOfL1PriceDataCache {
 		s.cachedL1PriceData.startOfL1PriceDataCache = 0
 		s.cachedL1PriceData.endOfL1PriceDataCache = 0