fix(deps): update module github.com/golangci/golangci-lint to v1.61.0 #10935

Merged

merged 4 commits on Oct 12, 2024

Changes from 3 commits
2 changes: 1 addition & 1 deletion .golangci.yml
@@ -126,10 +126,10 @@ linters-settings:

linters:
enable:
- copyloopvar
- depguard
- errcheck
- errorlint
- exportloopref
- gocritic
- gofmt
- goimports
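Note on the linter swap above: newer golangci-lint releases deprecate exportloopref in favor of copyloopvar, because Go 1.22 gives each loop iteration its own variable, making the old `tc := tc` copies redundant (they are deleted throughout the test files below). A minimal standalone sketch of the behavior change, not taken from this repository:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	tests := []string{"merge_only", "split_only", "merge_and_split"}

	var wg sync.WaitGroup
	for _, tc := range tests {
		// tc := tc // pre-Go 1.22 copy that exportloopref enforced; with Go 1.22
		// semantics each iteration already gets a fresh tc, so copyloopvar now
		// flags this line as redundant, which is why this PR removes such copies.
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(tc) // with Go 1.22+, each goroutine sees its own element
		}()
	}
	wg.Wait()
}
```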
2 changes: 1 addition & 1 deletion component/config.go
@@ -86,7 +86,7 @@ func callValidateIfPossible(v reflect.Value) error {
if reflect.PointerTo(v.Type()).Implements(configValidatorType) {
// If not addressable, then create a new *V pointer and set the value to current v.
if !v.CanAddr() {
pv := reflect.New(reflect.PtrTo(v.Type()).Elem())
pv := reflect.New(reflect.PointerTo(v.Type()).Elem())
pv.Elem().Set(v)
v = pv.Elem()
}
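The config.go change replaces the deprecated reflect.PtrTo alias with the equivalent reflect.PointerTo, matching the check a few lines above, so the updated linter no longer reports the deprecated call. A small standalone illustration of the pattern (Config here is a placeholder type, not the collector's):

```go
package main

import (
	"fmt"
	"reflect"
)

type Config struct{ Endpoint string }

func main() {
	v := reflect.ValueOf(Config{Endpoint: "localhost:4317"})

	// reflect.PointerTo(T) returns *T; it is the non-deprecated spelling of
	// reflect.PtrTo(T).
	pt := reflect.PointerTo(v.Type())
	fmt.Println(pt) // *main.Config

	// Same trick as callValidateIfPossible: when v is not addressable, build a
	// fresh *Config, copy v into it, and continue with the addressable element.
	pv := reflect.New(pt.Elem())
	pv.Elem().Set(v)
	fmt.Println(pv.Elem().Interface()) // {localhost:4317}
}
```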
1 change: 0 additions & 1 deletion config/confighttp/compress_readcloser_test.go
@@ -53,7 +53,6 @@ func TestCompressReadCloser(t *testing.T) {
errVal: "failed to close reader",
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()

1 change: 0 additions & 1 deletion config/confighttp/compression_test.go
@@ -415,7 +415,6 @@ func TestDecompressorAvoidDecompressionBomb(t *testing.T) {
compress: compressLz4,
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()

6 changes: 2 additions & 4 deletions confmap/confmap_test.go
@@ -233,8 +233,6 @@ func TestUintUnmarshalerSuccess(t *testing.T) {
}

for _, tt := range tests {
tt := tt

t.Run(tt.name, func(t *testing.T) {
stringMap := map[string]any{
"uint_test": tt.testValue,
@@ -244,13 +242,15 @@ func TestUintUnmarshalerSuccess(t *testing.T) {
err := conf.Unmarshal(cfg)

require.NoError(t, err)
// nolint:gosec
assert.Equal(t, cfg.UintTest, uint32(tt.testValue))
})
}
}

func TestUint64Unmarshaler(t *testing.T) {
negativeInt := -1000
// nolint:gosec
testValue := uint64(negativeInt)

type Uint64Config struct {
@@ -663,8 +663,6 @@ func TestZeroSliceHookFunc(t *testing.T) {
}

for _, tt := range tests {
tt := tt

t.Run(tt.name, func(t *testing.T) {
cfg := NewFromStringMap(tt.cfg)

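The new `// nolint:gosec` comments silence gosec's integer-conversion rule (G115 in recent gosec releases bundled with this golangci-lint version), which reports conversions that can overflow or change sign. The confmap tests perform such conversions on purpose, e.g. pushing a negative int through uint64. A standalone sketch of what the rule objects to:

```go
package main

import "fmt"

func main() {
	negativeInt := -1000

	// A negative int does not fit in uint64; the conversion wraps around to a
	// huge value. gosec flags this as a potential overflow, so the test
	// suppresses it with `// nolint:gosec` — the wrap-around is exactly what
	// the test case exercises.
	testValue := uint64(negativeInt) // nolint:gosec
	fmt.Println(testValue)           // 18446744073709550616
}
```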
12 changes: 6 additions & 6 deletions exporter/debugexporter/internal/otlptext/databuffer.go
@@ -202,9 +202,9 @@ func (b *dataBuffer) logExponentialHistogramDataPoints(ps pmetric.ExponentialHis

for i := 0; i < negB.Len(); i++ {
pos := negB.Len() - i - 1
index := p.Negative().Offset() + int32(pos)
lower := math.Exp(float64(index) * factor)
upper := math.Exp(float64(index+1) * factor)
index := float64(p.Negative().Offset()) + float64(pos)
lower := math.Exp(index * factor)
upper := math.Exp((index + 1) * factor)
b.logEntry("Bucket [%f, %f), Count: %d", -upper, -lower, negB.At(pos))
}

@@ -213,9 +213,9 @@ func (b *dataBuffer) logExponentialHistogramDataPoints(ps pmetric.ExponentialHis
}

for pos := 0; pos < posB.Len(); pos++ {
index := p.Positive().Offset() + int32(pos)
lower := math.Exp(float64(index) * factor)
upper := math.Exp(float64(index+1) * factor)
index := float64(p.Positive().Offset()) + float64(pos)
lower := math.Exp(index * factor)
upper := math.Exp((index + 1) * factor)
b.logEntry("Bucket (%f, %f], Count: %d", lower, upper, posB.At(pos))
}

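In databuffer.go the bucket index moves from int32 addition to float64 before calling math.Exp, so the offset-plus-position sum can no longer overflow int32, which the updated overflow checks would flag. For context, the bounds of an exponential-histogram bucket are base^index and base^(index+1) with base = 2^(2^-scale); the factor variable is not shown in this hunk, so the sketch below derives it from that convention rather than from the collector's code:

```go
package main

import (
	"fmt"
	"math"
)

// bucketBounds mirrors the arithmetic in the diff: convert offset and position
// to float64 up front, then lower = exp(index*factor) and
// upper = exp((index+1)*factor), where factor = ln(2) * 2^-scale,
// i.e. the natural log of the histogram base.
func bucketBounds(scale, offset int32, pos int) (lower, upper float64) {
	factor := math.Ldexp(math.Ln2, -int(scale))
	index := float64(offset) + float64(pos)
	return math.Exp(index * factor), math.Exp((index + 1) * factor)
}

func main() {
	lo, hi := bucketBounds(0, 0, 0)
	fmt.Printf("scale 0, index 0: (%g, %g]\n", lo, hi) // (1, 2]
}
```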
107 changes: 84 additions & 23 deletions exporter/exporterhelper/internal/batch_sender_test.go
@@ -75,7 +75,7 @@ func TestBatchSender_Merge(t *testing.T) {
require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink,
mergeErr: errors.New("merge error")}))

assert.Equal(t, uint64(1), sink.requestsCount.Load())
assert.Equal(t, int64(1), sink.requestsCount.Load())
assert.Eventually(t, func() bool {
return sink.requestsCount.Load() == 2 && sink.itemsCount.Load() == 15
}, 100*time.Millisecond, 10*time.Millisecond)
@@ -89,8 +89,8 @@ func TestBatchSender_BatchExportError(t *testing.T) {
tests := []struct {
name string
batcherOption Option
expectedRequests uint64
expectedItems uint64
expectedRequests int64
expectedItems int64
}{
{
name: "merge_only",
@@ -131,7 +131,7 @@ func TestBatchSender_BatchExportError(t *testing.T) {

// the first two requests should be blocked by the batchSender.
time.Sleep(50 * time.Millisecond)
assert.Equal(t, uint64(0), sink.requestsCount.Load())
assert.Equal(t, int64(0), sink.requestsCount.Load())

// the third request should trigger the export and cause an error.
errReq := &fakeRequest{items: 20, exportErr: errors.New("transient error"), sink: sink}
@@ -203,8 +203,8 @@ func TestBatchSender_Shutdown(t *testing.T) {
require.NoError(t, be.Shutdown(context.Background()))

// shutdown should force sending the batch
assert.Equal(t, uint64(1), sink.requestsCount.Load())
assert.Equal(t, uint64(3), sink.itemsCount.Load())
assert.Equal(t, int64(1), sink.requestsCount.Load())
assert.Equal(t, int64(3), sink.itemsCount.Load())
}

func TestBatchSender_Disabled(t *testing.T) {
@@ -224,8 +224,8 @@ func TestBatchSender_Disabled(t *testing.T) {
sink := newFakeRequestSink()
// should be sent right away without splitting because batching is disabled.
require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink}))
assert.Equal(t, uint64(1), sink.requestsCount.Load())
assert.Equal(t, uint64(8), sink.itemsCount.Load())
assert.Equal(t, int64(1), sink.requestsCount.Load())
assert.Equal(t, int64(8), sink.itemsCount.Load())
}

func TestBatchSender_InvalidMergeSplitFunc(t *testing.T) {
@@ -270,8 +270,8 @@ func TestBatchSender_PostShutdown(t *testing.T) {
// Closed batch sender should act as a pass-through to not block queue draining.
sink := newFakeRequestSink()
require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink}))
assert.Equal(t, uint64(1), sink.requestsCount.Load())
assert.Equal(t, uint64(8), sink.itemsCount.Load())
assert.Equal(t, int64(1), sink.requestsCount.Load())
assert.Equal(t, int64(8), sink.itemsCount.Load())
}

func TestBatchSender_ConcurrencyLimitReached(t *testing.T) {
@@ -281,8 +281,8 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) {
tests := []struct {
name string
batcherCfg exporterbatcher.Config
expectedRequests uint64
expectedItems uint64
expectedRequests int64
expectedItems int64
}{
{
name: "merge_only",
@@ -318,7 +318,6 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) {
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
qCfg := exporterqueue.NewDefaultConfig()
qCfg.NumConsumers = 2
@@ -397,8 +396,8 @@ func TestBatchSender_BatchBlocking(t *testing.T) {
wg.Wait()

// should be sent in two batches since the batch size is 3
assert.Equal(t, uint64(2), sink.requestsCount.Load())
assert.Equal(t, uint64(6), sink.itemsCount.Load())
assert.Equal(t, int64(2), sink.requestsCount.Load())
assert.Equal(t, int64(6), sink.itemsCount.Load())

require.NoError(t, be.Shutdown(context.Background()))
}
@@ -433,8 +432,8 @@ func TestBatchSender_BatchCancelled(t *testing.T) {
wg.Wait()

// nothing should be delivered
assert.Equal(t, uint64(0), sink.requestsCount.Load())
assert.Equal(t, uint64(0), sink.itemsCount.Load())
assert.Equal(t, int64(0), sink.requestsCount.Load())
assert.Equal(t, int64(0), sink.itemsCount.Load())

require.NoError(t, be.Shutdown(context.Background()))
}
@@ -468,8 +467,8 @@ func TestBatchSender_DrainActiveRequests(t *testing.T) {
// It should take 120 milliseconds to complete.
require.NoError(t, be.Shutdown(context.Background()))

assert.Equal(t, uint64(2), sink.requestsCount.Load())
assert.Equal(t, uint64(3), sink.itemsCount.Load())
assert.Equal(t, int64(2), sink.requestsCount.Load())
assert.Equal(t, int64(3), sink.itemsCount.Load())
}

func TestBatchSender_WithBatcherOption(t *testing.T) {
Expand Down Expand Up @@ -654,7 +653,7 @@ func TestBatchSenderTimerResetNoConflict(t *testing.T) {

// The batch should be sent either with the flush interval or by reaching the minimum items size with no conflict
assert.EventuallyWithT(t, func(c *assert.CollectT) {
assert.LessOrEqual(c, uint64(1), sink.requestsCount.Load())
assert.LessOrEqual(c, int64(1), sink.requestsCount.Load())
assert.EqualValues(c, 8, sink.itemsCount.Load())
}, 200*time.Millisecond, 10*time.Millisecond)

@@ -683,7 +682,7 @@ func TestBatchSenderTimerFlush(t *testing.T) {
assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink}))
}()
assert.EventuallyWithT(t, func(c *assert.CollectT) {
assert.LessOrEqual(c, uint64(1), sink.requestsCount.Load())
assert.LessOrEqual(c, int64(1), sink.requestsCount.Load())
assert.EqualValues(c, 8, sink.itemsCount.Load())
}, 30*time.Millisecond, 5*time.Millisecond)

@@ -694,12 +693,12 @@

// Confirm that it is not flushed in 50ms
time.Sleep(60 * time.Millisecond)
assert.LessOrEqual(t, uint64(1), sink.requestsCount.Load())
assert.LessOrEqual(t, int64(1), sink.requestsCount.Load())
assert.EqualValues(t, 8, sink.itemsCount.Load())

// Confirm that it is flushed after 100ms (using 60+50=110 here to be safe)
time.Sleep(50 * time.Millisecond)
assert.LessOrEqual(t, uint64(2), sink.requestsCount.Load())
assert.LessOrEqual(t, int64(2), sink.requestsCount.Load())
assert.EqualValues(t, 12, sink.itemsCount.Load())
require.NoError(t, be.Shutdown(context.Background()))
}
@@ -711,3 +710,65 @@ func queueBatchExporter(t *testing.T, batchOption Option) *BaseExporter {
require.NoError(t, err)
return be
}

func fakeBatchMergeFunc(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) {
if r1 == nil {
return r2, nil
}
fr1 := r1.(*fakeRequest)
fr2 := r2.(*fakeRequest)
if fr2.mergeErr != nil {
return nil, fr2.mergeErr
}
return &fakeRequest{
items: fr1.items + fr2.items,
sink: fr1.sink,
exportErr: fr2.exportErr,
delay: fr1.delay + fr2.delay,
}, nil
}

func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeConfig, r1 internal.Request, r2 internal.Request) ([]internal.Request, error) {
maxItems := cfg.MaxSizeItems
if maxItems == 0 {
r, err := fakeBatchMergeFunc(ctx, r1, r2)
return []internal.Request{r}, err
}

if r2.(*fakeRequest).mergeErr != nil {
return nil, r2.(*fakeRequest).mergeErr
}

fr2 := r2.(*fakeRequest)
fr2 = &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay}
var res []internal.Request

// fill fr1 to maxItems if it's not nil
if r1 != nil {
fr1 := r1.(*fakeRequest)
fr1 = &fakeRequest{items: fr1.items, sink: fr1.sink, exportErr: fr1.exportErr, delay: fr1.delay}
if fr2.items <= maxItems-fr1.items {
fr1.items += fr2.items
if fr2.exportErr != nil {
fr1.exportErr = fr2.exportErr
}
return []internal.Request{fr1}, nil
}
// if split is needed, we don't propagate exportErr from fr2 to fr1 to test more cases
fr2.items -= maxItems - fr1.items
fr1.items = maxItems
res = append(res, fr1)
}

// split fr2 to maxItems
for {
if fr2.items <= maxItems {
res = append(res, &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay})
break
}
res = append(res, &fakeRequest{items: maxItems, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay})
fr2.items -= maxItems
}

return res, nil
}
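The assertions in this file switch from uint64(...) to int64(...), which suggests the fake sink's counters are now signed (e.g. atomic.Int64), so the increments and comparisons need no unsigned conversions for the new overflow checks to flag. The fake sink's definition is not part of this diff; the following is a hypothetical reconstruction for illustration only:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// fakeRequestSink is a guess at the shape of the test helper, not the real
// type from exporterhelper: signed atomic counters let the tests assert with
// int64(...) directly.
type fakeRequestSink struct {
	requestsCount atomic.Int64
	itemsCount    atomic.Int64
}

func (s *fakeRequestSink) export(items int64) {
	s.requestsCount.Add(1)
	s.itemsCount.Add(items)
}

func main() {
	sink := &fakeRequestSink{}
	sink.export(8)
	fmt.Println(sink.requestsCount.Load(), sink.itemsCount.Load()) // 1 8
}
```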