diff --git a/pkg/bloombuild/builder/batch.go b/pkg/bloombuild/builder/batch.go index 4b5fcdb00ad2..af47f5531df7 100644 --- a/pkg/bloombuild/builder/batch.go +++ b/pkg/bloombuild/builder/batch.go @@ -10,6 +10,7 @@ import ( "golang.org/x/exp/slices" "github.com/grafana/loki/v3/pkg/chunkenc" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" logql_log "github.com/grafana/loki/v3/pkg/logql/log" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" @@ -168,9 +169,9 @@ func newBatchedBlockLoader( } // compiler checks -var _ v1.Iterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} -var _ v1.CloseableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} -var _ v1.ResettableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} +var _ iter.Iterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} +var _ iter.CloseIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} +var _ iter.ResetIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} // TODO(chaudum): testware func newBlockLoadingIter(ctx context.Context, blocks []bloomshipper.BlockRef, fetcher FetchFunc[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier], batchSize int) *blockLoadingIter { @@ -189,14 +190,14 @@ type blockLoadingIter struct { ctx context.Context fetcher Fetcher[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier] inputs []bloomshipper.BlockRef - overlapping v1.Iterator[[]bloomshipper.BlockRef] + overlapping iter.Iterator[[]bloomshipper.BlockRef] batchSize int // optional arguments filter func(*bloomshipper.CloseableBlockQuerier) bool // internals initialized bool err error - iter v1.Iterator[*v1.SeriesWithBlooms] + iter iter.Iterator[*v1.SeriesWithBlooms] loader *batchedLoader[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier, *bloomshipper.CloseableBlockQuerier] loaded map[io.Closer]struct{} } @@ -229,7 +230,7 @@ func (i *blockLoadingIter) init() { i.overlapping = overlappingBlocksIter(i.inputs) // set initial iter - i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() + i.iter = iter.NewEmptyIter[*v1.SeriesWithBlooms]() // set "match all" filter function if not present if i.filter == nil { @@ -247,24 +248,24 @@ func (i *blockLoadingIter) loadNext() bool { blockRefs := i.overlapping.At() loader := newBatchedBlockLoader(i.ctx, i.fetcher, blockRefs, i.batchSize) - filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter) + filtered := iter.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter) - iters := make([]v1.PeekingIterator[*v1.SeriesWithBlooms], 0, len(blockRefs)) + iters := make([]iter.PeekIterator[*v1.SeriesWithBlooms], 0, len(blockRefs)) for filtered.Next() { bq := filtered.At() i.loaded[bq] = struct{}{} - iter, err := bq.SeriesIter() + itr, err := bq.SeriesIter() if err != nil { i.err = err - i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() + i.iter = iter.NewEmptyIter[*v1.SeriesWithBlooms]() return false } - iters = append(iters, iter) + iters = append(iters, itr) } if err := filtered.Err(); err != nil { i.err = err - i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() + i.iter = iter.NewEmptyIter[*v1.SeriesWithBlooms]() return false } @@ -278,23 +279,23 @@ func (i *blockLoadingIter) loadNext() bool { // two overlapping blocks can conceivably have the same series, so we need to dedupe, // preferring the one with the most chunks already indexed since we'll have // to add fewer chunks to the bloom - i.iter = v1.NewDedupingIter[*v1.SeriesWithBlooms, *v1.SeriesWithBlooms]( + i.iter = iter.NewDedupingIter[*v1.SeriesWithBlooms, 
*v1.SeriesWithBlooms]( func(a, b *v1.SeriesWithBlooms) bool { return a.Series.Fingerprint == b.Series.Fingerprint }, - v1.Identity[*v1.SeriesWithBlooms], + iter.Identity[*v1.SeriesWithBlooms], func(a, b *v1.SeriesWithBlooms) *v1.SeriesWithBlooms { if len(a.Series.Chunks) > len(b.Series.Chunks) { return a } return b }, - v1.NewPeekingIter(mergedBlocks), + iter.NewPeekIter(mergedBlocks), ) return i.iter.Next() } - i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() + i.iter = iter.NewEmptyIter[*v1.SeriesWithBlooms]() i.err = i.overlapping.Err() return false } @@ -335,11 +336,11 @@ func (i *blockLoadingIter) Filter(filter func(*bloomshipper.CloseableBlockQuerie i.filter = filter } -func overlappingBlocksIter(inputs []bloomshipper.BlockRef) v1.Iterator[[]bloomshipper.BlockRef] { +func overlappingBlocksIter(inputs []bloomshipper.BlockRef) iter.Iterator[[]bloomshipper.BlockRef] { // can we assume sorted blocks? - peekIter := v1.NewPeekingIter(v1.NewSliceIter(inputs)) + peekIter := iter.NewPeekIter(iter.NewSliceIter(inputs)) - return v1.NewDedupingIter[bloomshipper.BlockRef, []bloomshipper.BlockRef]( + return iter.NewDedupingIter[bloomshipper.BlockRef, []bloomshipper.BlockRef]( func(a bloomshipper.BlockRef, b []bloomshipper.BlockRef) bool { minFp := b[0].Bounds.Min maxFp := slices.MaxFunc(b, func(a, b bloomshipper.BlockRef) int { return int(a.Bounds.Max - b.Bounds.Max) }).Bounds.Max diff --git a/pkg/bloombuild/builder/batch_test.go b/pkg/bloombuild/builder/batch_test.go index 19de5354fb14..37109d0196af 100644 --- a/pkg/bloombuild/builder/batch_test.go +++ b/pkg/bloombuild/builder/batch_test.go @@ -8,6 +8,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" ) @@ -128,7 +129,7 @@ func TestBatchedLoader(t *testing.T) { tc.batchSize, ) - got, err := v1.Collect[int](loader) + got, err := v2.Collect[int](loader) if tc.err { require.Error(t, err) return diff --git a/pkg/bloombuild/builder/builder.go b/pkg/bloombuild/builder/builder.go index 0ee9afbc71fd..2282a377f783 100644 --- a/pkg/bloombuild/builder/builder.go +++ b/pkg/bloombuild/builder/builder.go @@ -22,6 +22,7 @@ import ( "github.com/grafana/loki/v3/pkg/bloombuild/common" "github.com/grafana/loki/v3/pkg/bloombuild/protos" "github.com/grafana/loki/v3/pkg/chunkenc" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/config" @@ -334,7 +335,7 @@ func (b *Builder) processTask( // Blocks are built consuming the series iterator. For observability, we wrap the series iterator // with a counter iterator to count the number of times Next() is called on it. // This is used to observe the number of series that are being processed. 
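For context on the comment above: a minimal sketch of what the counter wrapper and the core v2 contract likely look like, reconstructed from identifiers visible in this diff (Iterator, NewCounterIter, and the Entry/Sample -> At, Error -> Err renames elsewhere in the PR). The implementation details are assumptions, not quoted from pkg/iter/v2.

    // Iterator is the core v2 contract implied by this PR's renames.
    type Iterator[T any] interface {
        Next() bool // advance; false when exhausted
        At() T      // current element, valid after a true Next
        Err() error // first error encountered, if any
    }

    // CounterIter counts successful Next calls, i.e. how many
    // elements (here: series) the wrapped iterator produced.
    type CounterIter[T any] struct {
        Iterator[T] // embedded underlying iterator
        count       int
    }

    func NewCounterIter[T any](itr Iterator[T]) *CounterIter[T] {
        return &CounterIter[T]{Iterator: itr}
    }

    func (it *CounterIter[T]) Next() bool {
        if it.Iterator.Next() {
            it.count++
            return true
        }
        return false
    }

    func (it *CounterIter[T]) Count() int { return it.count }
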
- seriesItrWithCounter := v1.NewCounterIter[*v1.Series](seriesItr) + seriesItrWithCounter := iter.NewCounterIter[*v1.Series](seriesItr) gen := NewSimpleBloomGenerator( tenant, @@ -429,7 +430,7 @@ func (b *Builder) loadWorkForGap( tenant string, id tsdb.Identifier, gap protos.GapWithBlocks, -) (v1.Iterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBlooms], error) { +) (iter.Iterator[*v1.Series], iter.CloseResetIterator[*v1.SeriesWithBlooms], error) { // load a series iterator for the gap seriesItr, err := b.tsdbStore.LoadTSDB(ctx, table, tenant, id, gap.Bounds) if err != nil { diff --git a/pkg/bloombuild/builder/spec.go b/pkg/bloombuild/builder/spec.go index a031a69c9812..3feca8f49a3b 100644 --- a/pkg/bloombuild/builder/spec.go +++ b/pkg/bloombuild/builder/spec.go @@ -10,6 +10,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/chunk" @@ -36,15 +37,15 @@ func (k Keyspace) Cmp(other Keyspace) v1.BoundsCheck { // Store is likely bound within. This allows specifying impls like ShardedStore // to only request the shard-range needed from the existing store. type BloomGenerator interface { - Generate(ctx context.Context) (skippedBlocks []v1.BlockMetadata, toClose []io.Closer, results v1.Iterator[*v1.Block], err error) + Generate(ctx context.Context) (skippedBlocks []v1.BlockMetadata, toClose []io.Closer, results iter.Iterator[*v1.Block], err error) } // Simple implementation of a BloomGenerator. type SimpleBloomGenerator struct { userID string - store v1.Iterator[*v1.Series] + store iter.Iterator[*v1.Series] chunkLoader ChunkLoader - blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms] + blocksIter iter.ResetIterator[*v1.SeriesWithBlooms] // options to build blocks with opts v1.BlockOptions @@ -65,9 +66,9 @@ type SimpleBloomGenerator struct { func NewSimpleBloomGenerator( userID string, opts v1.BlockOptions, - store v1.Iterator[*v1.Series], + store iter.Iterator[*v1.Series], chunkLoader ChunkLoader, - blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms], + blocksIter iter.ResetIterator[*v1.SeriesWithBlooms], readWriterFn func() (v1.BlockWriter, v1.BlockReader), reporter func(model.Fingerprint), metrics *v1.Metrics, @@ -100,7 +101,7 @@ func NewSimpleBloomGenerator( func (s *SimpleBloomGenerator) populator(ctx context.Context) v1.BloomPopulatorFunc { return func( series *v1.Series, - srcBlooms v1.SizedIterator[*v1.Bloom], + srcBlooms iter.SizedIterator[*v1.Bloom], toAdd v1.ChunkRefs, ch chan *v1.BloomCreation, ) { @@ -126,7 +127,7 @@ func (s *SimpleBloomGenerator) populator(ctx context.Context) v1.BloomPopulatorF func (s *SimpleBloomGenerator) Generate(ctx context.Context) *LazyBlockBuilderIterator { level.Debug(s.logger).Log("msg", "generating bloom filters for blocks", "schema", fmt.Sprintf("%+v", s.opts.Schema)) - series := v1.NewPeekingIter(s.store) + series := iter.NewPeekIter(s.store) // TODO: Use interface impl, ok := s.blocksIter.(*blockLoadingIter) @@ -166,8 +167,8 @@ type LazyBlockBuilderIterator struct { metrics *v1.Metrics populate v1.BloomPopulatorFunc readWriterFn func() (v1.BlockWriter, v1.BlockReader) - series v1.PeekingIterator[*v1.Series] - blocks v1.ResettableIterator[*v1.SeriesWithBlooms] + series iter.PeekIterator[*v1.Series] + blocks iter.ResetIterator[*v1.SeriesWithBlooms] bytesAdded int curr *v1.Block @@ -180,8 +181,8 @@ func 
NewLazyBlockBuilderIterator( metrics *v1.Metrics, populate v1.BloomPopulatorFunc, readWriterFn func() (v1.BlockWriter, v1.BlockReader), - series v1.PeekingIterator[*v1.Series], - blocks v1.ResettableIterator[*v1.SeriesWithBlooms], + series iter.PeekIterator[*v1.Series], + blocks iter.ResetIterator[*v1.SeriesWithBlooms], ) *LazyBlockBuilderIterator { return &LazyBlockBuilderIterator{ ctx: ctx, @@ -250,7 +251,7 @@ type indexLoader interface { // ChunkItersByFingerprint models the chunks belonging to a fingerprint type ChunkItersByFingerprint struct { fp model.Fingerprint - itr v1.Iterator[v1.ChunkRefWithIter] + itr iter.Iterator[v1.ChunkRefWithIter] } // ChunkLoader loads chunks from a store diff --git a/pkg/bloombuild/builder/spec_test.go b/pkg/bloombuild/builder/spec_test.go index 0e3f98c90779..5be2a0e1c61b 100644 --- a/pkg/bloombuild/builder/spec_test.go +++ b/pkg/bloombuild/builder/spec_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/v3/pkg/chunkenc" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/util/mempool" @@ -47,7 +48,7 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro minIdx, maxIdx := i*seriesPerBlock, (i+1)*seriesPerBlock - itr := v1.NewSliceIter[v1.SeriesWithBlooms](data[minIdx:maxIdx]) + itr := v2.NewSliceIter[v1.SeriesWithBlooms](data[minIdx:maxIdx]) _, err = builder.BuildFrom(itr) require.Nil(t, err) @@ -66,11 +67,11 @@ type dummyChunkLoader struct{} func (dummyChunkLoader) Load(_ context.Context, _ string, series *v1.Series) *ChunkItersByFingerprint { return &ChunkItersByFingerprint{ fp: series.Fingerprint, - itr: v1.NewEmptyIter[v1.ChunkRefWithIter](), + itr: v2.NewEmptyIter[v1.ChunkRefWithIter](), } } -func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Series], blocks []*v1.Block, refs []bloomshipper.BlockRef) *SimpleBloomGenerator { +func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v2.Iterator[*v1.Series], blocks []*v1.Block, refs []bloomshipper.BlockRef) *SimpleBloomGenerator { bqs := make([]*bloomshipper.CloseableBlockQuerier, 0, len(blocks)) for i, b := range blocks { bqs = append(bqs, &bloomshipper.CloseableBlockQuerier{ @@ -133,8 +134,8 @@ func TestSimpleBloomGenerator(t *testing.T) { } { t.Run(fmt.Sprintf("%s/%s", tc.desc, enc), func(t *testing.T) { sourceBlocks, data, refs := blocksFromSchemaWithRange(t, 2, tc.fromSchema, 0x00000, 0x6ffff) - storeItr := v1.NewMapIter[v1.SeriesWithBlooms, *v1.Series]( - v1.NewSliceIter[v1.SeriesWithBlooms](data), + storeItr := v2.NewMapIter[v1.SeriesWithBlooms, *v1.Series]( + v2.NewSliceIter[v1.SeriesWithBlooms](data), func(swb v1.SeriesWithBlooms) *v1.Series { return swb.Series }, diff --git a/pkg/bloombuild/common/tsdb.go b/pkg/bloombuild/common/tsdb.go index a24efcd1eecf..8082a8b319a4 100644 --- a/pkg/bloombuild/common/tsdb.go +++ b/pkg/bloombuild/common/tsdb.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/v3/pkg/chunkenc" + iter "github.com/grafana/loki/v3/pkg/iter/v2" baseStore "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/config" @@ -38,7 +39,7 @@ type TSDBStore interface { tenant string, id tsdb.Identifier, bounds v1.FingerprintBounds, - ) (v1.Iterator[*v1.Series], error) + ) (iter.Iterator[*v1.Series], error) } // 
BloomTSDBStore is a wrapper around the storage.Client interface which @@ -90,7 +91,7 @@ func (b *BloomTSDBStore) LoadTSDB( tenant string, id tsdb.Identifier, bounds v1.FingerprintBounds, -) (v1.Iterator[*v1.Series], error) { +) (iter.Iterator[*v1.Series], error) { withCompression := id.Name() + gzipExtension data, err := b.storage.GetUserFile(ctx, table.Addr(), tenant, withCompression) @@ -126,7 +127,7 @@ func (b *BloomTSDBStore) LoadTSDB( return NewTSDBSeriesIter(ctx, tenant, idx, bounds) } -func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, bounds v1.FingerprintBounds) (v1.Iterator[*v1.Series], error) { +func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, bounds v1.FingerprintBounds) (iter.Iterator[*v1.Series], error) { // TODO(salvacorts): Create a pool series := make([]*v1.Series, 0, 100) @@ -163,9 +164,9 @@ func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, b select { case <-ctx.Done(): - return v1.NewEmptyIter[*v1.Series](), ctx.Err() + return iter.NewEmptyIter[*v1.Series](), ctx.Err() default: - return v1.NewCancelableIter[*v1.Series](ctx, v1.NewSliceIter[*v1.Series](series)), nil + return iter.NewCancelableIter[*v1.Series](ctx, iter.NewSliceIter[*v1.Series](series)), nil } } @@ -251,7 +252,7 @@ func (s *TSDBStores) LoadTSDB( tenant string, id tsdb.Identifier, bounds v1.FingerprintBounds, -) (v1.Iterator[*v1.Series], error) { +) (iter.Iterator[*v1.Series], error) { store, err := s.storeForPeriod(table.DayTime) if err != nil { return nil, err diff --git a/pkg/bloombuild/common/tsdb_test.go b/pkg/bloombuild/common/tsdb_test.go index 481399102fce..70ee440551e4 100644 --- a/pkg/bloombuild/common/tsdb_test.go +++ b/pkg/bloombuild/common/tsdb_test.go @@ -9,6 +9,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" ) @@ -61,7 +62,7 @@ func TestTSDBSeriesIter(t *testing.T) { }, }, } - srcItr := v1.NewSliceIter(input) + srcItr := v2.NewSliceIter(input) itr, err := NewTSDBSeriesIter(context.Background(), "", forSeriesTestImpl(input), v1.NewBounds(0, math.MaxUint64)) require.NoError(t, err) diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go index ccbd462aaabe..995a4f9f35d0 100644 --- a/pkg/bloombuild/planner/planner.go +++ b/pkg/bloombuild/planner/planner.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/loki/v3/pkg/bloombuild/common" "github.com/grafana/loki/v3/pkg/bloombuild/protos" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/queue" "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" @@ -575,13 +576,13 @@ func (p *Planner) loadTenantWork( return tenantTableWork, ctx.Err() } -func (p *Planner) tenants(ctx context.Context, table config.DayTable) (*v1.SliceIter[string], error) { +func (p *Planner) tenants(ctx context.Context, table config.DayTable) (*iter.SliceIter[string], error) { tenants, err := p.tsdbStore.UsersForPeriod(ctx, table) if err != nil { return nil, fmt.Errorf("error loading tenants for table (%s): %w", table, err) } - return v1.NewSliceIter(tenants), nil + return iter.NewSliceIter(tenants), nil } // blockPlan is a plan for all the work needed to build a meta.json @@ -720,24 +721,24 @@ func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan 
return planGap.Blocks[i].Bounds.Less(planGap.Blocks[j].Bounds) }) - peekingBlocks := v1.NewPeekingIter[bloomshipper.BlockRef]( - v1.NewSliceIter[bloomshipper.BlockRef]( + peekingBlocks := iter.NewPeekIter[bloomshipper.BlockRef]( + iter.NewSliceIter[bloomshipper.BlockRef]( planGap.Blocks, ), ) // dedupe blocks which could be in multiple metas - itr := v1.NewDedupingIter[bloomshipper.BlockRef, bloomshipper.BlockRef]( + itr := iter.NewDedupingIter[bloomshipper.BlockRef, bloomshipper.BlockRef]( func(a, b bloomshipper.BlockRef) bool { return a == b }, - v1.Identity[bloomshipper.BlockRef], + iter.Identity[bloomshipper.BlockRef], func(a, _ bloomshipper.BlockRef) bloomshipper.BlockRef { return a }, peekingBlocks, ) - deduped, err := v1.Collect[bloomshipper.BlockRef](itr) + deduped, err := iter.Collect[bloomshipper.BlockRef](itr) if err != nil { return nil, fmt.Errorf("failed to dedupe blocks: %w", err) } diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go index 4525bca006a0..c4e1043b4483 100644 --- a/pkg/bloomcompactor/batch.go +++ b/pkg/bloomcompactor/batch.go @@ -10,6 +10,7 @@ import ( "golang.org/x/exp/slices" "github.com/grafana/loki/v3/pkg/chunkenc" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" logql_log "github.com/grafana/loki/v3/pkg/logql/log" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" @@ -168,9 +169,9 @@ func newBatchedBlockLoader( } // compiler checks -var _ v1.Iterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} -var _ v1.CloseableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} -var _ v1.ResettableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} +var _ iter.Iterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} +var _ iter.CloseIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} +var _ iter.ResetIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} // TODO(chaudum): testware func newBlockLoadingIter(ctx context.Context, blocks []bloomshipper.BlockRef, fetcher FetchFunc[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier], batchSize int) *blockLoadingIter { @@ -189,14 +190,14 @@ type blockLoadingIter struct { ctx context.Context fetcher Fetcher[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier] inputs []bloomshipper.BlockRef - overlapping v1.Iterator[[]bloomshipper.BlockRef] + overlapping iter.Iterator[[]bloomshipper.BlockRef] batchSize int // optional arguments filter func(*bloomshipper.CloseableBlockQuerier) bool // internals initialized bool err error - iter v1.Iterator[*v1.SeriesWithBlooms] + iter iter.Iterator[*v1.SeriesWithBlooms] loader *batchedLoader[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier, *bloomshipper.CloseableBlockQuerier] loaded map[io.Closer]struct{} } @@ -229,7 +230,7 @@ func (i *blockLoadingIter) init() { i.overlapping = overlappingBlocksIter(i.inputs) // set initial iter - i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() + i.iter = iter.NewEmptyIter[*v1.SeriesWithBlooms]() // set "match all" filter function if not present if i.filter == nil { @@ -247,24 +248,24 @@ func (i *blockLoadingIter) loadNext() bool { blockRefs := i.overlapping.At() loader := newBatchedBlockLoader(i.ctx, i.fetcher, blockRefs, i.batchSize) - filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter) + filtered := iter.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter) - iters := make([]v1.PeekingIterator[*v1.SeriesWithBlooms], 0, len(blockRefs)) + iters := make([]iter.PeekIterator[*v1.SeriesWithBlooms], 0, len(blockRefs)) for filtered.Next() { 
bq := filtered.At() i.loaded[bq] = struct{}{} - iter, err := bq.SeriesIter() + itr, err := bq.SeriesIter() if err != nil { i.err = err - i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() + i.iter = iter.NewEmptyIter[*v1.SeriesWithBlooms]() return false } - iters = append(iters, iter) + iters = append(iters, itr) } if err := filtered.Err(); err != nil { i.err = err - i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() + i.iter = iter.NewEmptyIter[*v1.SeriesWithBlooms]() return false } @@ -278,23 +279,23 @@ func (i *blockLoadingIter) loadNext() bool { // two overlapping blocks can conceivably have the same series, so we need to dedupe, // preferring the one with the most chunks already indexed since we'll have // to add fewer chunks to the bloom - i.iter = v1.NewDedupingIter[*v1.SeriesWithBlooms, *v1.SeriesWithBlooms]( + i.iter = iter.NewDedupingIter[*v1.SeriesWithBlooms, *v1.SeriesWithBlooms]( func(a, b *v1.SeriesWithBlooms) bool { return a.Series.Fingerprint == b.Series.Fingerprint }, - v1.Identity[*v1.SeriesWithBlooms], + iter.Identity[*v1.SeriesWithBlooms], func(a, b *v1.SeriesWithBlooms) *v1.SeriesWithBlooms { if len(a.Series.Chunks) > len(b.Series.Chunks) { return a } return b }, - v1.NewPeekingIter(mergedBlocks), + iter.NewPeekIter(mergedBlocks), ) return i.iter.Next() } - i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() + i.iter = iter.NewEmptyIter[*v1.SeriesWithBlooms]() i.err = i.overlapping.Err() return false } @@ -335,11 +336,11 @@ func (i *blockLoadingIter) Filter(filter func(*bloomshipper.CloseableBlockQuerie i.filter = filter } -func overlappingBlocksIter(inputs []bloomshipper.BlockRef) v1.Iterator[[]bloomshipper.BlockRef] { +func overlappingBlocksIter(inputs []bloomshipper.BlockRef) iter.Iterator[[]bloomshipper.BlockRef] { // can we assume sorted blocks? 
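Before the construction below, a self-contained illustration of the deduping pattern this PR uses throughout, built only from calls that appear in the diff (NewSliceIter, NewPeekIter, NewDedupingIter, Identity, Collect); the int payload and values are invented for the example.

    // Collapse adjacent duplicates in a sorted stream.
    itr := iter.NewDedupingIter[int, int](
        func(a, b int) bool { return a == b }, // eq: does a belong to group b?
        iter.Identity[int],                    // from: seed a group from one element
        func(a, _ int) int { return a },       // merge: fold a into the group, keeping the first
        iter.NewPeekIter(iter.NewSliceIter([]int{1, 1, 2, 3, 3})),
    )
    deduped, err := iter.Collect[int](itr) // deduped == []int{1, 2, 3}, err == nil

The deduping iterator takes a PeekIterator (Peek() (T, bool)) rather than a plain Iterator because it must inspect the next element to decide whether it joins the current group before consuming it. overlappingBlocksIter below is the same shape with a []bloomshipper.BlockRef group type: its eq checks fingerprint-bounds overlap against the group's running min/max rather than equality, so consecutive overlapping blocks fold into one batch.
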
- peekIter := v1.NewPeekingIter(v1.NewSliceIter(inputs)) + peekIter := iter.NewPeekIter(iter.NewSliceIter(inputs)) - return v1.NewDedupingIter[bloomshipper.BlockRef, []bloomshipper.BlockRef]( + return iter.NewDedupingIter[bloomshipper.BlockRef, []bloomshipper.BlockRef]( func(a bloomshipper.BlockRef, b []bloomshipper.BlockRef) bool { minFp := b[0].Bounds.Min maxFp := slices.MaxFunc(b, func(a, b bloomshipper.BlockRef) int { return int(a.Bounds.Max - b.Bounds.Max) }).Bounds.Max diff --git a/pkg/bloomcompactor/batch_test.go b/pkg/bloomcompactor/batch_test.go index d64b8313e106..09d595459b50 100644 --- a/pkg/bloomcompactor/batch_test.go +++ b/pkg/bloomcompactor/batch_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" - v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" ) @@ -127,7 +127,7 @@ func TestBatchedLoader(t *testing.T) { tc.batchSize, ) - got, err := v1.Collect[int](loader) + got, err := v2.Collect[int](loader) if tc.err { require.Error(t, err) return diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go index 8eed0823314a..6f07389a0bb4 100644 --- a/pkg/bloomcompactor/bloomcompactor.go +++ b/pkg/bloomcompactor/bloomcompactor.go @@ -17,6 +17,7 @@ import ( "github.com/prometheus/common/model" "github.com/grafana/loki/v3/pkg/bloomutils" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/config" @@ -185,13 +186,13 @@ type tenantTableRange struct { queueTime, startTime, endTime time.Time } -func (c *Compactor) tenants(ctx context.Context, table config.DayTable) (*v1.SliceIter[string], error) { +func (c *Compactor) tenants(ctx context.Context, table config.DayTable) (*iter.SliceIter[string], error) { tenants, err := c.tsdbStore.UsersForPeriod(ctx, table) if err != nil { return nil, errors.Wrap(err, "getting tenants") } - return v1.NewSliceIter(tenants), nil + return iter.NewSliceIter(tenants), nil } // ownsTenant returns the ownership range for the tenant, if the compactor owns the tenant, and an error. diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go index 3929f2da3f80..fffd67f7f2f4 100644 --- a/pkg/bloomcompactor/controller.go +++ b/pkg/bloomcompactor/controller.go @@ -14,6 +14,7 @@ import ( "github.com/prometheus/common/model" "github.com/grafana/loki/v3/pkg/chunkenc" + iter "github.com/grafana/loki/v3/pkg/iter/v2" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" @@ -287,7 +288,7 @@ func (s *SimpleBloomController) loadWorkForGap( tenant string, id tsdb.Identifier, gap gapWithBlocks, -) (v1.Iterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBlooms], error) { +) (iter.Iterator[*v1.Series], iter.CloseResetIterator[*v1.SeriesWithBlooms], error) { // load a series iterator for the gap seriesItr, err := s.tsdbStore.LoadTSDB(ctx, table, tenant, id, gap.bounds) if err != nil { @@ -400,7 +401,7 @@ func (s *SimpleBloomController) buildGaps( // Blocks are built consuming the series iterator. For observability, we wrap the series iterator // with a counter iterator to count the number of times Next() is called on it. // This is used to observe the number of series that are being processed. 
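These bloomcompactor hunks mirror the builder ones above (the counter wrapper is sketched there). The other renamed interfaces they share are the closeable and resettable variants; a sketch of their likely composition, inferred from the compiler checks and the CloseResetIterator return type in this diff — Reset's exact signature is an assumption:

    // CloseIterator and ResetIterator extend the base contract.
    type CloseIterator[T any] interface {
        Iterator[T]
        Close() error
    }

    type ResetIterator[T any] interface {
        Iterator[T]
        Reset() error // assumed signature; rewinds for another pass
    }

    // CloseResetIterator, returned by loadWorkForGap, combines both.
    type CloseResetIterator[T any] interface {
        CloseIterator[T]
        ResetIterator[T]
    }

blockLoadingIter satisfying all three compiler checks fits this split: presumably the lazy block builder re-reads the blocks iterator for each output block (Reset) and must release the loaded queriers when done (Close).
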
- seriesItrWithCounter := v1.NewCounterIter[*v1.Series](seriesItr) + seriesItrWithCounter := iter.NewCounterIter[*v1.Series](seriesItr) gen := NewSimpleBloomGenerator( tenant, @@ -612,24 +613,24 @@ func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan return planGap.blocks[i].Bounds.Less(planGap.blocks[j].Bounds) }) - peekingBlocks := v1.NewPeekingIter[bloomshipper.BlockRef]( - v1.NewSliceIter[bloomshipper.BlockRef]( + peekingBlocks := iter.NewPeekIter[bloomshipper.BlockRef]( + iter.NewSliceIter[bloomshipper.BlockRef]( planGap.blocks, ), ) // dedupe blocks which could be in multiple metas - itr := v1.NewDedupingIter[bloomshipper.BlockRef, bloomshipper.BlockRef]( + itr := iter.NewDedupingIter[bloomshipper.BlockRef, bloomshipper.BlockRef]( func(a, b bloomshipper.BlockRef) bool { return a == b }, - v1.Identity[bloomshipper.BlockRef], + iter.Identity[bloomshipper.BlockRef], func(a, _ bloomshipper.BlockRef) bloomshipper.BlockRef { return a }, peekingBlocks, ) - deduped, err := v1.Collect[bloomshipper.BlockRef](itr) + deduped, err := iter.Collect[bloomshipper.BlockRef](itr) if err != nil { return nil, errors.Wrap(err, "failed to dedupe blocks") } diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go index 2cb16eac02ea..7a1d000dde70 100644 --- a/pkg/bloomcompactor/spec.go +++ b/pkg/bloomcompactor/spec.go @@ -10,6 +10,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/chunk" @@ -36,15 +37,15 @@ func (k Keyspace) Cmp(other Keyspace) v1.BoundsCheck { // Store is likely bound within. This allows specifying impls like ShardedStore // to only request the shard-range needed from the existing store. type BloomGenerator interface { - Generate(ctx context.Context) (skippedBlocks []v1.BlockMetadata, toClose []io.Closer, results v1.Iterator[*v1.Block], err error) + Generate(ctx context.Context) (skippedBlocks []v1.BlockMetadata, toClose []io.Closer, results iter.Iterator[*v1.Block], err error) } // Simple implementation of a BloomGenerator. 
type SimpleBloomGenerator struct { userID string - store v1.Iterator[*v1.Series] + store iter.Iterator[*v1.Series] chunkLoader ChunkLoader - blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms] + blocksIter iter.ResetIterator[*v1.SeriesWithBlooms] // options to build blocks with opts v1.BlockOptions @@ -65,9 +66,9 @@ type SimpleBloomGenerator struct { func NewSimpleBloomGenerator( userID string, opts v1.BlockOptions, - store v1.Iterator[*v1.Series], + store iter.Iterator[*v1.Series], chunkLoader ChunkLoader, - blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms], + blocksIter iter.ResetIterator[*v1.SeriesWithBlooms], readWriterFn func() (v1.BlockWriter, v1.BlockReader), reporter func(model.Fingerprint), metrics *Metrics, @@ -100,7 +101,7 @@ func NewSimpleBloomGenerator( func (s *SimpleBloomGenerator) populator(ctx context.Context) v1.BloomPopulatorFunc { return func( series *v1.Series, - srcBlooms v1.SizedIterator[*v1.Bloom], + srcBlooms iter.SizedIterator[*v1.Bloom], toAdd v1.ChunkRefs, ch chan *v1.BloomCreation, ) { @@ -126,7 +127,7 @@ func (s *SimpleBloomGenerator) populator(ctx context.Context) v1.BloomPopulatorF func (s *SimpleBloomGenerator) Generate(ctx context.Context) *LazyBlockBuilderIterator { level.Debug(s.logger).Log("msg", "generating bloom filters for blocks", "schema", fmt.Sprintf("%+v", s.opts.Schema)) - series := v1.NewPeekingIter(s.store) + series := iter.NewPeekIter(s.store) // TODO: Use interface impl, ok := s.blocksIter.(*blockLoadingIter) @@ -166,8 +167,8 @@ type LazyBlockBuilderIterator struct { metrics *Metrics populate v1.BloomPopulatorFunc readWriterFn func() (v1.BlockWriter, v1.BlockReader) - series v1.PeekingIterator[*v1.Series] - blocks v1.ResettableIterator[*v1.SeriesWithBlooms] + series iter.PeekIterator[*v1.Series] + blocks iter.ResetIterator[*v1.SeriesWithBlooms] bytesAdded int curr *v1.Block @@ -180,8 +181,8 @@ func NewLazyBlockBuilderIterator( metrics *Metrics, populate v1.BloomPopulatorFunc, readWriterFn func() (v1.BlockWriter, v1.BlockReader), - series v1.PeekingIterator[*v1.Series], - blocks v1.ResettableIterator[*v1.SeriesWithBlooms], + series iter.PeekIterator[*v1.Series], + blocks iter.ResetIterator[*v1.SeriesWithBlooms], ) *LazyBlockBuilderIterator { return &LazyBlockBuilderIterator{ ctx: ctx, @@ -250,7 +251,7 @@ type indexLoader interface { // ChunkItersByFingerprint models the chunks belonging to a fingerprint type ChunkItersByFingerprint struct { fp model.Fingerprint - itr v1.Iterator[v1.ChunkRefWithIter] + itr iter.Iterator[v1.ChunkRefWithIter] } // ChunkLoader loads chunks from a store diff --git a/pkg/bloomcompactor/spec_test.go b/pkg/bloomcompactor/spec_test.go index e08cafb68cab..8ee914b5c898 100644 --- a/pkg/bloomcompactor/spec_test.go +++ b/pkg/bloomcompactor/spec_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/v3/pkg/chunkenc" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/util/mempool" @@ -47,7 +48,7 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro minIdx, maxIdx := i*seriesPerBlock, (i+1)*seriesPerBlock - itr := v1.NewSliceIter[v1.SeriesWithBlooms](data[minIdx:maxIdx]) + itr := v2.NewSliceIter[v1.SeriesWithBlooms](data[minIdx:maxIdx]) _, err = builder.BuildFrom(itr) require.Nil(t, err) @@ -66,11 +67,11 @@ type dummyChunkLoader struct{} func (dummyChunkLoader) Load(_ context.Context, _ string, 
series *v1.Series) *ChunkItersByFingerprint { return &ChunkItersByFingerprint{ fp: series.Fingerprint, - itr: v1.NewEmptyIter[v1.ChunkRefWithIter](), + itr: v2.NewEmptyIter[v1.ChunkRefWithIter](), } } -func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Series], blocks []*v1.Block, refs []bloomshipper.BlockRef) *SimpleBloomGenerator { +func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v2.Iterator[*v1.Series], blocks []*v1.Block, refs []bloomshipper.BlockRef) *SimpleBloomGenerator { bqs := make([]*bloomshipper.CloseableBlockQuerier, 0, len(blocks)) for i, b := range blocks { bqs = append(bqs, &bloomshipper.CloseableBlockQuerier{ @@ -133,8 +134,8 @@ func TestSimpleBloomGenerator(t *testing.T) { } { t.Run(fmt.Sprintf("%s/%s", tc.desc, enc), func(t *testing.T) { sourceBlocks, data, refs := blocksFromSchemaWithRange(t, 2, tc.fromSchema, 0x00000, 0x6ffff) - storeItr := v1.NewMapIter[v1.SeriesWithBlooms, *v1.Series]( - v1.NewSliceIter[v1.SeriesWithBlooms](data), + storeItr := v2.NewMapIter[v1.SeriesWithBlooms, *v1.Series]( + v2.NewSliceIter[v1.SeriesWithBlooms](data), func(swb v1.SeriesWithBlooms) *v1.Series { return swb.Series }, diff --git a/pkg/bloomcompactor/tsdb.go b/pkg/bloomcompactor/tsdb.go index c5b1ecf59307..c522cc6dbcef 100644 --- a/pkg/bloomcompactor/tsdb.go +++ b/pkg/bloomcompactor/tsdb.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/v3/pkg/chunkenc" + iter "github.com/grafana/loki/v3/pkg/iter/v2" baseStore "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/config" @@ -38,7 +39,7 @@ type TSDBStore interface { tenant string, id tsdb.Identifier, bounds v1.FingerprintBounds, - ) (v1.Iterator[*v1.Series], error) + ) (iter.Iterator[*v1.Series], error) } // BloomTSDBStore is a wrapper around the storage.Client interface which @@ -90,7 +91,7 @@ func (b *BloomTSDBStore) LoadTSDB( tenant string, id tsdb.Identifier, bounds v1.FingerprintBounds, -) (v1.Iterator[*v1.Series], error) { +) (iter.Iterator[*v1.Series], error) { withCompression := id.Name() + gzipExtension data, err := b.storage.GetUserFile(ctx, table.Addr(), tenant, withCompression) @@ -126,7 +127,7 @@ func (b *BloomTSDBStore) LoadTSDB( return NewTSDBSeriesIter(ctx, tenant, idx, bounds) } -func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, bounds v1.FingerprintBounds) (v1.Iterator[*v1.Series], error) { +func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, bounds v1.FingerprintBounds) (iter.Iterator[*v1.Series], error) { // TODO(salvacorts): Create a pool series := make([]*v1.Series, 0, 100) @@ -163,9 +164,9 @@ func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, b select { case <-ctx.Done(): - return v1.NewEmptyIter[*v1.Series](), ctx.Err() + return iter.NewEmptyIter[*v1.Series](), ctx.Err() default: - return v1.NewCancelableIter[*v1.Series](ctx, v1.NewSliceIter[*v1.Series](series)), nil + return iter.NewCancelableIter[*v1.Series](ctx, iter.NewSliceIter[*v1.Series](series)), nil } } @@ -251,7 +252,7 @@ func (s *TSDBStores) LoadTSDB( tenant string, id tsdb.Identifier, bounds v1.FingerprintBounds, -) (v1.Iterator[*v1.Series], error) { +) (iter.Iterator[*v1.Series], error) { store, err := s.storeForPeriod(table.DayTime) if err != nil { return nil, err diff --git a/pkg/bloomcompactor/tsdb_test.go b/pkg/bloomcompactor/tsdb_test.go index a18e36ddb6d1..b81880d83b46 100644 --- 
a/pkg/bloomcompactor/tsdb_test.go +++ b/pkg/bloomcompactor/tsdb_test.go @@ -9,6 +9,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" ) @@ -61,7 +62,7 @@ func TestTSDBSeriesIter(t *testing.T) { }, }, } - srcItr := v1.NewSliceIter(input) + srcItr := v2.NewSliceIter(input) itr, err := NewTSDBSeriesIter(context.Background(), "", forSeriesTestImpl(input), v1.NewBounds(0, math.MaxUint64)) require.NoError(t, err) diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go index 603d41c2c437..cdc7c96f065b 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -21,6 +21,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/queue" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" @@ -351,13 +352,13 @@ func filterChunkRefs(req *logproto.FilterChunkRefRequest, responses []v1.Output) // dedupe outputs, merging the same series. // This returns an Iterator[v1.Output] - dedupedResps := v1.NewDedupingIter[v1.Output, v1.Output]( + dedupedResps := iter.NewDedupingIter[v1.Output, v1.Output]( // eq func(o1, o2 v1.Output) bool { return o1.Fp == o2.Fp }, // from - v1.Identity[v1.Output], + iter.Identity[v1.Output], // merge two removal sets for the same series, deduping // requires that the removals of the outputs are sorted func(o1, o2 v1.Output) v1.Output { @@ -395,7 +396,7 @@ func filterChunkRefs(req *logproto.FilterChunkRefRequest, responses []v1.Output) res.Removals = chks return res }, - v1.NewPeekingIter(v1.NewSliceIter(responses)), + iter.NewPeekIter(iter.NewSliceIter(responses)), ) // Iterate through the requested and filtered series/chunks, diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go index 36e5b7598da9..d64cba01224d 100644 --- a/pkg/bloomgateway/client.go +++ b/pkg/bloomgateway/client.go @@ -18,6 +18,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logqlmodel/stats" "github.com/grafana/loki/v3/pkg/querier/plan" @@ -300,10 +301,10 @@ func mergeSeries(input [][]*logproto.GroupedChunkRefs, buf []*logproto.GroupedCh // clear provided buffer buf = buf[:0] - iters := make([]v1.PeekingIterator[*logproto.GroupedChunkRefs], 0, len(input)) + iters := make([]iter.PeekIterator[*logproto.GroupedChunkRefs], 0, len(input)) for _, inp := range input { sort.Slice(inp, func(i, j int) bool { return inp[i].Fingerprint < inp[j].Fingerprint }) - iters = append(iters, v1.NewPeekingIter(v1.NewSliceIter(inp))) + iters = append(iters, iter.NewPeekIter(iter.NewSliceIter(inp))) } heapIter := v1.NewHeapIterator[*logproto.GroupedChunkRefs]( @@ -311,11 +312,11 @@ func mergeSeries(input [][]*logproto.GroupedChunkRefs, buf []*logproto.GroupedCh iters..., ) - dedupeIter := v1.NewDedupingIter[*logproto.GroupedChunkRefs, *logproto.GroupedChunkRefs]( + dedupeIter := iter.NewDedupingIter[*logproto.GroupedChunkRefs, *logproto.GroupedChunkRefs]( // eq func(a, b *logproto.GroupedChunkRefs) bool { return a.Fingerprint == b.Fingerprint }, // from - v1.Identity[*logproto.GroupedChunkRefs], + iter.Identity[*logproto.GroupedChunkRefs], 
// merge func(a, b *logproto.GroupedChunkRefs) *logproto.GroupedChunkRefs { // TODO(chaudum): Check if we can assume sorted shortrefs here @@ -332,10 +333,10 @@ func mergeSeries(input [][]*logproto.GroupedChunkRefs, buf []*logproto.GroupedCh } }, // iterator - v1.NewPeekingIter(heapIter), + iter.NewPeekIter(heapIter), ) - return v1.CollectInto(dedupeIter, buf) + return iter.CollectInto(dedupeIter, buf) } // mergeChunkSets merges and deduplicates two sorted slices of shortRefs diff --git a/pkg/bloomgateway/multiplexing.go b/pkg/bloomgateway/multiplexing.go index 3520d7b18057..b814ae23a5a5 100644 --- a/pkg/bloomgateway/multiplexing.go +++ b/pkg/bloomgateway/multiplexing.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/common/model" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql/syntax" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" @@ -133,21 +134,21 @@ func (t Task) Copy(series []*logproto.GroupedChunkRefs) Task { func (t Task) RequestIter( tokenizer *v1.NGramTokenizer, -) v1.Iterator[v1.Request] { +) iter.Iterator[v1.Request] { return &requestIterator{ recorder: t.recorder, - series: v1.NewSliceIter(t.series), + series: iter.NewSliceIter(t.series), search: v1.FiltersToBloomTest(tokenizer, t.filters...), channel: t.resCh, curr: v1.Request{}, } } -var _ v1.Iterator[v1.Request] = &requestIterator{} +var _ iter.Iterator[v1.Request] = &requestIterator{} type requestIterator struct { recorder *v1.BloomRecorder - series v1.Iterator[*logproto.GroupedChunkRefs] + series iter.Iterator[*logproto.GroupedChunkRefs] search v1.BloomTest channel chan<- v1.Output curr v1.Request diff --git a/pkg/bloomgateway/multiplexing_test.go b/pkg/bloomgateway/multiplexing_test.go index 5f71d3c9623d..d395d2a315cb 100644 --- a/pkg/bloomgateway/multiplexing_test.go +++ b/pkg/bloomgateway/multiplexing_test.go @@ -9,6 +9,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql/syntax" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" @@ -103,9 +104,9 @@ func TestTask_RequestIterator(t *testing.T) { tasks := createTasksForRequests(t, tenant, r1, r2, r3) - iters := make([]v1.PeekingIterator[v1.Request], 0, len(tasks)) + iters := make([]v2.PeekIterator[v1.Request], 0, len(tasks)) for _, task := range tasks { - iters = append(iters, v1.NewPeekingIter(task.RequestIter(tokenizer))) + iters = append(iters, v2.NewPeekIter(task.RequestIter(tokenizer))) } // merge the request iterators using the heap sort iterator diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go index b0d4f57ca5c1..4dc02fef435f 100644 --- a/pkg/bloomgateway/processor.go +++ b/pkg/bloomgateway/processor.go @@ -11,6 +11,7 @@ import ( "github.com/grafana/dskit/concurrency" "github.com/grafana/dskit/multierror" + iter "github.com/grafana/loki/v3/pkg/iter/v2" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" @@ -149,7 +150,7 @@ func (p *processor) processBlock(_ context.Context, bq *bloomshipper.CloseableBl } tokenizer := v1.NewNGramTokenizer(schema.NGramLen(), schema.NGramSkip()) - iters := make([]v1.PeekingIterator[v1.Request], 0, len(tasks)) + iters := make([]iter.PeekIterator[v1.Request], 0, len(tasks)) for _, task := range tasks { // NB(owen-d): can be helpful for debugging, 
but is noisy @@ -162,7 +163,7 @@ func (p *processor) processBlock(_ context.Context, bq *bloomshipper.CloseableBl // sp.LogKV("process block", blockID, "series", len(task.series)) // } - it := v1.NewPeekingIter(task.RequestIter(tokenizer)) + it := iter.NewPeekIter(task.RequestIter(tokenizer)) iters = append(iters, it) } diff --git a/pkg/chunkenc/dumb_chunk.go b/pkg/chunkenc/dumb_chunk.go index ef8548b1438d..33df4501927b 100644 --- a/pkg/chunkenc/dumb_chunk.go +++ b/pkg/chunkenc/dumb_chunk.go @@ -147,7 +147,7 @@ func (i *dumbChunkIterator) Next() bool { } } -func (i *dumbChunkIterator) Entry() logproto.Entry { +func (i *dumbChunkIterator) At() logproto.Entry { return i.entries[i.i] } @@ -159,7 +159,7 @@ func (i *dumbChunkIterator) StreamHash() uint64 { return 0 } -func (i *dumbChunkIterator) Error() error { +func (i *dumbChunkIterator) Err() error { return nil } diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go index f4e27255633d..b144c1d695e1 100644 --- a/pkg/chunkenc/memchunk.go +++ b/pkg/chunkenc/memchunk.go @@ -1121,7 +1121,7 @@ func (c *MemChunk) Rebound(start, end time.Time, filter filter.Func) (Chunk, err } for itr.Next() { - entry := itr.Entry() + entry := itr.At() if filter != nil && filter(entry.Timestamp, entry.Line, logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata)...) { continue } @@ -1154,14 +1154,14 @@ type encBlock struct { func (b encBlock) Iterator(ctx context.Context, pipeline log.StreamPipeline) iter.EntryIterator { if len(b.b) == 0 { - return iter.NoopIterator + return iter.NoopEntryIterator } return newEntryIterator(ctx, GetReaderPool(b.enc), b.b, pipeline, b.format, b.symbolizer) } func (b encBlock) SampleIterator(ctx context.Context, extractor log.StreamSampleExtractor) iter.SampleIterator { if len(b.b) == 0 { - return iter.NoopIterator + return iter.NoopSampleIterator } return newSampleIterator(ctx, GetReaderPool(b.enc), b.b, b.format, extractor, b.symbolizer) } @@ -1184,7 +1184,7 @@ func (b block) MaxTime() int64 { func (hb *headBlock) Iterator(ctx context.Context, direction logproto.Direction, mint, maxt int64, pipeline log.StreamPipeline) iter.EntryIterator { if hb.IsEmpty() || (maxt < hb.mint || hb.maxt < mint) { - return iter.NoopIterator + return iter.NoopEntryIterator } stats := stats.FromContext(ctx) @@ -1239,7 +1239,7 @@ func (hb *headBlock) Iterator(ctx context.Context, direction logproto.Direction, } if len(streams) == 0 { - return iter.NoopIterator + return iter.NoopEntryIterator } streamsResult := make([]logproto.Stream, 0, len(streams)) for _, stream := range streams { @@ -1250,7 +1250,7 @@ func (hb *headBlock) Iterator(ctx context.Context, direction logproto.Direction, func (hb *headBlock) SampleIterator(ctx context.Context, mint, maxt int64, extractor log.StreamSampleExtractor) iter.SampleIterator { if hb.IsEmpty() || (maxt < hb.mint || hb.maxt < mint) { - return iter.NoopIterator + return iter.NoopSampleIterator } stats := stats.FromContext(ctx) stats.AddHeadChunkLines(int64(len(hb.entries))) @@ -1290,7 +1290,7 @@ func (hb *headBlock) SampleIterator(ctx context.Context, mint, maxt int64, extra stats.SetQueryReferencedStructuredMetadata() } if len(series) == 0 { - return iter.NoopIterator + return iter.NoopSampleIterator } seriesRes := make([]logproto.Series, 0, len(series)) for _, s := range series { @@ -1563,7 +1563,7 @@ func (si *bufferedIterator) moveNext() (int64, []byte, labels.Labels, bool) { return ts, si.buf[:lineSize], si.symbolizer.Lookup(si.symbolsBuf[:nSymbols]), true } -func (si *bufferedIterator) Error() error 
{ return si.err } +func (si *bufferedIterator) Err() error { return si.err } func (si *bufferedIterator) Close() error { if !si.closed { @@ -1609,7 +1609,7 @@ type entryBufferedIterator struct { currLabels log.LabelsResult } -func (e *entryBufferedIterator) Entry() logproto.Entry { +func (e *entryBufferedIterator) At() logproto.Entry { return e.cur } @@ -1645,12 +1645,11 @@ func (e *entryBufferedIterator) Close() error { } func newSampleIterator(ctx context.Context, pool ReaderPool, b []byte, format byte, extractor log.StreamSampleExtractor, symbolizer *symbolizer) iter.SampleIterator { - it := &sampleBufferedIterator{ + return &sampleBufferedIterator{ bufferedIterator: newBufferedIterator(ctx, pool, b, format, symbolizer), extractor: extractor, stats: stats.FromContext(ctx), } - return it } type sampleBufferedIterator struct { @@ -1691,6 +1690,6 @@ func (e *sampleBufferedIterator) Labels() string { return e.currLabels.String() func (e *sampleBufferedIterator) StreamHash() uint64 { return e.extractor.BaseLabels().Hash() } -func (e *sampleBufferedIterator) Sample() logproto.Sample { +func (e *sampleBufferedIterator) At() logproto.Sample { return e.cur } diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go index 1d9ef3eea21f..af8ef89339af 100644 --- a/pkg/chunkenc/memchunk_test.go +++ b/pkg/chunkenc/memchunk_test.go @@ -194,7 +194,7 @@ func TestBlock(t *testing.T) { idx := 0 for it.Next() { - e := it.Entry() + e := it.At() require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) require.Equal(t, cases[idx].str, e.Line) if chunkFormat < ChunkFormatV4 { @@ -211,7 +211,7 @@ func TestBlock(t *testing.T) { idx++ } - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.NoError(t, it.Close()) require.Equal(t, len(cases), idx) @@ -226,14 +226,14 @@ func TestBlock(t *testing.T) { sampleIt := chk.SampleIterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), countExtractor) idx = 0 for sampleIt.Next() { - s := sampleIt.Sample() + s := sampleIt.At() require.Equal(t, cases[idx].ts, s.Timestamp) require.Equal(t, 1., s.Value) require.NotEmpty(t, s.Hash) idx++ } - require.NoError(t, sampleIt.Error()) + require.NoError(t, sampleIt.Err()) require.NoError(t, sampleIt.Close()) require.Equal(t, len(cases), idx) @@ -243,12 +243,12 @@ func TestBlock(t *testing.T) { idx := 2 for it.Next() { - e := it.Entry() + e := it.At() require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) require.Equal(t, cases[idx].str, e.Line) idx++ } - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.Equal(t, 6, idx) }) }) @@ -287,7 +287,7 @@ func TestCorruptChunk(t *testing.T) { for it.Next() { idx++ } - require.Error(t, it.Error(), "case %d", i) + require.Error(t, it.Err(), "case %d", i) require.NoError(t, it.Close()) } }) @@ -321,8 +321,8 @@ func TestReadFormatV1(t *testing.T) { i := int64(0) for it.Next() { - require.Equal(t, i, it.Entry().Timestamp.UnixNano()) - require.Equal(t, testdata.LogString(i), it.Entry().Line) + require.Equal(t, i, it.At().Timestamp.UnixNano()) + require.Equal(t, testdata.LogString(i), it.At().Line) i++ } @@ -354,10 +354,10 @@ func TestRoundtripV2(t *testing.T) { i := int64(0) var data int64 for it.Next() { - require.Equal(t, i, it.Entry().Timestamp.UnixNano()) - require.Equal(t, testdata.LogString(i), it.Entry().Line) + require.Equal(t, i, it.At().Timestamp.UnixNano()) + require.Equal(t, testdata.LogString(i), it.At().Line) - data += int64(len(it.Entry().Line)) + data += int64(len(it.At().Line)) i++ } require.Equal(t, populated, 
data) @@ -462,7 +462,7 @@ func TestSerialization(t *testing.T) { for i := 0; i < numSamples; i++ { require.True(t, it.Next()) - e := it.Entry() + e := it.At() require.Equal(t, int64(i), e.Timestamp.UnixNano()) require.Equal(t, strconv.Itoa(i), e.Line) if appendWithStructuredMetadata && testData.chunkFormat >= ChunkFormatV4 { @@ -473,7 +473,7 @@ func TestSerialization(t *testing.T) { require.Nil(t, e.StructuredMetadata) } } - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) extractor := func() log.StreamSampleExtractor { ex, err := log.NewLineSampleExtractor(log.CountExtractor, nil, nil, false, false) @@ -487,7 +487,7 @@ func TestSerialization(t *testing.T) { for i := 0; i < numSamples; i++ { require.True(t, sampleIt.Next(), i) - s := sampleIt.Sample() + s := sampleIt.At() require.Equal(t, int64(i), s.Timestamp) require.Equal(t, 1., s.Value) if appendWithStructuredMetadata && testData.chunkFormat >= ChunkFormatV4 { @@ -496,7 +496,7 @@ func TestSerialization(t *testing.T) { require.Equal(t, labels.EmptyLabels().String(), sampleIt.Labels()) } } - require.NoError(t, sampleIt.Error()) + require.NoError(t, sampleIt.Err()) byt2, err := chk.Bytes() require.NoError(t, err) @@ -544,7 +544,7 @@ func TestChunkFilling(t *testing.T) { require.NoError(t, err) i = 0 for it.Next() { - entry := it.Entry() + entry := it.At() require.Equal(t, i, entry.Timestamp.UnixNano()) i++ } @@ -810,7 +810,7 @@ func TestIteratorClose(t *testing.T) { func(iter iter.EntryIterator, t *testing.T) { // close after iterating for iter.Next() { - _ = iter.Entry() + _ = iter.At() } if err := iter.Close(); err != nil { t.Fatal(err) @@ -819,7 +819,7 @@ func TestIteratorClose(t *testing.T) { func(iter iter.EntryIterator, t *testing.T) { // close after a single iteration iter.Next() - _ = iter.Entry() + _ = iter.At() if err := iter.Close(); err != nil { t.Fatal(err) } @@ -909,7 +909,7 @@ func BenchmarkRead(b *testing.B) { panic(err) } for iterator.Next() { - _ = iterator.Entry() + _ = iterator.At() } if err := iterator.Close(); err != nil { b.Fatal(err) @@ -933,7 +933,7 @@ func BenchmarkRead(b *testing.B) { for _, c := range chunks { iterator := c.SampleIterator(ctx, time.Unix(0, 0), time.Now(), countExtractor) for iterator.Next() { - _ = iterator.Sample() + _ = iterator.At() } if err := iterator.Close(); err != nil { b.Fatal(err) @@ -961,7 +961,7 @@ func BenchmarkBackwardIterator(b *testing.B) { panic(err) } for iterator.Next() { - _ = iterator.Entry() + _ = iterator.At() } if err := iterator.Close(); err != nil { b.Fatal(err) @@ -985,7 +985,7 @@ func TestGenerateDataSize(t *testing.T) { panic(err) } for iterator.Next() { - e := iterator.Entry() + e := iterator.At() bytesRead += uint64(len(e.Line)) } if err := iterator.Close(); err != nil { @@ -1022,7 +1022,7 @@ func BenchmarkHeadBlockIterator(b *testing.B) { iter := h.Iterator(context.Background(), logproto.BACKWARD, 0, math.MaxInt64, noopStreamPipeline) for iter.Next() { - _ = iter.Entry() + _ = iter.At() } } }) @@ -1053,7 +1053,7 @@ func BenchmarkHeadBlockSampleIterator(b *testing.B) { iter := h.SampleIterator(context.Background(), 0, math.MaxInt64, countExtractor) for iter.Next() { - _ = iter.Sample() + _ = iter.At() } iter.Close() } @@ -1302,7 +1302,7 @@ func BenchmarkBufferedIteratorLabels(b *testing.B) { for n := 0; n < b.N; n++ { for _, it := range iters { for it.Next() { - streams = append(streams, logproto.Stream{Labels: it.Labels(), Entries: []logproto.Entry{it.Entry()}}) + streams = append(streams, logproto.Stream{Labels: it.Labels(), Entries: 
[]logproto.Entry{it.At()}}) } } } @@ -1337,7 +1337,7 @@ func BenchmarkBufferedIteratorLabels(b *testing.B) { for n := 0; n < b.N; n++ { for _, it := range iters { for it.Next() { - series = append(series, logproto.Series{Labels: it.Labels(), Samples: []logproto.Sample{it.Sample()}}) + series = append(series, logproto.Series{Labels: it.Labels(), Samples: []logproto.Sample{it.At()}}) } } } @@ -1374,7 +1374,7 @@ func Test_HeadIteratorReverse(t *testing.T) { require.NoError(t, err) for it.Next() { total-- - require.Equal(t, total, it.Entry().Timestamp.UnixNano()) + require.Equal(t, total, it.At().Timestamp.UnixNano()) } } @@ -1459,7 +1459,7 @@ func TestMemChunk_Rebound(t *testing.T) { break } - require.Equal(t, originalChunkItr.Entry(), newChunkItr.Entry()) + require.Equal(t, originalChunkItr.At(), newChunkItr.At()) } }) } @@ -1975,8 +1975,8 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { var streams []string var structuredMetadata [][]logproto.LabelAdapter for it.Next() { - require.NoError(t, it.Error()) - e := it.Entry() + require.NoError(t, it.Err()) + e := it.At() lines = append(lines, e.Line) streams = append(streams, it.Labels()) @@ -2012,8 +2012,8 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { var sumValues int var streams []string for it.Next() { - require.NoError(t, it.Error()) - e := it.Sample() + require.NoError(t, it.Err()) + e := it.At() sumValues += int(e.Value) streams = append(streams, it.Labels()) } diff --git a/pkg/chunkenc/unordered.go b/pkg/chunkenc/unordered.go index 807f80b2c0f8..683a4f17e2ad 100644 --- a/pkg/chunkenc/unordered.go +++ b/pkg/chunkenc/unordered.go @@ -288,7 +288,7 @@ func (hb *unorderedHeadBlock) Iterator(ctx context.Context, direction logproto.D stats.FromContext(ctx).SetQueryReferencedStructuredMetadata() } if len(streams) == 0 { - return iter.NoopIterator + return iter.NoopEntryIterator } streamsResult := make([]logproto.Stream, 0, len(streams)) for _, stream := range streams { @@ -345,7 +345,7 @@ func (hb *unorderedHeadBlock) SampleIterator( } if len(series) == 0 { - return iter.NoopIterator + return iter.NoopSampleIterator } seriesRes := make([]logproto.Series, 0, len(series)) for _, s := range series { diff --git a/pkg/chunkenc/unordered_test.go b/pkg/chunkenc/unordered_test.go index 43c07d0f835f..8a3420965bdb 100644 --- a/pkg/chunkenc/unordered_test.go +++ b/pkg/chunkenc/unordered_test.go @@ -25,7 +25,7 @@ func iterEq(t *testing.T, exp []entry, got iter.EntryIterator) { Timestamp: time.Unix(0, exp[i].t), Line: exp[i].s, StructuredMetadata: logproto.FromLabelsToLabelAdapters(exp[i].structuredMetadata), - }, got.Entry()) + }, got.At()) require.Equal(t, exp[i].structuredMetadata.String(), got.Labels()) i++ } @@ -486,10 +486,10 @@ func TestUnorderedChunkIterators(t *testing.T) { require.Equal(t, true, forward.Next()) require.Equal(t, true, backward.Next()) require.Equal(t, true, smpl.Next()) - require.Equal(t, time.Unix(int64(i), 0), forward.Entry().Timestamp) - require.Equal(t, time.Unix(int64(99-i), 0), backward.Entry().Timestamp) - require.Equal(t, float64(1), smpl.Sample().Value) - require.Equal(t, time.Unix(int64(i), 0).UnixNano(), smpl.Sample().Timestamp) + require.Equal(t, time.Unix(int64(i), 0), forward.At().Timestamp) + require.Equal(t, time.Unix(int64(99-i), 0), backward.At().Timestamp) + require.Equal(t, float64(1), smpl.At().Value) + require.Equal(t, time.Unix(int64(i), 0).UnixNano(), smpl.At().Timestamp) } require.Equal(t, false, forward.Next()) require.Equal(t, false, backward.Next()) @@ -530,7 
+530,7 @@ func BenchmarkUnorderedRead(b *testing.B) { panic(err) } for iterator.Next() { - _ = iterator.Entry() + _ = iterator.At() } if err := iterator.Close(); err != nil { b.Fatal(err) @@ -546,7 +546,7 @@ func BenchmarkUnorderedRead(b *testing.B) { for n := 0; n < b.N; n++ { iterator := tc.c.SampleIterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), countExtractor) for iterator.Next() { - _ = iterator.Sample() + _ = iterator.At() } if err := iterator.Close(); err != nil { b.Fatal(err) @@ -568,7 +568,7 @@ func TestUnorderedIteratorCountsAllEntries(t *testing.T) { panic(err) } for iterator.Next() { - next := iterator.Entry().Timestamp.UnixNano() + next := iterator.At().Timestamp.UnixNano() require.GreaterOrEqual(t, next, i) i = next ct++ @@ -582,10 +582,10 @@ func TestUnorderedIteratorCountsAllEntries(t *testing.T) { i = 0 smpl := c.SampleIterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), countExtractor) for smpl.Next() { - next := smpl.Sample().Timestamp + next := smpl.At().Timestamp require.GreaterOrEqual(t, next, i) i = next - ct += int(smpl.Sample().Value) + ct += int(smpl.At().Value) } require.Equal(t, c.Size(), ct) diff --git a/pkg/compactor/retention/retention_test.go b/pkg/compactor/retention/retention_test.go index a3f157dc7774..b140b3661f4d 100644 --- a/pkg/compactor/retention/retention_test.go +++ b/pkg/compactor/retention/retention_test.go @@ -562,7 +562,7 @@ func TestChunkRewriter(t *testing.T) { Timestamp: curr.Time(), Line: curr.String(), StructuredMetadata: logproto.FromLabelsToLabelAdapters(expectedStructuredMetadata), - }, newChunkItr.Entry()) + }, newChunkItr.At()) require.Equal(t, expectedStructuredMetadata.String(), newChunkItr.Labels()) } } diff --git a/pkg/indexgateway/gateway.go b/pkg/indexgateway/gateway.go index 7b49490a012e..052575647951 100644 --- a/pkg/indexgateway/gateway.go +++ b/pkg/indexgateway/gateway.go @@ -19,6 +19,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql/syntax" "github.com/grafana/loki/v3/pkg/logqlmodel/stats" @@ -613,14 +614,14 @@ func accumulateChunksToShards( for i := range filteredChks { for j < len(chks) { switch filteredChks[i].Cmp(chks[j]) { - case v1.Less: + case iter.Less: // this chunk is not in the queried index, continue checking other chunks continue outer - case v1.Greater: + case iter.Greater: // next chunk in index but didn't pass filter; continue j++ continue - case v1.Eq: + case iter.Eq: // a match; set the sizing info filteredChks[i].KB = chks[j].KB filteredChks[i].Entries = chks[j].Entries @@ -679,32 +680,32 @@ type refWithSizingInfo struct { } // careful: only checks from,through,checksum -func (r refWithSizingInfo) Cmp(chk tsdb_index.ChunkMeta) v1.Ord { +func (r refWithSizingInfo) Cmp(chk tsdb_index.ChunkMeta) iter.Ord { ref := *r.ref chkFrom := model.Time(chk.MinTime) if ref.From != chkFrom { if ref.From < chkFrom { - return v1.Less + return iter.Less } - return v1.Greater + return iter.Greater } chkThrough := model.Time(chk.MaxTime) if ref.Through != chkThrough { if ref.Through < chkThrough { - return v1.Less + return iter.Less } - return v1.Greater + return iter.Greater } if ref.Checksum != chk.Checksum { if ref.Checksum < chk.Checksum { - return v1.Less + return iter.Less } - return v1.Greater + return iter.Greater } - return v1.Eq + return iter.Eq } type failingIndexClient struct{} diff --git 
a/pkg/indexgateway/gateway_test.go b/pkg/indexgateway/gateway_test.go index aa17482776e7..cf5cd7256486 100644 --- a/pkg/indexgateway/gateway_test.go +++ b/pkg/indexgateway/gateway_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" - v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/chunk" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/series/index" @@ -307,7 +307,7 @@ func TestRefWithSizingInfo(t *testing.T) { desc string a refWithSizingInfo b tsdb_index.ChunkMeta - exp v1.Ord + exp v2.Ord }{ { desc: "less by from", @@ -319,7 +319,7 @@ func TestRefWithSizingInfo(t *testing.T) { b: tsdb_index.ChunkMeta{ MinTime: 2, }, - exp: v1.Less, + exp: v2.Less, }, { desc: "eq by from", @@ -331,7 +331,7 @@ func TestRefWithSizingInfo(t *testing.T) { b: tsdb_index.ChunkMeta{ MinTime: 1, }, - exp: v1.Eq, + exp: v2.Eq, }, { desc: "gt by from", @@ -343,7 +343,7 @@ func TestRefWithSizingInfo(t *testing.T) { b: tsdb_index.ChunkMeta{ MinTime: 1, }, - exp: v1.Greater, + exp: v2.Greater, }, { desc: "less by through", @@ -355,7 +355,7 @@ func TestRefWithSizingInfo(t *testing.T) { b: tsdb_index.ChunkMeta{ MaxTime: 2, }, - exp: v1.Less, + exp: v2.Less, }, { desc: "eq by through", @@ -367,7 +367,7 @@ func TestRefWithSizingInfo(t *testing.T) { b: tsdb_index.ChunkMeta{ MaxTime: 2, }, - exp: v1.Eq, + exp: v2.Eq, }, { desc: "gt by through", @@ -379,7 +379,7 @@ func TestRefWithSizingInfo(t *testing.T) { b: tsdb_index.ChunkMeta{ MaxTime: 1, }, - exp: v1.Greater, + exp: v2.Greater, }, { desc: "less by checksum", @@ -391,7 +391,7 @@ func TestRefWithSizingInfo(t *testing.T) { b: tsdb_index.ChunkMeta{ Checksum: 2, }, - exp: v1.Less, + exp: v2.Less, }, { desc: "eq by checksum", @@ -403,7 +403,7 @@ func TestRefWithSizingInfo(t *testing.T) { b: tsdb_index.ChunkMeta{ Checksum: 2, }, - exp: v1.Eq, + exp: v2.Eq, }, { desc: "gt by checksum", @@ -415,7 +415,7 @@ func TestRefWithSizingInfo(t *testing.T) { b: tsdb_index.ChunkMeta{ Checksum: 1, }, - exp: v1.Greater, + exp: v2.Greater, }, } { t.Run(tc.desc, func(t *testing.T) { diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go index 1b0c76466dc1..5a816a3b779d 100644 --- a/pkg/ingester/checkpoint_test.go +++ b/pkg/ingester/checkpoint_test.go @@ -484,7 +484,7 @@ func Test_SeriesIterator(t *testing.T) { Labels: logproto.FromLabelAdaptersToLabels(iter.Stream().Labels).String(), } for it.Next() { - stream.Entries = append(stream.Entries, it.Entry()) + stream.Entries = append(stream.Entries, it.At()) } require.NoError(t, it.Close()) streams = append(streams, stream) diff --git a/pkg/ingester/chunk_test.go b/pkg/ingester/chunk_test.go index 9ceb3c740926..f6a16731e6d4 100644 --- a/pkg/ingester/chunk_test.go +++ b/pkg/ingester/chunk_test.go @@ -19,25 +19,25 @@ import ( func testIteratorForward(t *testing.T, iter iter.EntryIterator, from, through int64) { i := from for iter.Next() { - entry := iter.Entry() + entry := iter.At() require.Equal(t, time.Unix(i, 0).Unix(), entry.Timestamp.Unix()) require.Equal(t, fmt.Sprintf("line %d", i), entry.Line) i++ } require.Equal(t, through, i) - require.NoError(t, iter.Error()) + require.NoError(t, iter.Err()) } func testIteratorBackward(t *testing.T, iter iter.EntryIterator, from, through int64) { i := through - 1 for iter.Next() { - entry := iter.Entry() + entry := iter.At() require.Equal(t, time.Unix(i, 
0).Unix(), entry.Timestamp.Unix()) require.Equal(t, fmt.Sprintf("line %d", i), entry.Line) i-- } require.Equal(t, from-1, i) - require.NoError(t, iter.Error()) + require.NoError(t, iter.Err()) } func TestIterator(t *testing.T) { diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go index 1287be3d4bfd..69462a3d352a 100644 --- a/pkg/ingester/flush_test.go +++ b/pkg/ingester/flush_test.go @@ -588,8 +588,8 @@ func buildStreamsFromChunk(t *testing.T, lbs string, chk chunkenc.Chunk) logprot Labels: lbs, } for it.Next() { - stream.Entries = append(stream.Entries, it.Entry()) + stream.Entries = append(stream.Entries, it.At()) } - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) return stream } diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 570452af44eb..17daa7b3ba58 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -1043,8 +1043,8 @@ func Test_DedupeIngester(t *testing.T) { actualHashes := []uint64{} for j := 0; j < int(streamCount); j++ { require.True(t, it.Next()) - require.Equal(t, fmt.Sprintf("line %d", i), it.Entry().Line) - require.Equal(t, i, it.Entry().Timestamp.UnixNano()) + require.Equal(t, fmt.Sprintf("line %d", i), it.At().Line) + require.Equal(t, i, it.At().Timestamp.UnixNano()) require.Equal(t, `{bar="", foo="bar"}`, it.Labels()) actualHashes = append(actualHashes, it.StreamHash()) } @@ -1052,7 +1052,7 @@ func Test_DedupeIngester(t *testing.T) { require.Equal(t, streamHashes, actualHashes) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) }) t.Run("forward log", func(t *testing.T) { iterators := make([]iter.EntryIterator, 0, len(ingesterSet)) @@ -1073,8 +1073,8 @@ func Test_DedupeIngester(t *testing.T) { actualHashes := []uint64{} for j := 0; j < int(streamCount); j++ { require.True(t, it.Next()) - require.Equal(t, fmt.Sprintf("line %d", i), it.Entry().Line) - require.Equal(t, i, it.Entry().Timestamp.UnixNano()) + require.Equal(t, fmt.Sprintf("line %d", i), it.At().Line) + require.Equal(t, i, it.At().Timestamp.UnixNano()) require.Equal(t, `{bar="", foo="bar"}`, it.Labels()) actualHashes = append(actualHashes, it.StreamHash()) } @@ -1082,7 +1082,7 @@ func Test_DedupeIngester(t *testing.T) { require.Equal(t, streamHashes, actualHashes) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) }) t.Run("sum by metrics", func(t *testing.T) { iterators := make([]iter.SampleIterator, 0, len(ingesterSet)) @@ -1109,8 +1109,8 @@ func Test_DedupeIngester(t *testing.T) { actualHashes := []uint64{} for j := 0; j < int(streamCount); j++ { require.True(t, it.Next()) - require.Equal(t, float64(1), it.Sample().Value) - require.Equal(t, i, it.Sample().Timestamp) + require.Equal(t, float64(1), it.At().Value) + require.Equal(t, i, it.At().Timestamp) labels = append(labels, it.Labels()) actualHashes = append(actualHashes, it.StreamHash()) } @@ -1120,7 +1120,7 @@ func Test_DedupeIngester(t *testing.T) { require.Equal(t, streamHashes, actualHashes) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) }) t.Run("sum metrics", func(t *testing.T) { iterators := make([]iter.SampleIterator, 0, len(ingesterSet)) @@ -1141,8 +1141,8 @@ func Test_DedupeIngester(t *testing.T) { actualHashes := []uint64{} for j := 0; j < int(streamCount); j++ { require.True(t, it.Next()) - require.Equal(t, float64(1), it.Sample().Value) - require.Equal(t, i, it.Sample().Timestamp) + require.Equal(t, float64(1), 
it.At().Value) + require.Equal(t, i, it.At().Timestamp) require.Equal(t, "{}", it.Labels()) actualHashes = append(actualHashes, it.StreamHash()) } @@ -1150,7 +1150,7 @@ func Test_DedupeIngester(t *testing.T) { require.Equal(t, streamHashes, actualHashes) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) }) } @@ -1205,12 +1205,12 @@ func Test_DedupeIngesterParser(t *testing.T) { for j := 0; j < streamCount; j++ { for k := 0; k < 2; k++ { // 2 line per entry require.True(t, it.Next()) - require.Equal(t, int64(i), it.Entry().Timestamp.UnixNano()) + require.Equal(t, int64(i), it.At().Timestamp.UnixNano()) } } } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) }) t.Run("forward log", func(t *testing.T) { @@ -1235,12 +1235,12 @@ func Test_DedupeIngesterParser(t *testing.T) { for j := 0; j < streamCount; j++ { for k := 0; k < 2; k++ { // 2 line per entry require.True(t, it.Next()) - require.Equal(t, int64(i), it.Entry().Timestamp.UnixNano()) + require.Equal(t, int64(i), it.At().Timestamp.UnixNano()) } } } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) }) t.Run("no sum metrics", func(t *testing.T) { iterators := make([]iter.SampleIterator, 0, len(ingesterSet)) @@ -1262,13 +1262,13 @@ func Test_DedupeIngesterParser(t *testing.T) { for j := 0; j < streamCount; j++ { for k := 0; k < 2; k++ { // 2 line per entry require.True(t, it.Next()) - require.Equal(t, float64(1), it.Sample().Value) - require.Equal(t, int64(i), it.Sample().Timestamp) + require.Equal(t, float64(1), it.At().Value) + require.Equal(t, int64(i), it.At().Timestamp) } } } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) }) t.Run("sum metrics", func(t *testing.T) { iterators := make([]iter.SampleIterator, 0, len(ingesterSet)) @@ -1290,13 +1290,13 @@ func Test_DedupeIngesterParser(t *testing.T) { for j := 0; j < streamCount; j++ { for k := 0; k < 2; k++ { // 2 line per entry require.True(t, it.Next()) - require.Equal(t, float64(1), it.Sample().Value) - require.Equal(t, int64(i), it.Sample().Timestamp) + require.Equal(t, float64(1), it.At().Value) + require.Equal(t, int64(i), it.At().Timestamp) } } } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) }) } diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go index 80074f6391e9..7a2647a3bb01 100644 --- a/pkg/ingester/instance_test.go +++ b/pkg/ingester/instance_test.go @@ -681,7 +681,7 @@ func Test_ChunkFilter(t *testing.T) { defer it.Close() for it.Next() { - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) lbs, err := syntax.ParseLabels(it.Labels()) require.NoError(t, err) require.NotEqual(t, "dispatcher", lbs.Get("log_stream")) @@ -721,7 +721,7 @@ func Test_PipelineWrapper(t *testing.T) { for it.Next() { // Consume the iterator - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) } require.Equal(t, "test-user", wrapper.tenant) @@ -762,7 +762,7 @@ func Test_PipelineWrapper_disabled(t *testing.T) { for it.Next() { // Consume the iterator - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) } require.Equal(t, "", wrapper.tenant) @@ -853,7 +853,7 @@ func Test_ExtractorWrapper(t *testing.T) { for it.Next() { // Consume the iterator - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) } require.Equal(t, `sum(count_over_time({job="3"}[1m]))`, wrapper.query) @@ -888,7 +888,7 @@ func Test_ExtractorWrapper_disabled(t 
*testing.T) { for it.Next() { // Consume the iterator - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) } require.Equal(t, ``, wrapper.query) @@ -990,7 +990,7 @@ func Test_QueryWithDelete(t *testing.T) { var logs []string for it.Next() { - logs = append(logs, it.Entry().Line) + logs = append(logs, it.At().Line) } require.Equal(t, logs, []string{`msg="dispatcher_7"`}) @@ -1033,7 +1033,7 @@ func Test_QuerySampleWithDelete(t *testing.T) { var samples []float64 for it.Next() { - samples = append(samples, it.Sample().Value) + samples = append(samples, it.At().Value) } require.Equal(t, samples, []float64{1.}) diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go index 68974ae016b3..3bbd091b25c5 100644 --- a/pkg/ingester/stream_test.go +++ b/pkg/ingester/stream_test.go @@ -456,8 +456,8 @@ func TestUnorderedPush(t *testing.T) { require.Nil(t, err) for _, x := range exp { require.Equal(t, true, sItr.Next()) - require.Equal(t, x.Timestamp, time.Unix(0, sItr.Sample().Timestamp)) - require.Equal(t, float64(1), sItr.Sample().Value) + require.Equal(t, x.Timestamp, time.Unix(0, sItr.At().Timestamp)) + require.Equal(t, float64(1), sItr.At().Value) } require.Equal(t, false, sItr.Next()) } @@ -599,8 +599,8 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) { func iterEq(t *testing.T, exp []logproto.Entry, got iter.EntryIterator) { var i int for got.Next() { - require.Equal(t, exp[i].Timestamp, got.Entry().Timestamp, "failed on the (%d) ts", i) - require.Equal(t, exp[i].Line, got.Entry().Line) + require.Equal(t, exp[i].Timestamp, got.At().Timestamp, "failed on the (%d) ts", i) + require.Equal(t, exp[i].Line, got.At().Line) i++ } require.Equal(t, i, len(exp), "incorrect number of entries expected") diff --git a/pkg/iter/cache.go b/pkg/iter/cache.go index a6e12dffbce2..3066bdbb67b2 100644 --- a/pkg/iter/cache.go +++ b/pkg/iter/cache.go @@ -48,12 +48,12 @@ func (it *cachedIterator) consumeWrapped() bool { // we're done with the base iterator. if !ok { it.closeErr = it.Wrapped().Close() - it.iterErr = it.Wrapped().Error() + it.iterErr = it.Wrapped().Err() it.wrapped = nil return false } // we're caching entries - it.cache = append(it.cache, entryWithLabels{Entry: it.Wrapped().Entry(), labels: it.Wrapped().Labels(), streamHash: it.Wrapped().StreamHash()}) + it.cache = append(it.cache, entryWithLabels{Entry: it.Wrapped().At(), labels: it.Wrapped().Labels(), streamHash: it.Wrapped().StreamHash()}) it.curr++ return true } @@ -72,7 +72,7 @@ func (it *cachedIterator) Next() bool { return true } -func (it *cachedIterator) Entry() logproto.Entry { +func (it *cachedIterator) At() logproto.Entry { if len(it.cache) == 0 || it.curr < 0 || it.curr >= len(it.cache) { return logproto.Entry{} } @@ -94,7 +94,7 @@ func (it *cachedIterator) StreamHash() uint64 { return it.cache[it.curr].streamHash } -func (it *cachedIterator) Error() error { return it.iterErr } +func (it *cachedIterator) Err() error { return it.iterErr } func (it *cachedIterator) Close() error { it.Reset() @@ -145,12 +145,12 @@ func (it *cachedSampleIterator) consumeWrapped() bool { // we're done with the base iterator. 
if !ok { it.closeErr = it.Wrapped().Close() - it.iterErr = it.Wrapped().Error() + it.iterErr = it.Wrapped().Err() it.wrapped = nil return false } // we're caching entries - it.cache = append(it.cache, sampleWithLabels{Sample: it.Wrapped().Sample(), labels: it.Wrapped().Labels(), streamHash: it.Wrapped().StreamHash()}) + it.cache = append(it.cache, sampleWithLabels{Sample: it.Wrapped().At(), labels: it.Wrapped().Labels(), streamHash: it.Wrapped().StreamHash()}) it.curr++ return true } @@ -169,7 +169,7 @@ func (it *cachedSampleIterator) Next() bool { return true } -func (it *cachedSampleIterator) Sample() logproto.Sample { +func (it *cachedSampleIterator) At() logproto.Sample { if len(it.cache) == 0 || it.curr < 0 || it.curr >= len(it.cache) { return logproto.Sample{} } @@ -190,7 +190,7 @@ func (it *cachedSampleIterator) StreamHash() uint64 { return it.cache[it.curr].streamHash } -func (it *cachedSampleIterator) Error() error { return it.iterErr } +func (it *cachedSampleIterator) Err() error { return it.iterErr } func (it *cachedSampleIterator) Close() error { it.Reset() diff --git a/pkg/iter/cache_test.go b/pkg/iter/cache_test.go index 23ee9cb7d995..3d5c7340a43e 100644 --- a/pkg/iter/cache_test.go +++ b/pkg/iter/cache_test.go @@ -23,16 +23,16 @@ func Test_CachedIterator(t *testing.T) { assert := func() { require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Entry{}, c.Entry()) + require.Equal(t, logproto.Entry{}, c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[0], c.Entry()) + require.Equal(t, stream.Entries[0], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[1], c.Entry()) + require.Equal(t, stream.Entries[1], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[2], c.Entry()) + require.Equal(t, stream.Entries[2], c.At()) require.Equal(t, false, c.Next()) - require.NoError(t, c.Error()) - require.Equal(t, stream.Entries[2], c.Entry()) + require.NoError(t, c.Err()) + require.Equal(t, stream.Entries[2], c.At()) require.Equal(t, false, c.Next()) } @@ -45,30 +45,30 @@ func Test_CachedIterator(t *testing.T) { } func Test_EmptyCachedIterator(t *testing.T) { - c := NewCachedIterator(NoopIterator, 0) + c := NewCachedIterator(NoopEntryIterator, 0) require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Entry{}, c.Entry()) + require.Equal(t, logproto.Entry{}, c.At()) require.Equal(t, false, c.Next()) require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Entry{}, c.Entry()) + require.Equal(t, logproto.Entry{}, c.At()) require.Equal(t, nil, c.Close()) require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Entry{}, c.Entry()) + require.Equal(t, logproto.Entry{}, c.At()) require.Equal(t, false, c.Next()) require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Entry{}, c.Entry()) + require.Equal(t, logproto.Entry{}, c.At()) } func Test_ErrorCachedIterator(t *testing.T) { - c := NewCachedIterator(&errorIter{}, 0) + c := NewCachedIterator(ErrorEntryIterator, 0) require.Equal(t, false, c.Next()) require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Entry{}, c.Entry()) - require.Equal(t, errors.New("error"), c.Error()) + require.Equal(t, logproto.Entry{}, c.At()) + require.Equal(t, errors.New("error"), c.Err()) require.Equal(t, errors.New("close"), c.Close()) } @@ -84,19 +84,19 @@ func Test_CachedIteratorResetNotExhausted(t *testing.T) { c := NewCachedIterator(NewStreamIterator(stream), 3) require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[0], c.Entry()) + require.Equal(t, 
stream.Entries[0], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[1], c.Entry()) + require.Equal(t, stream.Entries[1], c.At()) c.Reset() require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[0], c.Entry()) + require.Equal(t, stream.Entries[0], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[1], c.Entry()) + require.Equal(t, stream.Entries[1], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[2], c.Entry()) + require.Equal(t, stream.Entries[2], c.At()) require.Equal(t, false, c.Next()) - require.NoError(t, c.Error()) - require.Equal(t, stream.Entries[2], c.Entry()) + require.NoError(t, c.Err()) + require.Equal(t, stream.Entries[2], c.At()) require.Equal(t, false, c.Next()) // Close the iterator reset it to the beginning. @@ -114,14 +114,14 @@ func Test_CachedIteratorResetExhausted(t *testing.T) { c := NewCachedIterator(NewStreamIterator(stream), 3) require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[0], c.Entry()) + require.Equal(t, stream.Entries[0], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[1], c.Entry()) + require.Equal(t, stream.Entries[1], c.At()) c.Reset() require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[0], c.Entry()) + require.Equal(t, stream.Entries[0], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, stream.Entries[1], c.Entry()) + require.Equal(t, stream.Entries[1], c.At()) require.Equal(t, false, c.Next()) // Close the iterator reset it to the beginning. @@ -141,16 +141,16 @@ func Test_CachedSampleIterator(t *testing.T) { assert := func() { require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Sample{}, c.Sample()) + require.Equal(t, logproto.Sample{}, c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[0], c.Sample()) + require.Equal(t, series.Samples[0], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[1], c.Sample()) + require.Equal(t, series.Samples[1], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[2], c.Sample()) + require.Equal(t, series.Samples[2], c.At()) require.Equal(t, false, c.Next()) - require.NoError(t, c.Error()) - require.Equal(t, series.Samples[2], c.Sample()) + require.NoError(t, c.Err()) + require.Equal(t, series.Samples[2], c.At()) require.Equal(t, false, c.Next()) } @@ -174,19 +174,19 @@ func Test_CachedSampleIteratorResetNotExhausted(t *testing.T) { c := NewCachedSampleIterator(NewSeriesIterator(series), 3) require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[0], c.Sample()) + require.Equal(t, series.Samples[0], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[1], c.Sample()) + require.Equal(t, series.Samples[1], c.At()) c.Reset() require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[0], c.Sample()) + require.Equal(t, series.Samples[0], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[1], c.Sample()) + require.Equal(t, series.Samples[1], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[2], c.Sample()) + require.Equal(t, series.Samples[2], c.At()) require.Equal(t, false, c.Next()) - require.NoError(t, c.Error()) - require.Equal(t, series.Samples[2], c.Sample()) + require.NoError(t, c.Err()) + require.Equal(t, series.Samples[2], c.At()) require.Equal(t, false, c.Next()) // Close the iterator reset it to the beginning. 
@@ -204,14 +204,14 @@ func Test_CachedSampleIteratorResetExhausted(t *testing.T) { c := NewCachedSampleIterator(NewSeriesIterator(series), 3) require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[0], c.Sample()) + require.Equal(t, series.Samples[0], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[1], c.Sample()) + require.Equal(t, series.Samples[1], c.At()) c.Reset() require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[0], c.Sample()) + require.Equal(t, series.Samples[0], c.At()) require.Equal(t, true, c.Next()) - require.Equal(t, series.Samples[1], c.Sample()) + require.Equal(t, series.Samples[1], c.At()) require.Equal(t, false, c.Next()) // Close the iterator reset it to the beginning. @@ -219,39 +219,29 @@ func Test_CachedSampleIteratorResetExhausted(t *testing.T) { } func Test_EmptyCachedSampleIterator(t *testing.T) { - c := NewCachedSampleIterator(NoopIterator, 0) + c := NewCachedSampleIterator(NoopSampleIterator, 0) require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Sample{}, c.Sample()) + require.Equal(t, logproto.Sample{}, c.At()) require.Equal(t, false, c.Next()) require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Sample{}, c.Sample()) + require.Equal(t, logproto.Sample{}, c.At()) require.Equal(t, nil, c.Close()) require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Sample{}, c.Sample()) + require.Equal(t, logproto.Sample{}, c.At()) require.Equal(t, false, c.Next()) require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Sample{}, c.Sample()) + require.Equal(t, logproto.Sample{}, c.At()) } func Test_ErrorCachedSampleIterator(t *testing.T) { - c := NewCachedSampleIterator(&errorIter{}, 0) + c := NewCachedSampleIterator(ErrorSampleIterator, 0) require.Equal(t, false, c.Next()) require.Equal(t, "", c.Labels()) - require.Equal(t, logproto.Sample{}, c.Sample()) - require.Equal(t, errors.New("error"), c.Error()) + require.Equal(t, logproto.Sample{}, c.At()) + require.Equal(t, errors.New("error"), c.Err()) require.Equal(t, errors.New("close"), c.Close()) } - -type errorIter struct{} - -func (errorIter) Next() bool { return false } -func (errorIter) Error() error { return errors.New("error") } -func (errorIter) Labels() string { return "" } -func (errorIter) StreamHash() uint64 { return 0 } -func (errorIter) Entry() logproto.Entry { return logproto.Entry{} } -func (errorIter) Sample() logproto.Sample { return logproto.Sample{} } -func (errorIter) Close() error { return errors.New("close") } diff --git a/pkg/iter/categorized_labels_iterator.go b/pkg/iter/categorized_labels_iterator.go index c91aa4991116..81c1b8f00ac7 100644 --- a/pkg/iter/categorized_labels_iterator.go +++ b/pkg/iter/categorized_labels_iterator.go @@ -5,13 +5,12 @@ import ( "github.com/prometheus/prometheus/model/labels" - "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql/syntax" ) type categorizeLabelsIterator struct { EntryIterator - currEntry logproto.Entry + currStreamLabels string currHash uint64 currErr error @@ -28,8 +27,8 @@ func (c *categorizeLabelsIterator) Next() bool { return false } - c.currEntry = c.Entry() - if len(c.currEntry.StructuredMetadata) == 0 && len(c.currEntry.Parsed) == 0 { + currEntry := c.At() + if len(currEntry.StructuredMetadata) == 0 && len(currEntry.Parsed) == 0 { c.currStreamLabels = c.EntryIterator.Labels() c.currHash = c.EntryIterator.StreamHash() return true @@ -44,10 +43,10 @@ func (c *categorizeLabelsIterator) Next() bool { } builder := labels.NewBuilder(lbls) - for _, 
label := range c.currEntry.StructuredMetadata { + for _, label := range currEntry.StructuredMetadata { builder.Del(label.Name) } - for _, label := range c.currEntry.Parsed { + for _, label := range currEntry.Parsed { builder.Del(label.Name) } @@ -58,7 +57,7 @@ func (c *categorizeLabelsIterator) Next() bool { return true } -func (c *categorizeLabelsIterator) Error() error { +func (c *categorizeLabelsIterator) Err() error { return c.currErr } diff --git a/pkg/iter/categorized_labels_iterator_test.go b/pkg/iter/categorized_labels_iterator_test.go index 790ca5413aba..0043053fdd1c 100644 --- a/pkg/iter/categorized_labels_iterator_test.go +++ b/pkg/iter/categorized_labels_iterator_test.go @@ -127,8 +127,8 @@ func TestNewCategorizeLabelsIterator(t *testing.T) { streamsEntries := make(map[string][]logproto.Entry) for itr.Next() { - streamsEntries[itr.Labels()] = append(streamsEntries[itr.Labels()], itr.Entry()) - require.NoError(t, itr.Error()) + streamsEntries[itr.Labels()] = append(streamsEntries[itr.Labels()], itr.At()) + require.NoError(t, itr.Err()) } var streams []logproto.Stream diff --git a/pkg/iter/entry_iterator.go b/pkg/iter/entry_iterator.go index 43394fbfe133..b0b5a4635d1e 100644 --- a/pkg/iter/entry_iterator.go +++ b/pkg/iter/entry_iterator.go @@ -15,13 +15,6 @@ import ( "github.com/grafana/loki/v3/pkg/util/loser" ) -// EntryIterator iterates over entries in time-order. -type EntryIterator interface { - Iterator - Entry() logproto.Entry -} - -// streamIterator iterates over entries in a stream. type streamIterator struct { i int stream logproto.Stream @@ -40,7 +33,7 @@ func (i *streamIterator) Next() bool { return i.i < len(i.stream.Entries) } -func (i *streamIterator) Error() error { +func (i *streamIterator) Err() error { return nil } @@ -48,9 +41,11 @@ func (i *streamIterator) Labels() string { return i.stream.Labels } -func (i *streamIterator) StreamHash() uint64 { return i.stream.Hash } +func (i *streamIterator) StreamHash() uint64 { + return i.stream.Hash +} -func (i *streamIterator) Entry() logproto.Entry { +func (i *streamIterator) At() logproto.Entry { return i.stream.Entries[i.i] } @@ -92,7 +87,7 @@ func NewMergeEntryIterator(ctx context.Context, is []EntryIterator, direction lo } func (i *mergeEntryIterator) closeEntry(e EntryIterator) { - if err := e.Error(); err != nil { + if err := e.Err(); err != nil { i.errs = append(i.errs, err) } util.LogError("closing iterator", e.Close) @@ -129,7 +124,7 @@ func (i *mergeEntryIterator) fillBuffer() { // Entries with identical timestamp and line are removed as duplicates. 
for { next := i.tree.Winner() - entry := next.Entry() + entry := next.At() i.buffer = append(i.buffer, entryWithLabels{ Entry: entry, labels: next.Labels(), @@ -175,7 +170,7 @@ func (i *mergeEntryIterator) nextFromBuffer() { i.buffer = i.buffer[1:] } -func (i *mergeEntryIterator) Entry() logproto.Entry { +func (i *mergeEntryIterator) At() logproto.Entry { return i.currEntry.Entry } @@ -185,7 +180,7 @@ func (i *mergeEntryIterator) Labels() string { func (i *mergeEntryIterator) StreamHash() uint64 { return i.currEntry.streamHash } -func (i *mergeEntryIterator) Error() error { +func (i *mergeEntryIterator) Err() error { switch len(i.errs) { case 0: return nil @@ -199,7 +194,7 @@ func (i *mergeEntryIterator) Error() error { func (i *mergeEntryIterator) Close() error { i.tree.Close() i.buffer = nil - return i.Error() + return i.Err() } func (i *mergeEntryIterator) Peek() time.Time { @@ -232,7 +227,7 @@ type entrySortIterator struct { // When timestamp is equal, the iterator sorts samples by their label alphabetically. func NewSortEntryIterator(is []EntryIterator, direction logproto.Direction) EntryIterator { if len(is) == 0 { - return NoopIterator + return NoopEntryIterator } if len(is) == 1 { return is[0] @@ -265,7 +260,7 @@ type sortFields struct { func sortFieldsAt(i EntryIterator) sortFields { return sortFields{ - timeNanos: i.Entry().Timestamp.UnixNano(), + timeNanos: i.At().Timestamp.UnixNano(), labels: i.Labels(), streamHash: i.StreamHash(), } @@ -295,7 +290,7 @@ func lessDescending(e1, e2 sortFields) bool { } func (i *entrySortIterator) closeEntry(e EntryIterator) { - if err := e.Error(); err != nil { + if err := e.Err(); err != nil { i.errs = append(i.errs, err) } util.LogError("closing iterator", e.Close) @@ -307,13 +302,13 @@ func (i *entrySortIterator) Next() bool { return false } next := i.tree.Winner() - i.currEntry.Entry = next.Entry() + i.currEntry.Entry = next.At() i.currEntry.labels = next.Labels() i.currEntry.streamHash = next.StreamHash() return true } -func (i *entrySortIterator) Entry() logproto.Entry { +func (i *entrySortIterator) At() logproto.Entry { return i.currEntry.Entry } @@ -325,7 +320,7 @@ func (i *entrySortIterator) StreamHash() uint64 { return i.currEntry.streamHash } -func (i *entrySortIterator) Error() error { +func (i *entrySortIterator) Err() error { switch len(i.errs) { case 0: return nil @@ -338,7 +333,7 @@ func (i *entrySortIterator) Error() error { func (i *entrySortIterator) Close() error { i.tree.Close() - return i.Error() + return i.Err() } // NewStreamsIterator returns an iterator over logproto.Stream @@ -388,8 +383,8 @@ func (i *queryClientIterator) Next() bool { return true } -func (i *queryClientIterator) Entry() logproto.Entry { - return i.curr.Entry() +func (i *queryClientIterator) At() logproto.Entry { + return i.curr.At() } func (i *queryClientIterator) Labels() string { @@ -398,7 +393,7 @@ func (i *queryClientIterator) Labels() string { func (i *queryClientIterator) StreamHash() uint64 { return i.curr.StreamHash() } -func (i *queryClientIterator) Error() error { +func (i *queryClientIterator) Err() error { return i.err } @@ -435,8 +430,8 @@ func (i *nonOverlappingIterator) Next() bool { return true } -func (i *nonOverlappingIterator) Entry() logproto.Entry { - return i.curr.Entry() +func (i *nonOverlappingIterator) At() logproto.Entry { + return i.curr.At() } func (i *nonOverlappingIterator) Labels() string { @@ -453,11 +448,11 @@ func (i *nonOverlappingIterator) StreamHash() uint64 { return i.curr.StreamHash() } -func (i 
*nonOverlappingIterator) Error() error { +func (i *nonOverlappingIterator) Err() error { if i.curr == nil { return nil } - return i.curr.Error() + return i.curr.Err() } func (i *nonOverlappingIterator) Close() error { @@ -492,13 +487,13 @@ func (i *timeRangedIterator) Next() bool { i.EntryIterator.Close() return ok } - ts := i.EntryIterator.Entry().Timestamp + ts := i.EntryIterator.At().Timestamp for ok && i.mint.After(ts) { ok = i.EntryIterator.Next() if !ok { continue } - ts = i.EntryIterator.Entry().Timestamp + ts = i.EntryIterator.At().Timestamp } if ok { if ts.Equal(i.mint) { // The mint is inclusive @@ -537,7 +532,7 @@ func NewReversedIter(it EntryIterator, limit uint32, preload bool) (EntryIterato iter: it, entriesWithLabels: make([]entryWithLabels, 0, 1024), limit: limit, - }, it.Error() + }, it.Err() if err != nil { return nil, err } @@ -553,7 +548,7 @@ func (i *reverseIterator) load() { if !i.loaded { i.loaded = true for count := uint32(0); (i.limit == 0 || count < i.limit) && i.iter.Next(); count++ { - i.entriesWithLabels = append(i.entriesWithLabels, entryWithLabels{i.iter.Entry(), i.iter.Labels(), i.iter.StreamHash()}) + i.entriesWithLabels = append(i.entriesWithLabels, entryWithLabels{i.iter.At(), i.iter.Labels(), i.iter.StreamHash()}) } i.iter.Close() } @@ -569,7 +564,7 @@ func (i *reverseIterator) Next() bool { return true } -func (i *reverseIterator) Entry() logproto.Entry { +func (i *reverseIterator) At() logproto.Entry { return i.cur.Entry } @@ -581,7 +576,7 @@ func (i *reverseIterator) StreamHash() uint64 { return i.cur.streamHash } -func (i *reverseIterator) Error() error { return nil } +func (i *reverseIterator) Err() error { return nil } func (i *reverseIterator) Close() error { if !i.loaded { @@ -616,7 +611,7 @@ func NewEntryReversedIter(it EntryIterator) (EntryIterator, error) { iter, err := &reverseEntryIterator{ iter: it, buf: entryBufferPool.Get().(*entryBuffer), - }, it.Error() + }, it.Err() if err != nil { return nil, err } @@ -628,7 +623,7 @@ func (i *reverseEntryIterator) load() { if !i.loaded { i.loaded = true for i.iter.Next() { - i.buf.entries = append(i.buf.entries, entryWithLabels{i.iter.Entry(), i.iter.Labels(), i.iter.StreamHash()}) + i.buf.entries = append(i.buf.entries, entryWithLabels{i.iter.At(), i.iter.Labels(), i.iter.StreamHash()}) } i.iter.Close() } @@ -644,7 +639,7 @@ func (i *reverseEntryIterator) Next() bool { return true } -func (i *reverseEntryIterator) Entry() logproto.Entry { +func (i *reverseEntryIterator) At() logproto.Entry { return i.cur.Entry } @@ -656,7 +651,7 @@ func (i *reverseEntryIterator) StreamHash() uint64 { return i.cur.streamHash } -func (i *reverseEntryIterator) Error() error { return nil } +func (i *reverseEntryIterator) Err() error { return nil } func (i *reverseEntryIterator) release() { if i.buf == nil { @@ -687,7 +682,7 @@ func ReadBatch(i EntryIterator, size uint32) (*logproto.QueryResponse, uint32, e streamsCount int ) for ; respSize < size && i.Next(); respSize++ { - labels, hash, entry := i.Labels(), i.StreamHash(), i.Entry() + labels, hash, entry := i.Labels(), i.StreamHash(), i.At() mutatedStreams, ok := streams[hash] if !ok { mutatedStreams = map[string]*logproto.Stream{} @@ -713,7 +708,7 @@ func ReadBatch(i EntryIterator, size uint32) (*logproto.QueryResponse, uint32, e result.Streams = append(result.Streams, *s) } } - return &result, respSize, i.Error() + return &result, respSize, i.Err() } type peekingEntryIterator struct { @@ -737,7 +732,7 @@ func NewPeekingIterator(iter EntryIterator) 
PeekingEntryIterator { next := &entryWithLabels{} if iter.Next() { cache = &entryWithLabels{ - Entry: iter.Entry(), + Entry: iter.At(), labels: iter.Labels(), streamHash: iter.StreamHash(), } @@ -766,7 +761,7 @@ func (it *peekingEntryIterator) Next() bool { // cacheNext caches the next element if it exists. func (it *peekingEntryIterator) cacheNext() { if it.iter.Next() { - it.cache.Entry = it.iter.Entry() + it.cache.Entry = it.iter.At() it.cache.labels = it.iter.Labels() it.cache.streamHash = it.iter.StreamHash() return @@ -799,7 +794,7 @@ func (it *peekingEntryIterator) StreamHash() uint64 { } // Entry implements `EntryIterator` -func (it *peekingEntryIterator) Entry() logproto.Entry { +func (it *peekingEntryIterator) At() logproto.Entry { if it.next != nil { return it.next.Entry } @@ -807,8 +802,8 @@ func (it *peekingEntryIterator) Entry() logproto.Entry { } // Error implements `EntryIterator` -func (it *peekingEntryIterator) Error() error { - return it.iter.Error() +func (it *peekingEntryIterator) Err() error { + return it.iter.Err() } // Close implements `EntryIterator` diff --git a/pkg/iter/entry_iterator_test.go b/pkg/iter/entry_iterator_test.go index 3c64c01e296e..e49ecf3ee528 100644 --- a/pkg/iter/entry_iterator_test.go +++ b/pkg/iter/entry_iterator_test.go @@ -91,12 +91,12 @@ func TestIterator(t *testing.T) { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { for i := int64(0); i < tc.length; i++ { assert.Equal(t, true, tc.iterator.Next()) - assert.Equal(t, tc.generator(i), tc.iterator.Entry(), fmt.Sprintln("iteration", i)) + assert.Equal(t, tc.generator(i), tc.iterator.At(), fmt.Sprintln("iteration", i)) assert.Equal(t, tc.labels, tc.iterator.Labels(), fmt.Sprintln("iteration", i)) } assert.Equal(t, false, tc.iterator.Next()) - assert.Equal(t, nil, tc.iterator.Error()) + assert.Equal(t, nil, tc.iterator.Err()) assert.NoError(t, tc.iterator.Close()) }) } @@ -148,12 +148,12 @@ func TestIteratorMultipleLabels(t *testing.T) { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { for i := int64(0); i < tc.length; i++ { assert.Equal(t, true, tc.iterator.Next()) - assert.Equal(t, tc.generator(i), tc.iterator.Entry(), fmt.Sprintln("iteration", i)) + assert.Equal(t, tc.generator(i), tc.iterator.At(), fmt.Sprintln("iteration", i)) assert.Equal(t, tc.labels(i), tc.iterator.Labels(), fmt.Sprintln("iteration", i)) } assert.Equal(t, false, tc.iterator.Next()) - assert.Equal(t, nil, tc.iterator.Error()) + assert.Equal(t, nil, tc.iterator.Err()) assert.NoError(t, tc.iterator.Close()) }) } @@ -173,7 +173,7 @@ func TestMergeIteratorPrefetch(t *testing.T) { }, "prefetch on Next() when called as first method": func(t *testing.T, i HeapIterator) { assert.True(t, i.Next()) - assert.Equal(t, logproto.Entry{Timestamp: time.Unix(0, 0), Line: "0"}, i.Entry()) + assert.Equal(t, logproto.Entry{Timestamp: time.Unix(0, 0), Line: "0"}, i.At()) }, } @@ -268,18 +268,18 @@ func TestMergeIteratorDeduplication(t *testing.T) { j = length - 1 - i } require.True(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.Equal(t, bar.Labels, it.Labels()) - require.Equal(t, bar.Entries[j], it.Entry()) + require.Equal(t, bar.Entries[j], it.At()) require.True(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.Equal(t, foo.Labels, it.Labels()) - require.Equal(t, foo.Entries[j], it.Entry()) + require.Equal(t, foo.Entries[j], it.At()) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) } // forward iteration it := 
NewMergeEntryIterator(context.Background(), []EntryIterator{ @@ -340,18 +340,18 @@ func TestMergeIteratorWithoutLabels(t *testing.T) { for i := 0; i < 3; i++ { require.True(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.Equal(t, bar.Labels, it.Labels()) - require.Equal(t, bar.Entries[i], it.Entry()) + require.Equal(t, bar.Entries[i], it.At()) require.True(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.Equal(t, foo.Labels, it.Labels()) - require.Equal(t, foo.Entries[i], it.Entry()) + require.Equal(t, foo.Entries[i], it.At()) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) } func mustReverseStreamIterator(it EntryIterator) EntryIterator { @@ -372,15 +372,15 @@ func TestReverseIterator(t *testing.T) { for i := int64((testSize / 2) + 1); i <= testSize; i++ { assert.Equal(t, true, reversedIter.Next()) - assert.Equal(t, identity(i), reversedIter.Entry(), fmt.Sprintln("iteration", i)) + assert.Equal(t, identity(i), reversedIter.At(), fmt.Sprintln("iteration", i)) assert.Equal(t, reversedIter.Labels(), itr2.Labels()) assert.Equal(t, true, reversedIter.Next()) - assert.Equal(t, identity(i), reversedIter.Entry(), fmt.Sprintln("iteration", i)) + assert.Equal(t, identity(i), reversedIter.At(), fmt.Sprintln("iteration", i)) assert.Equal(t, reversedIter.Labels(), itr1.Labels()) } assert.Equal(t, false, reversedIter.Next()) - assert.Equal(t, nil, reversedIter.Error()) + assert.Equal(t, nil, reversedIter.Err()) assert.NoError(t, reversedIter.Close()) } @@ -392,12 +392,12 @@ func TestReverseEntryIterator(t *testing.T) { for i := int64(testSize - 1); i >= 0; i-- { assert.Equal(t, true, reversedIter.Next()) - assert.Equal(t, identity(i), reversedIter.Entry(), fmt.Sprintln("iteration", i)) + assert.Equal(t, identity(i), reversedIter.At(), fmt.Sprintln("iteration", i)) assert.Equal(t, reversedIter.Labels(), defaultLabels) } assert.Equal(t, false, reversedIter.Next()) - assert.Equal(t, nil, reversedIter.Error()) + assert.Equal(t, nil, reversedIter.Err()) assert.NoError(t, reversedIter.Close()) } @@ -443,7 +443,7 @@ func Test_PeekingIterator(t *testing.T) { if !hasNext { t.Fatal("should have next.") } - if iter.Entry().Timestamp.UnixNano() != 1 { + if iter.At().Timestamp.UnixNano() != 1 { t.Fatal("wrong peeked time.") } @@ -458,7 +458,7 @@ func Test_PeekingIterator(t *testing.T) { if !hasNext { t.Fatal("should have next.") } - if iter.Entry().Timestamp.UnixNano() != 2 { + if iter.At().Timestamp.UnixNano() != 2 { t.Fatal("wrong peeked time.") } _, peek, ok = iter.Peek() @@ -472,7 +472,7 @@ func Test_PeekingIterator(t *testing.T) { if !hasNext { t.Fatal("should have next.") } - if iter.Entry().Timestamp.UnixNano() != 3 { + if iter.At().Timestamp.UnixNano() != 3 { t.Fatal("wrong peeked time.") } _, _, ok = iter.Peek() @@ -671,11 +671,11 @@ type CloseTestingIterator struct { e logproto.Entry } -func (i *CloseTestingIterator) Next() bool { return true } -func (i *CloseTestingIterator) Entry() logproto.Entry { return i.e } -func (i *CloseTestingIterator) Labels() string { return "" } -func (i *CloseTestingIterator) StreamHash() uint64 { return 0 } -func (i *CloseTestingIterator) Error() error { return nil } +func (i *CloseTestingIterator) Next() bool { return true } +func (i *CloseTestingIterator) At() logproto.Entry { return i.e } +func (i *CloseTestingIterator) Labels() string { return "" } +func (i *CloseTestingIterator) StreamHash() uint64 { return 0 } +func (i *CloseTestingIterator) Err() 
error { return nil } func (i *CloseTestingIterator) Close() error { i.closed.Store(true) return nil @@ -730,7 +730,7 @@ func BenchmarkSortIterator(b *testing.B) { b.StartTimer() it := NewMergeEntryIterator(ctx, itrs, logproto.BACKWARD) for it.Next() { - it.Entry() + it.At() } it.Close() } @@ -749,7 +749,7 @@ func BenchmarkSortIterator(b *testing.B) { b.StartTimer() it := NewMergeEntryIterator(ctx, itrs, logproto.BACKWARD) for it.Next() { - it.Entry() + it.At() } it.Close() } @@ -767,7 +767,7 @@ func BenchmarkSortIterator(b *testing.B) { b.StartTimer() it := NewSortEntryIterator(itrs, logproto.BACKWARD) for it.Next() { - it.Entry() + it.At() } it.Close() } @@ -799,7 +799,7 @@ func Test_EntrySortIterator(t *testing.T) { var i int64 = 5 defer it.Close() for it.Next() { - require.Equal(t, time.Unix(0, i), it.Entry().Timestamp) + require.Equal(t, time.Unix(0, i), it.At().Timestamp) i-- } }) @@ -827,7 +827,7 @@ func Test_EntrySortIterator(t *testing.T) { var i int64 defer it.Close() for it.Next() { - require.Equal(t, time.Unix(0, i), it.Entry().Timestamp) + require.Equal(t, time.Unix(0, i), it.At().Timestamp) i++ } }) @@ -855,13 +855,13 @@ func Test_EntrySortIterator(t *testing.T) { }, logproto.FORWARD) // The first entry appears in both so we expect it to be sorted by Labels. require.True(t, it.Next()) - require.Equal(t, time.Unix(0, 0), it.Entry().Timestamp) + require.Equal(t, time.Unix(0, 0), it.At().Timestamp) require.Equal(t, `a`, it.Labels()) var i int64 defer it.Close() for it.Next() { - require.Equal(t, time.Unix(0, i), it.Entry().Timestamp) + require.Equal(t, time.Unix(0, i), it.At().Timestamp) i++ } }) @@ -906,17 +906,17 @@ func TestDedupeMergeEntryIterator(t *testing.T) { }), }, logproto.FORWARD) require.True(t, it.Next()) - lines := []string{it.Entry().Line} - require.Equal(t, time.Unix(1, 0), it.Entry().Timestamp) + lines := []string{it.At().Line} + require.Equal(t, time.Unix(1, 0), it.At().Timestamp) require.True(t, it.Next()) - lines = append(lines, it.Entry().Line) - require.Equal(t, time.Unix(1, 0), it.Entry().Timestamp) + lines = append(lines, it.At().Line) + require.Equal(t, time.Unix(1, 0), it.At().Timestamp) require.True(t, it.Next()) - lines = append(lines, it.Entry().Line) - require.Equal(t, time.Unix(1, 0), it.Entry().Timestamp) + lines = append(lines, it.At().Line) + require.Equal(t, time.Unix(1, 0), it.At().Timestamp) require.True(t, it.Next()) - lines = append(lines, it.Entry().Line) - require.Equal(t, time.Unix(2, 0), it.Entry().Timestamp) + lines = append(lines, it.At().Line) + require.Equal(t, time.Unix(2, 0), it.At().Timestamp) // Two orderings are consistent with the inputs. if lines[0] == "1" { require.Equal(t, []string{"1", "0", "2", "3"}, lines) diff --git a/pkg/iter/iterator.go b/pkg/iter/iterator.go index 61c727428c71..4b4ef03cb922 100644 --- a/pkg/iter/iterator.go +++ b/pkg/iter/iterator.go @@ -1,28 +1,50 @@ package iter -import "github.com/grafana/loki/v3/pkg/logproto" +import ( + "errors" -// Iterator iterates over data in time-order. -type Iterator interface { - // Returns true if there is more data to iterate. - Next() bool + v2 "github.com/grafana/loki/v3/pkg/iter/v2" + "github.com/grafana/loki/v3/pkg/logproto" +) + +type Value interface { + logproto.Entry | logproto.Sample +} + +type StreamIterator[T Value] interface { + v2.CloseIterator[T] // Labels returns the labels for the current entry. // The labels can be mutated by the query engine and not reflect the original stream. 
Labels() string // StreamHash returns the hash of the original stream for the current entry. StreamHash() uint64 - Error() error - Close() error } -type noOpIterator struct{} +type EntryIterator StreamIterator[logproto.Entry] +type SampleIterator StreamIterator[logproto.Sample] + +// noOpIterator implements StreamIterator +type noOpIterator[T Value] struct{} + +func (noOpIterator[T]) Next() bool { return false } +func (noOpIterator[T]) Err() error { return nil } +func (noOpIterator[T]) At() (zero T) { return zero } +func (noOpIterator[T]) Labels() string { return "" } +func (noOpIterator[T]) StreamHash() uint64 { return 0 } +func (noOpIterator[T]) Close() error { return nil } + +var NoopEntryIterator = noOpIterator[logproto.Entry]{} +var NoopSampleIterator = noOpIterator[logproto.Sample]{} + +// errorIterator implements StreamIterator +type errorIterator[T Value] struct{} -var NoopIterator = noOpIterator{} +func (errorIterator[T]) Next() bool { return false } +func (errorIterator[T]) Err() error { return errors.New("error") } +func (errorIterator[T]) At() (zero T) { return zero } +func (errorIterator[T]) Labels() string { return "" } +func (errorIterator[T]) StreamHash() uint64 { return 0 } +func (errorIterator[T]) Close() error { return errors.New("close") } -func (noOpIterator) Next() bool { return false } -func (noOpIterator) Error() error { return nil } -func (noOpIterator) Labels() string { return "" } -func (noOpIterator) StreamHash() uint64 { return 0 } -func (noOpIterator) Entry() logproto.Entry { return logproto.Entry{} } -func (noOpIterator) Sample() logproto.Sample { return logproto.Sample{} } -func (noOpIterator) Close() error { return nil } +var ErrorEntryIterator = errorIterator[logproto.Entry]{} +var ErrorSampleIterator = errorIterator[logproto.Sample]{} diff --git a/pkg/iter/sample_iterator.go b/pkg/iter/sample_iterator.go index be55678ce628..cac1cac6c83e 100644 --- a/pkg/iter/sample_iterator.go +++ b/pkg/iter/sample_iterator.go @@ -13,14 +13,6 @@ import ( "github.com/grafana/loki/v3/pkg/util" ) -// SampleIterator iterates over samples in time-order. -type SampleIterator interface { - Iterator - // todo(ctovena) we should add `Seek(t int64) bool` - // This way we can skip when ranging over samples. - Sample() logproto.Sample -} - // PeekingSampleIterator is a sample iterator that can peek sample without moving the current sample. type PeekingSampleIterator interface { SampleIterator @@ -46,7 +38,7 @@ func NewPeekingSampleIterator(iter SampleIterator) PeekingSampleIterator { next := &sampleWithLabels{} if iter.Next() { cache = &sampleWithLabels{ - Sample: iter.Sample(), + Sample: iter.At(), labels: iter.Labels(), streamHash: iter.StreamHash(), } @@ -92,7 +84,7 @@ func (it *peekingSampleIterator) Next() bool { // cacheNext caches the next element if it exists. 
func (it *peekingSampleIterator) cacheNext() { if it.iter.Next() { - it.cache.Sample = it.iter.Sample() + it.cache.Sample = it.iter.At() it.cache.labels = it.iter.Labels() it.cache.streamHash = it.iter.StreamHash() return @@ -101,7 +93,7 @@ func (it *peekingSampleIterator) cacheNext() { it.cache = nil } -func (it *peekingSampleIterator) Sample() logproto.Sample { +func (it *peekingSampleIterator) At() logproto.Sample { if it.next != nil { return it.next.Sample } @@ -115,8 +107,8 @@ func (it *peekingSampleIterator) Peek() (string, logproto.Sample, bool) { return "", logproto.Sample{}, false } -func (it *peekingSampleIterator) Error() error { - return it.iter.Error() +func (it *peekingSampleIterator) Err() error { + return it.iter.Err() } type SampleIteratorHeap struct { @@ -144,7 +136,7 @@ func (h *SampleIteratorHeap) Pop() interface{} { } func (h SampleIteratorHeap) Less(i, j int) bool { - s1, s2 := h.its[i].Sample(), h.its[j].Sample() + s1, s2 := h.its[i].At(), h.its[j].At() if s1.Timestamp == s2.Timestamp { if h.its[i].StreamHash() == 0 { return h.its[i].Labels() < h.its[j].Labels() @@ -218,7 +210,7 @@ func (i *mergeSampleIterator) requeue(ei SampleIterator, advanced bool) { return } - if err := ei.Error(); err != nil { + if err := ei.Err(); err != nil { i.errs = append(i.errs, err) } util.LogError("closing iterator", ei.Close) @@ -238,7 +230,7 @@ func (i *mergeSampleIterator) Next() bool { // shortcut for the last iterator. if i.heap.Len() == 1 { - i.curr.Sample = i.heap.Peek().Sample() + i.curr.Sample = i.heap.Peek().At() i.curr.labels = i.heap.Peek().Labels() i.curr.streamHash = i.heap.Peek().StreamHash() if !i.heap.Peek().Next() { @@ -254,7 +246,7 @@ func (i *mergeSampleIterator) Next() bool { Outer: for i.heap.Len() > 0 { next := i.heap.Peek() - sample := next.Sample() + sample := next.At() if len(i.buffer) > 0 && (i.buffer[0].streamHash != next.StreamHash() || i.buffer[0].Timestamp != sample.Timestamp) { break } @@ -280,7 +272,7 @@ Outer: if !next.Next() { continue Outer } - sample := next.Sample() + sample := next.At() if next.StreamHash() != i.buffer[0].streamHash || sample.Timestamp != i.buffer[0].Timestamp { break @@ -321,7 +313,7 @@ func (i *mergeSampleIterator) nextFromBuffer() { i.buffer = i.buffer[1:] } -func (i *mergeSampleIterator) Sample() logproto.Sample { +func (i *mergeSampleIterator) At() logproto.Sample { return i.curr.Sample } @@ -333,7 +325,7 @@ func (i *mergeSampleIterator) StreamHash() uint64 { return i.curr.streamHash } -func (i *mergeSampleIterator) Error() error { +func (i *mergeSampleIterator) Err() error { switch len(i.errs) { case 0: return nil @@ -370,7 +362,7 @@ type sortSampleIterator struct { // When timestamp is equal, the iterator sorts samples by their label alphabetically. func NewSortSampleIterator(is []SampleIterator) SampleIterator { if len(is) == 0 { - return NoopIterator + return NoopSampleIterator } if len(is) == 1 { return is[0] @@ -397,7 +389,7 @@ func (i *sortSampleIterator) init() { continue } - if err := it.Error(); err != nil { + if err := it.Err(); err != nil { i.errs = append(i.errs, err) } util.LogError("closing iterator", it.Close) @@ -417,13 +409,13 @@ func (i *sortSampleIterator) Next() bool { } next := i.heap.Peek() - i.curr.Sample = next.Sample() + i.curr.Sample = next.At() i.curr.labels = next.Labels() i.curr.streamHash = next.StreamHash() // if the top iterator is empty, we remove it. 
if !next.Next() { heap.Pop(i.heap) - if err := next.Error(); err != nil { + if err := next.Err(); err != nil { i.errs = append(i.errs, err) } util.LogError("closing iterator", next.Close) @@ -435,7 +427,7 @@ func (i *sortSampleIterator) Next() bool { return true } -func (i *sortSampleIterator) Sample() logproto.Sample { +func (i *sortSampleIterator) At() logproto.Sample { return i.curr.Sample } @@ -447,7 +439,7 @@ func (i *sortSampleIterator) StreamHash() uint64 { return i.curr.streamHash } -func (i *sortSampleIterator) Error() error { +func (i *sortSampleIterator) Err() error { switch len(i.errs) { case 0: return nil @@ -505,8 +497,8 @@ func (i *sampleQueryClientIterator) Next() bool { return true } -func (i *sampleQueryClientIterator) Sample() logproto.Sample { - return i.curr.Sample() +func (i *sampleQueryClientIterator) At() logproto.Sample { + return i.curr.At() } func (i *sampleQueryClientIterator) Labels() string { @@ -517,7 +509,7 @@ func (i *sampleQueryClientIterator) StreamHash() uint64 { return i.curr.StreamHash() } -func (i *sampleQueryClientIterator) Error() error { +func (i *sampleQueryClientIterator) Err() error { return i.err } @@ -587,7 +579,7 @@ func (i *seriesIterator) Next() bool { return i.i < len(i.series.Samples) } -func (i *seriesIterator) Error() error { +func (i *seriesIterator) Err() error { return nil } @@ -599,7 +591,7 @@ func (i *seriesIterator) StreamHash() uint64 { return i.series.StreamHash } -func (i *seriesIterator) Sample() logproto.Sample { +func (i *seriesIterator) At() logproto.Sample { return i.series.Samples[i.i] } @@ -638,8 +630,8 @@ func (i *nonOverlappingSampleIterator) Next() bool { return true } -func (i *nonOverlappingSampleIterator) Sample() logproto.Sample { - return i.curr.Sample() +func (i *nonOverlappingSampleIterator) At() logproto.Sample { + return i.curr.At() } func (i *nonOverlappingSampleIterator) Labels() string { @@ -656,11 +648,11 @@ func (i *nonOverlappingSampleIterator) StreamHash() uint64 { return i.curr.StreamHash() } -func (i *nonOverlappingSampleIterator) Error() error { +func (i *nonOverlappingSampleIterator) Err() error { if i.curr == nil { return nil } - return i.curr.Error() + return i.curr.Err() } func (i *nonOverlappingSampleIterator) Close() error { @@ -694,13 +686,13 @@ func (i *timeRangedSampleIterator) Next() bool { i.SampleIterator.Close() return ok } - ts := i.SampleIterator.Sample().Timestamp + ts := i.SampleIterator.At().Timestamp for ok && i.mint > ts { ok = i.SampleIterator.Next() if !ok { continue } - ts = i.SampleIterator.Sample().Timestamp + ts = i.SampleIterator.At().Timestamp } if ok { if ts == i.mint { // The mint is inclusive @@ -724,7 +716,7 @@ func ReadSampleBatch(i SampleIterator, size uint32) (*logproto.SampleQueryRespon seriesCount int ) for ; respSize < size && i.Next(); respSize++ { - labels, hash, sample := i.Labels(), i.StreamHash(), i.Sample() + labels, hash, sample := i.Labels(), i.StreamHash(), i.At() streams, ok := series[hash] if !ok { streams = map[string]*logproto.Series{} @@ -750,5 +742,5 @@ func ReadSampleBatch(i SampleIterator, size uint32) (*logproto.SampleQueryRespon result.Series = append(result.Series, *s) } } - return &result, respSize, i.Error() + return &result, respSize, i.Err() } diff --git a/pkg/iter/sample_iterator_test.go b/pkg/iter/sample_iterator_test.go index da3113c547e6..2f37d40c1c2a 100644 --- a/pkg/iter/sample_iterator_test.go +++ b/pkg/iter/sample_iterator_test.go @@ -43,7 +43,7 @@ func TestNewPeekingSampleIterator(t *testing.T) { if !hasNext { t.Fatal("should 
have next.") } - if iter.Sample().Timestamp != 1 { + if iter.At().Timestamp != 1 { t.Fatal("wrong peeked time.") } @@ -58,7 +58,7 @@ func TestNewPeekingSampleIterator(t *testing.T) { if !hasNext { t.Fatal("should have next.") } - if iter.Sample().Timestamp != 2 { + if iter.At().Timestamp != 2 { t.Fatal("wrong peeked time.") } _, peek, ok = iter.Peek() @@ -72,7 +72,7 @@ func TestNewPeekingSampleIterator(t *testing.T) { if !hasNext { t.Fatal("should have next.") } - if iter.Sample().Timestamp != 3 { + if iter.At().Timestamp != 3 { t.Fatal("wrong peeked time.") } _, _, ok = iter.Peek() @@ -80,7 +80,7 @@ func TestNewPeekingSampleIterator(t *testing.T) { t.Fatal("should not be ok.") } require.NoError(t, iter.Close()) - require.NoError(t, iter.Error()) + require.NoError(t, iter.Err()) } func sample(i int) logproto.Sample { @@ -123,13 +123,13 @@ func TestNewMergeSampleIterator(t *testing.T) { for i := 1; i < 4; i++ { require.True(t, it.Next(), i) require.Equal(t, `{foo="car"}`, it.Labels(), i) - require.Equal(t, sample(i), it.Sample(), i) + require.Equal(t, sample(i), it.At(), i) require.True(t, it.Next(), i) require.Equal(t, `{foo="var"}`, it.Labels(), i) - require.Equal(t, sample(i), it.Sample(), i) + require.Equal(t, sample(i), it.At(), i) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.NoError(t, it.Close()) }) t.Run("no labels", func(t *testing.T) { @@ -169,13 +169,13 @@ func TestNewMergeSampleIterator(t *testing.T) { for i := 1; i < 4; i++ { require.True(t, it.Next(), i) require.Equal(t, ``, it.Labels(), i) - require.Equal(t, sample(i), it.Sample(), i) + require.Equal(t, sample(i), it.At(), i) require.True(t, it.Next(), i) require.Equal(t, ``, it.Labels(), i) - require.Equal(t, sample(i), it.Sample(), i) + require.Equal(t, sample(i), it.At(), i) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.NoError(t, it.Close()) }) } @@ -208,15 +208,15 @@ func TestNewSampleQueryClientIterator(t *testing.T) { for i := 1; i < 4; i++ { require.True(t, it.Next(), i) require.Equal(t, `{foo="var"}`, it.Labels(), i) - require.Equal(t, sample(i), it.Sample(), i) + require.Equal(t, sample(i), it.At(), i) } for i := 1; i < 4; i++ { require.True(t, it.Next(), i) require.Equal(t, `{foo="car"}`, it.Labels(), i) - require.Equal(t, sample(i), it.Sample(), i) + require.Equal(t, sample(i), it.At(), i) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.NoError(t, it.Close()) } @@ -232,10 +232,10 @@ func TestNewNonOverlappingSampleIterator(t *testing.T) { for i := 1; i < 6; i++ { require.True(t, it.Next(), i) require.Equal(t, `{foo="var"}`, it.Labels(), i) - require.Equal(t, sample(i), it.Sample(), i) + require.Equal(t, sample(i), it.At(), i) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.NoError(t, it.Close()) } @@ -256,11 +256,11 @@ type CloseTestingSmplIterator struct { s logproto.Sample } -func (i *CloseTestingSmplIterator) Next() bool { return true } -func (i *CloseTestingSmplIterator) Sample() logproto.Sample { return i.s } -func (i *CloseTestingSmplIterator) StreamHash() uint64 { return 0 } -func (i *CloseTestingSmplIterator) Labels() string { return "" } -func (i *CloseTestingSmplIterator) Error() error { return nil } +func (i *CloseTestingSmplIterator) Next() bool { return true } +func (i *CloseTestingSmplIterator) At() logproto.Sample { return i.s } +func (i *CloseTestingSmplIterator) StreamHash() 
uint64 { return 0 } +func (i *CloseTestingSmplIterator) Labels() string { return "" } +func (i *CloseTestingSmplIterator) Err() error { return nil } func (i *CloseTestingSmplIterator) Close() error { i.closed.Store(true) return nil @@ -287,8 +287,7 @@ func TestSampleIteratorWithClose_CloseIdempotent(t *testing.T) { c++ return nil } - ni := noOpIterator{} - it := SampleIteratorWithClose(ni, closeFn) + it := SampleIteratorWithClose(NoopSampleIterator, closeFn) // Multiple calls to close should result in c only ever having been incremented one time from 0 to 1 err := it.Close() assert.NoError(t, err) @@ -301,25 +300,16 @@ func TestSampleIteratorWithClose_CloseIdempotent(t *testing.T) { assert.EqualValues(t, 1, c) } -type alwaysErrorIterator struct { - noOpIterator -} - -func (alwaysErrorIterator) Close() error { - return errors.New("i always error") -} - func TestSampleIteratorWithClose_ReturnsError(t *testing.T) { closeFn := func() error { return errors.New("i broke") } - ei := alwaysErrorIterator{} - it := SampleIteratorWithClose(ei, closeFn) + it := SampleIteratorWithClose(ErrorSampleIterator, closeFn) err := it.Close() // Verify that a proper multi error is returned when both the iterator and the close function return errors if me, ok := err.(util.MultiError); ok { assert.True(t, len(me) == 2, "Expected 2 errors, one from the iterator and one from the close function") - assert.EqualError(t, me[0], "i always error") + assert.EqualError(t, me[0], "close") assert.EqualError(t, me[1], "i broke") } else { t.Error("Expected returned error to be of type util.MultiError") @@ -362,7 +352,7 @@ func BenchmarkSortSampleIterator(b *testing.B) { b.StartTimer() it := NewMergeSampleIterator(ctx, itrs) for it.Next() { - it.Sample() + it.At() } it.Close() } @@ -378,7 +368,7 @@ func BenchmarkSortSampleIterator(b *testing.B) { b.StartTimer() it := NewSortSampleIterator(itrs) for it.Next() { - it.Sample() + it.At() } it.Close() } @@ -410,7 +400,7 @@ func Test_SampleSortIterator(t *testing.T) { var i int64 defer it.Close() for it.Next() { - require.Equal(t, i, it.Sample().Timestamp) + require.Equal(t, i, it.At().Timestamp) i++ } }) @@ -439,13 +429,13 @@ func Test_SampleSortIterator(t *testing.T) { // The first entry appears in both so we expect it to be sorted by Labels. 
require.True(t, it.Next()) - require.Equal(t, int64(0), it.Sample().Timestamp) + require.Equal(t, int64(0), it.At().Timestamp) require.Equal(t, `a`, it.Labels()) var i int64 defer it.Close() for it.Next() { - require.Equal(t, i, it.Sample().Timestamp) + require.Equal(t, i, it.At().Timestamp) i++ } }) @@ -489,15 +479,15 @@ func TestDedupeMergeSampleIterator(t *testing.T) { }) require.True(t, it.Next()) - require.Equal(t, time.Unix(1, 0).UnixNano(), it.Sample().Timestamp) - require.Equal(t, 1., it.Sample().Value) - require.Equal(t, xxhash.Sum64String("1"), it.Sample().Hash) + require.Equal(t, time.Unix(1, 0).UnixNano(), it.At().Timestamp) + require.Equal(t, 1., it.At().Value) + require.Equal(t, xxhash.Sum64String("1"), it.At().Hash) require.True(t, it.Next()) - require.Equal(t, time.Unix(1, 0).UnixNano(), it.Sample().Timestamp) - require.Equal(t, 1., it.Sample().Value) - require.Equal(t, xxhash.Sum64String("2"), it.Sample().Hash) + require.Equal(t, time.Unix(1, 0).UnixNano(), it.At().Timestamp) + require.Equal(t, 1., it.At().Value) + require.Equal(t, xxhash.Sum64String("2"), it.At().Hash) require.True(t, it.Next()) - require.Equal(t, time.Unix(2, 0).UnixNano(), it.Sample().Timestamp) - require.Equal(t, 1., it.Sample().Value) - require.Equal(t, xxhash.Sum64String("3"), it.Sample().Hash) + require.Equal(t, time.Unix(2, 0).UnixNano(), it.At().Timestamp) + require.Equal(t, 1., it.At().Value) + require.Equal(t, xxhash.Sum64String("3"), it.At().Hash) } diff --git a/pkg/storage/bloom/v1/dedupe.go b/pkg/iter/v2/dedupe.go similarity index 95% rename from pkg/storage/bloom/v1/dedupe.go rename to pkg/iter/v2/dedupe.go index 2e1a7cca42f3..7ea73480ca67 100644 --- a/pkg/storage/bloom/v1/dedupe.go +++ b/pkg/iter/v2/dedupe.go @@ -1,4 +1,4 @@ -package v1 +package v2 // DedupeIter is a deduplicating iterator which creates an Iterator[B] // from a sequence of Iterator[A]. @@ -6,7 +6,7 @@ type DedupeIter[A, B any] struct { eq func(A, B) bool // equality check from func(A) B // convert A to B, used on first element merge func(A, B) B // merge A into B - itr PeekingIterator[A] + itr PeekIterator[A] tmp B } @@ -18,7 +18,7 @@ func NewDedupingIter[A, B any]( eq func(A, B) bool, from func(A) B, merge func(A, B) B, - itr PeekingIterator[A], + itr PeekIterator[A], ) *DedupeIter[A, B] { return &DedupeIter[A, B]{ eq: eq, diff --git a/pkg/iter/v2/interface.go b/pkg/iter/v2/interface.go new file mode 100644 index 000000000000..05aee30f390f --- /dev/null +++ b/pkg/iter/v2/interface.go @@ -0,0 +1,64 @@ +package v2 + +// Iterator is the basic iterator type with the common functions for advancing +// and retrieving the current value. +// +// General usage of the iterator: +// +// for it.Next() { +// curr := it.At() +// // do something +// } +// if it.Err() != nil { +// // do something +// } +type Iterator[T any] interface { + Next() bool + Err() error + At() T +} + +// Iterators with one single added functionality. + +type SizedIterator[T any] interface { + Iterator[T] + Remaining() int // remaining +} + +type PeekIterator[T any] interface { + Iterator[T] + Peek() (T, bool) +} + +type SeekIterator[K, V any] interface { + Iterator[V] + Seek(K) error +} + +type CloseIterator[T any] interface { + Iterator[T] + Close() error +} + +type CountIterator[T any] interface { + Iterator[T] + Count() int +} + +type ResetIterator[T any] interface { + Reset() error + Iterator[T] +} + +// Iterators which are an intersection type of two or more iterators with a +// single added functionality. 
+ +type PeekCloseIterator[T any] interface { + PeekIterator[T] + CloseIterator[T] +} + +type CloseResetIterator[T any] interface { + CloseIterator[T] + ResetIterator[T] +} diff --git a/pkg/iter/v2/iter.go b/pkg/iter/v2/iter.go new file mode 100644 index 000000000000..45e17fcad5ac --- /dev/null +++ b/pkg/iter/v2/iter.go @@ -0,0 +1,228 @@ +package v2 + +import ( + "context" + "io" +) + +type PeekIter[T any] struct { + itr Iterator[T] + + // the first call to Next() will populate cur & next + init bool + zero T // zero value of T for returning empty Peek's + cur, next *T +} + +func NewPeekIter[T any](itr Iterator[T]) *PeekIter[T] { + return &PeekIter[T]{itr: itr} +} + +// populates the first element so Peek can be used and subsequent Next() +// calls will work as expected +func (it *PeekIter[T]) ensureInit() { + if it.init { + return + } + if it.itr.Next() { + at := it.itr.At() + it.next = &at + } + it.init = true +} + +// load the next element and return the cached one +func (it *PeekIter[T]) cacheNext() { + it.cur = it.next + if it.cur != nil && it.itr.Next() { + at := it.itr.At() + it.next = &at + } else { + it.next = nil + } +} + +func (it *PeekIter[T]) Next() bool { + it.ensureInit() + it.cacheNext() + return it.cur != nil +} + +func (it *PeekIter[T]) Peek() (T, bool) { + it.ensureInit() + if it.next == nil { + return it.zero, false + } + return *it.next, true +} + +func (it *PeekIter[T]) Err() error { + return it.itr.Err() +} + +func (it *PeekIter[T]) At() T { + return *it.cur +} + +type SliceIter[T any] struct { + cur int + xs []T +} + +func NewSliceIter[T any](xs []T) *SliceIter[T] { + return &SliceIter[T]{xs: xs, cur: -1} +} + +func (it *SliceIter[T]) Remaining() int { + return max(0, len(it.xs)-(it.cur+1)) +} + +func (it *SliceIter[T]) Next() bool { + it.cur++ + return it.cur < len(it.xs) +} + +func (it *SliceIter[T]) Err() error { + return nil +} + +func (it *SliceIter[T]) At() T { + return it.xs[it.cur] +} + +type MapIter[A any, B any] struct { + Iterator[A] + f func(A) B +} + +func NewMapIter[A any, B any](src Iterator[A], f func(A) B) *MapIter[A, B] { + return &MapIter[A, B]{Iterator: src, f: f} +} + +func (it *MapIter[A, B]) At() B { + return it.f(it.Iterator.At()) +} + +type EmptyIter[T any] struct { + zero T +} + +func (it *EmptyIter[T]) Next() bool { + return false +} + +func (it *EmptyIter[T]) Err() error { + return nil +} + +func (it *EmptyIter[T]) At() T { + return it.zero +} + +func (it *EmptyIter[T]) Peek() (T, bool) { + return it.zero, false +} + +func (it *EmptyIter[T]) Remaining() int { + return 0 +} + +// noop +func (it *EmptyIter[T]) Reset() {} + +func NewEmptyIter[T any]() *EmptyIter[T] { + return &EmptyIter[T]{} +} + +type CancellableIter[T any] struct { + ctx context.Context + Iterator[T] +} + +func (cii *CancellableIter[T]) Next() bool { + select { + case <-cii.ctx.Done(): + return false + default: + return cii.Iterator.Next() + } +} + +func (cii *CancellableIter[T]) Err() error { + if err := cii.ctx.Err(); err != nil { + return err + } + return cii.Iterator.Err() +} + +func NewCancelableIter[T any](ctx context.Context, itr Iterator[T]) *CancellableIter[T] { + return &CancellableIter[T]{ctx: ctx, Iterator: itr} +} + +func NewCloseableIterator[T io.Closer](itr Iterator[T]) *CloseIter[T] { + return &CloseIter[T]{itr} +} + +type CloseIter[T io.Closer] struct { + Iterator[T] +} + +func (i *CloseIter[T]) Close() error { + return i.At().Close() +} + +type PeekCloseIter[T any] struct { + *PeekIter[T] + close func() error +} + +func NewPeekCloseIter[T any](itr 
CloseIterator[T]) *PeekCloseIter[T] { + return &PeekCloseIter[T]{PeekIter: NewPeekIter[T](itr), close: itr.Close} +} + +func (it *PeekCloseIter[T]) Close() error { + return it.close() +} + +type Predicate[T any] func(T) bool + +func NewFilterIter[T any](it Iterator[T], p Predicate[T]) *FilterIter[T] { + return &FilterIter[T]{ + Iterator: it, + match: p, + } +} + +type FilterIter[T any] struct { + Iterator[T] + match Predicate[T] +} + +func (i *FilterIter[T]) Next() bool { + hasNext := i.Iterator.Next() + for hasNext && !i.match(i.Iterator.At()) { + hasNext = i.Iterator.Next() + } + return hasNext +} + +type CounterIter[T any] struct { + Iterator[T] // the underlying iterator + count int +} + +func NewCounterIter[T any](itr Iterator[T]) *CounterIter[T] { + return &CounterIter[T]{Iterator: itr} +} + +func (it *CounterIter[T]) Next() bool { + if it.Iterator.Next() { + it.count++ + return true + } + return false +} + +func (it *CounterIter[T]) Count() int { + return it.count +} diff --git a/pkg/storage/bloom/v1/util_test.go b/pkg/iter/v2/iter_test.go similarity index 92% rename from pkg/storage/bloom/v1/util_test.go rename to pkg/iter/v2/iter_test.go index 8af93231313b..054a539f9eab 100644 --- a/pkg/storage/bloom/v1/util_test.go +++ b/pkg/iter/v2/iter_test.go @@ -1,4 +1,4 @@ -package v1 +package v2 import ( "testing" @@ -9,7 +9,7 @@ import ( func TestPeekingIterator(t *testing.T) { t.Parallel() data := []int{1, 2, 3, 4, 5} - itr := NewPeekingIter[int](NewSliceIter[int](data)) + itr := NewPeekIter[int](NewSliceIter[int](data)) for i := 0; i < len(data)*2; i++ { if i%2 == 0 { @@ -32,7 +32,7 @@ func TestCounterIter(t *testing.T) { data := []int{1, 2, 3, 4, 5} itr := NewCounterIter[int](NewSliceIter[int](data)) - peekItr := NewPeekingIter[int](itr) + peekItr := NewPeekIter[int](itr) // Consume the outer iter and use peek for { diff --git a/pkg/storage/bloom/v1/ordering.go b/pkg/iter/v2/ordering.go similarity index 89% rename from pkg/storage/bloom/v1/ordering.go rename to pkg/iter/v2/ordering.go index e534fbf29bfc..4231f249fddd 100644 --- a/pkg/storage/bloom/v1/ordering.go +++ b/pkg/iter/v2/ordering.go @@ -1,4 +1,4 @@ -package v1 +package v2 type Ord byte @@ -36,21 +36,21 @@ func NewOrderable[T any](val T, cmp func(T, T) Ord) OrderedImpl[T] { } type UnlessIterator[T Orderable[T]] struct { - a, b PeekingIterator[T] + a, b PeekIterator[T] } // Iterators _must_ be sorted. Defers to underlying `PeekingIterator` implementation // for both iterators if they implement it. 
func NewUnlessIterator[T Orderable[T]](a, b Iterator[T]) *UnlessIterator[T] { - var peekA, peekB PeekingIterator[T] + var peekA, peekB PeekIterator[T] var ok bool - if peekA, ok = a.(PeekingIterator[T]); !ok { - peekA = NewPeekingIter(a) + if peekA, ok = a.(PeekIterator[T]); !ok { + peekA = NewPeekIter(a) } - if peekB, ok = b.(PeekingIterator[T]); !ok { - peekB = NewPeekingIter(b) + if peekB, ok = b.(PeekIterator[T]); !ok { + peekB = NewPeekIter(b) } return &UnlessIterator[T]{ diff --git a/pkg/storage/bloom/v1/ordering_test.go b/pkg/iter/v2/ordering_test.go similarity index 83% rename from pkg/storage/bloom/v1/ordering_test.go rename to pkg/iter/v2/ordering_test.go index 2da09a1964b0..6a2e81abae01 100644 --- a/pkg/storage/bloom/v1/ordering_test.go +++ b/pkg/iter/v2/ordering_test.go @@ -1,7 +1,9 @@ -package v1 +package v2 import ( "testing" + + "github.com/stretchr/testify/require" ) func TestOrdering(t *testing.T) { @@ -86,3 +88,14 @@ func TestOrdering(t *testing.T) { }) } } + +func EqualIterators[T any](t *testing.T, test func(a, b T), expected, actual Iterator[T]) { + for expected.Next() { + require.True(t, actual.Next()) + a, b := expected.At(), actual.At() + test(a, b) + } + require.False(t, actual.Next()) + require.Nil(t, expected.Err()) + require.Nil(t, actual.Err()) +} diff --git a/pkg/logcli/client/file.go b/pkg/logcli/client/file.go index eabf29aef255..1267681d75c8 100644 --- a/pkg/logcli/client/file.go +++ b/pkg/logcli/client/file.go @@ -278,7 +278,7 @@ func newFileIterator( }) if len(lines) == 0 { - return iter.NoopIterator, nil + return iter.NoopEntryIterator, nil } streams := map[uint64]*logproto.Stream{} @@ -317,7 +317,7 @@ func newFileIterator( } if len(streams) == 0 { - return iter.NoopIterator, nil + return iter.NoopEntryIterator, nil } streamResult := make([]logproto.Stream, 0, len(streams)) diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index 8b46ed4d833f..f35a1b397a3b 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -540,7 +540,7 @@ func readStreams(i iter.EntryIterator, size uint32, dir logproto.Direction, inte // value here because many unit tests start at time.Unix(0,0) lastEntry := lastEntryMinTime for respSize < size && i.Next() { - streamLabels, entry := i.Labels(), i.Entry() + streamLabels, entry := i.Labels(), i.At() forwardShouldOutput := dir == logproto.FORWARD && (entry.Timestamp.Equal(lastEntry.Add(interval)) || entry.Timestamp.After(lastEntry.Add(interval))) @@ -559,7 +559,7 @@ func readStreams(i iter.EntryIterator, size uint32, dir logproto.Direction, inte streams[streamLabels] = stream } stream.Entries = append(stream.Entries, entry) - lastEntry = i.Entry().Timestamp + lastEntry = i.At().Timestamp respSize++ } } @@ -569,7 +569,7 @@ func readStreams(i iter.EntryIterator, size uint32, dir logproto.Direction, inte result = append(result, *stream) } sort.Sort(result) - return result, i.Error() + return result, i.Err() } type groupedAggregation struct { diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go index 48e0e2832303..274276a02c8f 100644 --- a/pkg/logql/engine_test.go +++ b/pkg/logql/engine_test.go @@ -35,7 +35,7 @@ import ( var ( testSize = int64(300) - ErrMock = errors.New("mock error") + ErrMock = errors.New("error") ErrMockMultiple = util.MultiError{ErrMock, ErrMock} ) @@ -2298,13 +2298,13 @@ type statsQuerier struct{} func (statsQuerier) SelectLogs(ctx context.Context, _ SelectLogParams) (iter.EntryIterator, error) { st := stats.FromContext(ctx) st.AddDecompressedBytes(1) - return iter.NoopIterator, nil + return 
iter.NoopEntryIterator, nil } func (statsQuerier) SelectSamples(ctx context.Context, _ SelectSampleParams) (iter.SampleIterator, error) { st := stats.FromContext(ctx) st.AddDecompressedBytes(1) - return iter.NoopIterator, nil + return iter.NoopSampleIterator, nil } func TestEngine_Stats(t *testing.T) { @@ -2332,14 +2332,14 @@ func (metaQuerier) SelectLogs(ctx context.Context, _ SelectLogParams) (iter.Entr Values: []string{"value"}, }, }) - return iter.NoopIterator, nil + return iter.NoopEntryIterator, nil } func (metaQuerier) SelectSamples(ctx context.Context, _ SelectSampleParams) (iter.SampleIterator, error) { _ = metadata.JoinHeaders(ctx, []*definitions.PrometheusResponseHeader{ {Name: "Header", Values: []string{"value"}}, }) - return iter.NoopIterator, nil + return iter.NoopSampleIterator, nil } func TestEngine_Metadata(t *testing.T) { @@ -2409,7 +2409,7 @@ func TestStepEvaluator_Error(t *testing.T) { samples: func() []iter.SampleIterator { return []iter.SampleIterator{ iter.NewSeriesIterator(newSeries(testSize, identity, `{app="foo"}`)), - NewErrorSampleIterator(), + iter.ErrorSampleIterator, } }, }, @@ -2422,7 +2422,7 @@ func TestStepEvaluator_Error(t *testing.T) { entries: func() []iter.EntryIterator { return []iter.EntryIterator{ iter.NewStreamIterator(newStream(testSize, identity, `{app="foo"}`)), - NewErrorEntryIterator(), + iter.ErrorEntryIterator, } }, }, @@ -2435,7 +2435,7 @@ func TestStepEvaluator_Error(t *testing.T) { samples: func() []iter.SampleIterator { return []iter.SampleIterator{ iter.NewSeriesIterator(newSeries(testSize, identity, `{app="foo"}`)), - NewErrorSampleIterator(), + iter.ErrorSampleIterator, } }, }, @@ -2745,7 +2745,7 @@ func (q *querierRecorder) SelectSamples(_ context.Context, p SelectSampleParams) } recordID := paramsID(p) if len(q.series) == 0 { - return iter.NoopIterator, nil + return iter.NoopSampleIterator, nil } series, ok := q.series[recordID] if !ok { @@ -2926,30 +2926,3 @@ func inverse(g generator) generator { return g(-i) } } - -// errorIterator -type errorIterator struct{} - -// NewErrorSampleIterator return an sample iterator that errors out -func NewErrorSampleIterator() iter.SampleIterator { - return &errorIterator{} -} - -// NewErrorEntryIterator return an entry iterator that errors out -func NewErrorEntryIterator() iter.EntryIterator { - return &errorIterator{} -} - -func (errorIterator) Next() bool { return false } - -func (errorIterator) Error() error { return ErrMock } - -func (errorIterator) Labels() string { return "" } - -func (errorIterator) StreamHash() uint64 { return 0 } - -func (errorIterator) Entry() logproto.Entry { return logproto.Entry{} } - -func (errorIterator) Sample() logproto.Sample { return logproto.Sample{} } - -func (errorIterator) Close() error { return nil } diff --git a/pkg/logql/range_vector.go b/pkg/logql/range_vector.go index 180a1bde27ca..f551c6512db4 100644 --- a/pkg/logql/range_vector.go +++ b/pkg/logql/range_vector.go @@ -133,7 +133,7 @@ func (r *batchRangeVectorIterator) Close() error { } func (r *batchRangeVectorIterator) Error() error { - return r.iter.Error() + return r.iter.Err() } // popBack removes all entries out of the current window from the back. @@ -552,7 +552,7 @@ func (r *streamRangeVectorIterator) Close() error { } func (r *streamRangeVectorIterator) Error() error { - return r.iter.Error() + return r.iter.Err() } // load the next sample range window. 
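The hunks above show the migration pattern this patch applies everywhere: a wrapper type keeps its exported Error() method, and only the delegation changes because the wrapped iterator now exposes Err(). A minimal, hypothetical sketch of that shape (the type names below are illustrative stand-ins, not the patch's types):

package example

// sampleIterator stands in for the renamed v2-style interface: Err()
// instead of Error().
type sampleIterator interface {
    Next() bool
    Err() error
}

// rangeVectorIterator mirrors batchRangeVectorIterator above: its exported
// Error() is unchanged for callers, and only the forwarded call moves from
// iter.Error() to iter.Err().
type rangeVectorIterator struct {
    iter sampleIterator
}

func (r *rangeVectorIterator) Error() error {
    return r.iter.Err()
}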
diff --git a/pkg/pattern/instance.go b/pkg/pattern/instance.go index f19b0373858d..21520ae7d26d 100644 --- a/pkg/pattern/instance.go +++ b/pkg/pattern/instance.go @@ -22,7 +22,7 @@ import ( "github.com/grafana/loki/v3/pkg/util" "github.com/grafana/loki/v3/pkg/util/spanlogger" - loki_iter "github.com/grafana/loki/v3/pkg/iter" + "github.com/grafana/loki/v3/pkg/iter" pattern_iter "github.com/grafana/loki/v3/pkg/pattern/iter" ) @@ -115,10 +115,10 @@ func (i *instance) QuerySample( ctx context.Context, expr syntax.SampleExpr, req *logproto.QuerySamplesRequest, -) (loki_iter.SampleIterator, error) { +) (iter.SampleIterator, error) { if !i.aggregationCfg.Enabled { // Should never get here, but this will prevent nil pointer panics in test - return loki_iter.NoopIterator, nil + return iter.NoopSampleIterator, nil } from, through := util.RoundToMilliseconds(req.Start, req.End) @@ -132,11 +132,11 @@ func (i *instance) QuerySample( return nil, err } - var iters []loki_iter.SampleIterator + var iters []iter.SampleIterator err = i.forMatchingStreams( selector.Matchers(), func(stream *stream) error { - var iter loki_iter.SampleIterator + var iter iter.SampleIterator var err error iter, err = stream.SampleIterator(ctx, expr, from, through, step) if err != nil { diff --git a/pkg/pattern/instance_test.go b/pkg/pattern/instance_test.go index 07351089ca8d..2db0c2797fa4 100644 --- a/pkg/pattern/instance_test.go +++ b/pkg/pattern/instance_test.go @@ -113,7 +113,7 @@ func TestInstance_QuerySample(t *testing.T) { next := iter.Next() require.True(t, next) - sample := iter.Sample() + sample := iter.At() require.Equal(t, float64(4), sample.Value) require.Equal(t, model.Time(thirdPoint).UnixNano(), sample.Timestamp) @@ -132,7 +132,7 @@ func TestInstance_QuerySample(t *testing.T) { next := iter.Next() require.True(t, next) - sample := iter.Sample() + sample := iter.At() require.Equal(t, float64(80), sample.Value) require.Equal(t, model.Time(thirdPoint).UnixNano(), sample.Timestamp) @@ -273,7 +273,7 @@ func TestInstance_QuerySample(t *testing.T) { next := iter.Next() require.True(t, next) - sample := iter.Sample() + sample := iter.At() require.Equal(t, model.Time(thirdStep).UnixNano(), sample.Timestamp) require.Equal(t, float64(8), sample.Value) require.Equal(t, expectedLabels.String(), iter.Labels()) @@ -284,7 +284,7 @@ func TestInstance_QuerySample(t *testing.T) { next = iter.Next() require.True(t, next) - sample = iter.Sample() + sample = iter.At() require.Equal(t, model.Time(fifthStep).UnixNano(), sample.Timestamp) require.Equal(t, float64(6), sample.Value) require.Equal(t, expectedLabels.String(), iter.Labels()) @@ -315,7 +315,7 @@ func TestInstance_QuerySample(t *testing.T) { next := iter.Next() require.True(t, next) - sample := iter.Sample() + sample := iter.At() require.Equal(t, model.Time(thirdStep).UnixNano(), sample.Timestamp) require.Equal(t, float64(8), sample.Value) require.Equal(t, expectedLabels.String(), iter.Labels()) @@ -324,7 +324,7 @@ func TestInstance_QuerySample(t *testing.T) { next = iter.Next() require.True(t, next) - sample = iter.Sample() + sample = iter.At() require.Equal(t, model.Time(fourthStep).UnixNano(), sample.Timestamp) require.Equal(t, float64(8), sample.Value) require.Equal(t, expectedLabels.String(), iter.Labels()) @@ -333,7 +333,7 @@ func TestInstance_QuerySample(t *testing.T) { next = iter.Next() require.True(t, next) - sample = iter.Sample() + sample = iter.At() require.Equal(t, model.Time(fifthStep).UnixNano(), sample.Timestamp) require.Equal(t, float64(14), sample.Value) 
require.Equal(t, expectedLabels.String(), iter.Labels()) diff --git a/pkg/pattern/iter/batch.go b/pkg/pattern/iter/batch.go index 9d091429ce09..c61fa6ee1700 100644 --- a/pkg/pattern/iter/batch.go +++ b/pkg/pattern/iter/batch.go @@ -56,7 +56,7 @@ func ReadMetricsBatch(it iter.SampleIterator, batchSize int, logger log.Logger) series[hash] = s } - s.Samples = append(s.Samples, it.Sample()) + s.Samples = append(s.Samples, it.At()) series[hash] = s } @@ -68,7 +68,7 @@ func ReadMetricsBatch(it iter.SampleIterator, batchSize int, logger log.Logger) level.Debug(logger).Log("msg", "appending series", "s", fmt.Sprintf("%v", s)) result.Series = append(result.Series, s) } - return &result, it.Error() + return &result, it.Err() } // ReadAllSamples reads all samples from the given iterator. It is only used in tests. diff --git a/pkg/pattern/iter/iterator.go b/pkg/pattern/iter/iterator.go index 5a277c0f2734..7bbdb0ed2738 100644 --- a/pkg/pattern/iter/iterator.go +++ b/pkg/pattern/iter/iterator.go @@ -6,6 +6,7 @@ import ( var Empty Iterator = &emptyIterator{} +// TODO(chaudum): inline v2.Iterator[logproto.PatternSample] type Iterator interface { Next() bool @@ -17,6 +18,7 @@ type Iterator interface { } func NewSlice(pattern string, s []logproto.PatternSample) Iterator { + // TODO(chaudum): replace with v2.NewSliceIter() return &sliceIterator{ values: s, pattern: pattern, diff --git a/pkg/pattern/iter/merge_sample.go b/pkg/pattern/iter/merge_sample.go index 7e8f250b48f2..a1f15c5c33c4 100644 --- a/pkg/pattern/iter/merge_sample.go +++ b/pkg/pattern/iter/merge_sample.go @@ -78,7 +78,7 @@ func (i *sumMergeSampleIterator) requeue(ei iter.SampleIterator, advanced bool) return } - if err := ei.Error(); err != nil { + if err := ei.Err(); err != nil { i.errs = append(i.errs, err) } util.LogError("closing iterator", ei.Close) @@ -98,7 +98,7 @@ func (i *sumMergeSampleIterator) Next() bool { // shortcut for the last iterator. if i.heap.Len() == 1 { - i.curr.Sample = i.heap.Peek().Sample() + i.curr.Sample = i.heap.Peek().At() i.curr.labels = i.heap.Peek().Labels() i.curr.streamHash = i.heap.Peek().StreamHash() if !i.heap.Peek().Next() { @@ -112,7 +112,7 @@ func (i *sumMergeSampleIterator) Next() bool { // heap with the same timestamp, and add them to the buffer to sum their values.
for i.heap.Len() > 0 { next := i.heap.Peek() - sample := next.Sample() + sample := next.At() if len(i.buffer) > 0 && (i.buffer[0].streamHash != next.StreamHash() || i.buffer[0].Timestamp != sample.Timestamp) { @@ -169,7 +169,7 @@ func (i *sumMergeSampleIterator) nextFromBuffer() { i.buffer = i.buffer[numSamples:] } -func (i *sumMergeSampleIterator) Sample() logproto.Sample { +func (i *sumMergeSampleIterator) At() logproto.Sample { return i.curr.Sample } @@ -181,7 +181,7 @@ func (i *sumMergeSampleIterator) StreamHash() uint64 { return i.curr.streamHash } -func (i *sumMergeSampleIterator) Error() error { +func (i *sumMergeSampleIterator) Err() error { switch len(i.errs) { case 0: return nil diff --git a/pkg/pattern/iter/merge_sample_test.go b/pkg/pattern/iter/merge_sample_test.go index 3f619ca44c3a..298b5ac1c17b 100644 --- a/pkg/pattern/iter/merge_sample_test.go +++ b/pkg/pattern/iter/merge_sample_test.go @@ -21,13 +21,13 @@ func TestNewSumMergeSampleIterator(t *testing.T) { for i := 1; i < 4; i++ { require.True(t, it.Next(), i) require.Equal(t, `{foo="car"}`, it.Labels(), i) - require.Equal(t, sample(int64(i), float64(i)), it.Sample(), i) + require.Equal(t, sample(int64(i), float64(i)), it.At(), i) require.True(t, it.Next(), i) require.Equal(t, `{foo="var"}`, it.Labels(), i) - require.Equal(t, sample(int64(i), float64(i)), it.Sample(), i) + require.Equal(t, sample(int64(i), float64(i)), it.At(), i) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.NoError(t, it.Close()) }) @@ -45,13 +45,13 @@ func TestNewSumMergeSampleIterator(t *testing.T) { for i := 1; i < 4; i++ { require.True(t, it.Next(), i) require.Equal(t, `{foo="car"}`, it.Labels(), i) - require.Equal(t, sample(int64(i), float64(i*3)), it.Sample(), i) + require.Equal(t, sample(int64(i), float64(i*3)), it.At(), i) require.True(t, it.Next(), i) require.Equal(t, `{foo="var"}`, it.Labels(), i) - require.Equal(t, sample(int64(i), float64(i*3)), it.Sample(), i) + require.Equal(t, sample(int64(i), float64(i*3)), it.At(), i) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.NoError(t, it.Close()) }) @@ -73,13 +73,13 @@ func TestNewSumMergeSampleIterator(t *testing.T) { for i := 1; i < 4; i++ { require.True(t, it.Next(), i) require.Equal(t, ``, it.Labels(), i) - require.Equal(t, sample(int64(i), float64(i)), it.Sample(), i) + require.Equal(t, sample(int64(i), float64(i)), it.At(), i) require.True(t, it.Next(), i) require.Equal(t, ``, it.Labels(), i) - require.Equal(t, sample(int64(i), float64(i)), it.Sample(), i) + require.Equal(t, sample(int64(i), float64(i)), it.At(), i) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.NoError(t, it.Close()) }) @@ -121,13 +121,13 @@ func TestNewSumMergeSampleIterator(t *testing.T) { for i := 1; i < 4; i++ { require.True(t, it.Next(), i) require.Equal(t, ``, it.Labels(), i) - require.Equal(t, sample(int64(i), float64(i*3)), it.Sample(), i) + require.Equal(t, sample(int64(i), float64(i*3)), it.At(), i) require.True(t, it.Next(), i) require.Equal(t, ``, it.Labels(), i) - require.Equal(t, sample(int64(i), float64(i*3)), it.Sample(), i) + require.Equal(t, sample(int64(i), float64(i*3)), it.At(), i) } require.False(t, it.Next()) - require.NoError(t, it.Error()) + require.NoError(t, it.Err()) require.NoError(t, it.Close()) }) t.Run("it sums the values from two identical points", func(t *testing.T) { @@ -146,15 +146,15 @@ func TestNewSumMergeSampleIterator(t 
*testing.T) { require.True(t, it.Next()) require.Equal(t, `{foo="bar"}`, it.Labels()) - require.Equal(t, sample(1, 2), it.Sample()) + require.Equal(t, sample(1, 2), it.At()) require.True(t, it.Next()) require.Equal(t, `{foo="bar"}`, it.Labels()) - require.Equal(t, sample(2, 4), it.Sample()) + require.Equal(t, sample(2, 4), it.At()) require.True(t, it.Next()) require.Equal(t, `{foo="bar"}`, it.Labels()) - require.Equal(t, sample(3, 6), it.Sample()) + require.Equal(t, sample(3, 6), it.At()) require.False(t, it.Next()) }) @@ -189,31 +189,31 @@ func TestNewSumMergeSampleIterator(t *testing.T) { require.True(t, it.Next()) require.Equal(t, `{foo="baz"}`, it.Labels()) - require.Equal(t, sample(1, 1), it.Sample()) + require.Equal(t, sample(1, 1), it.At()) require.True(t, it.Next()) require.Equal(t, `{foo="bar"}`, it.Labels()) - require.Equal(t, sample(1, 1), it.Sample()) // first only + require.Equal(t, sample(1, 1), it.At()) // first only require.True(t, it.Next()) require.Equal(t, `{foo="baz"}`, it.Labels()) - require.Equal(t, sample(2, 2), it.Sample()) + require.Equal(t, sample(2, 2), it.At()) require.True(t, it.Next()) require.Equal(t, `{foo="bar"}`, it.Labels()) - require.Equal(t, sample(2, 4), it.Sample()) // merged + require.Equal(t, sample(2, 4), it.At()) // merged require.True(t, it.Next()) require.Equal(t, `{foo="baz"}`, it.Labels()) - require.Equal(t, sample(3, 4), it.Sample()) + require.Equal(t, sample(3, 4), it.At()) require.True(t, it.Next()) require.Equal(t, `{foo="bar"}`, it.Labels()) - require.Equal(t, sample(3, 3), it.Sample()) + require.Equal(t, sample(3, 3), it.At()) require.True(t, it.Next()) require.Equal(t, `{foo="bar"}`, it.Labels()) - require.Equal(t, sample(4, 4), it.Sample()) // second only + require.Equal(t, sample(4, 4), it.At()) // second only require.False(t, it.Next()) }) diff --git a/pkg/pattern/iter/query_client.go b/pkg/pattern/iter/query_client.go index 997a2623b6f1..8bb82b9be750 100644 --- a/pkg/pattern/iter/query_client.go +++ b/pkg/pattern/iter/query_client.go @@ -99,8 +99,8 @@ func (i *querySamplesClientIterator) Next() bool { return true } -func (i *querySamplesClientIterator) Sample() logproto.Sample { - return i.curr.Sample() +func (i *querySamplesClientIterator) At() logproto.Sample { + return i.curr.At() } func (i *querySamplesClientIterator) StreamHash() uint64 { @@ -111,7 +111,7 @@ func (i *querySamplesClientIterator) Labels() string { return i.curr.Labels() } -func (i *querySamplesClientIterator) Error() error { +func (i *querySamplesClientIterator) Err() error { return i.err } diff --git a/pkg/querier/multi_tenant_querier_test.go b/pkg/querier/multi_tenant_querier_test.go index 38f190562ea1..cac6f981651d 100644 --- a/pkg/querier/multi_tenant_querier_test.go +++ b/pkg/querier/multi_tenant_querier_test.go @@ -97,7 +97,7 @@ func TestMultiTenantQuerier_SelectLogs(t *testing.T) { entriesCount := 0 for iter.Next() { require.Equal(t, tc.expLabels[entriesCount], iter.Labels()) - require.Equal(t, tc.expLines[entriesCount], iter.Entry().Line) + require.Equal(t, tc.expLines[entriesCount], iter.At().Line) entriesCount++ } require.Equalf(t, len(tc.expLabels), entriesCount, "Expected %d entries but got %d", len(tc.expLabels), entriesCount) @@ -256,7 +256,7 @@ func (it mockEntryIterator) Labels() string { return it.labels } -func (it mockEntryIterator) Entry() logproto.Entry { +func (it mockEntryIterator) At() logproto.Entry { return logproto.Entry{} } @@ -268,7 +268,7 @@ func (it mockEntryIterator) StreamHash() uint64 { return 0 } -func (it mockEntryIterator) 
Error() error { +func (it mockEntryIterator) Err() error { return nil } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 9f31cb9b82a3..f90331777377 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -1310,7 +1310,7 @@ func streamsForFieldDetection(i iter.EntryIterator, size uint32) (logqlmodel.Str // value here because many unit tests start at time.Unix(0,0) lastEntry := time.Unix(-100, 0) for respSize < size && i.Next() { - streamLabels, entry := i.Labels(), i.Entry() + streamLabels, entry := i.Labels(), i.At() // Always going backward as the direction for field detection is hard-coded to BACKWARD shouldOutput := entry.Timestamp.Equal(lastEntry) || entry.Timestamp.Before(lastEntry) @@ -1326,7 +1326,7 @@ func streamsForFieldDetection(i iter.EntryIterator, size uint32) (logqlmodel.Str streams[streamLabels] = stream } stream.Entries = append(stream.Entries, entry) - lastEntry = i.Entry().Timestamp + lastEntry = i.At().Timestamp respSize++ } } @@ -1336,5 +1336,5 @@ func streamsForFieldDetection(i iter.EntryIterator, size uint32) (logqlmodel.Str result = append(result, *stream) } sort.Sort(result) - return result, i.Error() + return result, i.Err() } diff --git a/pkg/querier/tail.go b/pkg/querier/tail.go index 1bdc01159ed6..56c5ef8f7e5e 100644 --- a/pkg/querier/tail.go +++ b/pkg/querier/tail.go @@ -259,7 +259,7 @@ func (t *Tailer) next() bool { return false } - t.currEntry = t.openStreamIterator.Entry() + t.currEntry = t.openStreamIterator.At() t.currLabels = t.openStreamIterator.Labels() t.recordStream(t.openStreamIterator.StreamHash()) diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index 6855368aefde..2097be3dec21 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -118,11 +118,11 @@ func TestNonMetricQuery(t *testing.T) { type FakeQuerier struct{} func (q *FakeQuerier) SelectLogs(context.Context, logql.SelectLogParams) (iter.EntryIterator, error) { - return iter.NoopIterator, nil + return iter.NoopEntryIterator, nil } func (q *FakeQuerier) SelectSamples(context.Context, logql.SelectSampleParams) (iter.SampleIterator, error) { - return iter.NoopIterator, nil + return iter.NoopSampleIterator, nil } type fakeChecker struct{} diff --git a/pkg/storage/batch.go b/pkg/storage/batch.go index ff47345670ac..46f708d09155 100644 --- a/pkg/storage/batch.go +++ b/pkg/storage/batch.go @@ -352,12 +352,12 @@ func (it *logBatchIterator) StreamHash() uint64 { return it.curr.StreamHash() } -func (it *logBatchIterator) Error() error { +func (it *logBatchIterator) Err() error { if it.err != nil { return it.err } - if it.curr != nil && it.curr.Error() != nil { - return it.curr.Error() + if it.curr != nil && it.curr.Err() != nil { + return it.curr.Err() } if it.ctx.Err() != nil { return it.ctx.Err() @@ -373,8 +373,8 @@ func (it *logBatchIterator) Close() error { return nil } -func (it *logBatchIterator) Entry() logproto.Entry { - return it.curr.Entry() +func (it *logBatchIterator) At() logproto.Entry { + return it.curr.At() } func (it *logBatchIterator) Next() bool { @@ -497,12 +497,12 @@ func (it *sampleBatchIterator) StreamHash() uint64 { return it.curr.StreamHash() } -func (it *sampleBatchIterator) Error() error { +func (it *sampleBatchIterator) Err() error { if it.err != nil { return it.err } - if it.curr != nil && it.curr.Error() != nil { - return it.curr.Error() + if it.curr != nil && it.curr.Err() != nil { + return it.curr.Err() } if it.ctx.Err() != nil { return it.ctx.Err() @@ -518,8 +518,8 @@ func (it *sampleBatchIterator) Close() 
error { return nil } -func (it *sampleBatchIterator) Sample() logproto.Sample { - return it.curr.Sample() +func (it *sampleBatchIterator) At() logproto.Sample { + return it.curr.At() } func (it *sampleBatchIterator) Next() bool { diff --git a/pkg/storage/batch_test.go b/pkg/storage/batch_test.go index e041ad186fe8..0159c20a19f6 100644 --- a/pkg/storage/batch_test.go +++ b/pkg/storage/batch_test.go @@ -1745,7 +1745,7 @@ func TestBatchCancel(t *testing.T) { //nolint:revive for it.Next() { } - require.Equal(t, context.Canceled, it.Error()) + require.Equal(t, context.Canceled, it.Err()) } var entry logproto.Entry @@ -1784,7 +1784,7 @@ func Benchmark_store_OverlappingChunks(b *testing.B) { b.Fatal(err) } for it.Next() { - entry = it.Entry() + entry = it.At() } if err := it.Close(); err != nil { b.Fatal(err) diff --git a/pkg/storage/bloom/v1/archive_test.go b/pkg/storage/bloom/v1/archive_test.go index 63a321bca4f3..8ebcdb9aebcc 100644 --- a/pkg/storage/bloom/v1/archive_test.go +++ b/pkg/storage/bloom/v1/archive_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/v3/pkg/chunkenc" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" ) func TestArchive(t *testing.T) { @@ -32,7 +33,7 @@ func TestArchive(t *testing.T) { ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBlooms](data) + itr := v2.NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.Nil(t, err) diff --git a/pkg/storage/bloom/v1/bloom_tester.go b/pkg/storage/bloom/v1/bloom_tester.go index 80178a54deab..349f3691f6ea 100644 --- a/pkg/storage/bloom/v1/bloom_tester.go +++ b/pkg/storage/bloom/v1/bloom_tester.go @@ -5,6 +5,7 @@ import ( "github.com/grafana/regexp" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logql/log" "github.com/grafana/loki/v3/pkg/logql/log/pattern" "github.com/grafana/loki/v3/pkg/logql/syntax" @@ -176,7 +177,7 @@ func (n matchAllTest) MatchesWithPrefixBuf(_ filter.Checker, _ []byte, _ int) bo // Extracting this interface allows us to test the bloom filter without having to use the actual tokenizer // TODO: This should be moved to tokenizer.go type NGramBuilder interface { - Tokens(line string) Iterator[[]byte] + Tokens(line string) iter.Iterator[[]byte] N() int SkipFactor() int } diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go index f0365f7cc78d..7e3623aea933 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer.go @@ -5,12 +5,12 @@ import ( "github.com/go-kit/log/level" + "github.com/grafana/loki/pkg/push" + "github.com/grafana/loki/v3/pkg/iter" + v2iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" - - "github.com/grafana/loki/pkg/push" - "github.com/grafana/loki/v3/pkg/util/encoding" util_log "github.com/grafana/loki/v3/pkg/util/log" ) @@ -100,8 +100,8 @@ func (bt *BloomTokenizer) newBloom() *Bloom { // Populates a bloom filter(s) with the tokens from the given chunks. // Called once per series func (bt *BloomTokenizer) Populate( - blooms SizedIterator[*Bloom], - chks Iterator[ChunkRefWithIter], + blooms v2iter.SizedIterator[*Bloom], + chks v2iter.Iterator[ChunkRefWithIter], ch chan *BloomCreation, ) { clear(bt.cache) // MUST always clear the cache before starting a new series @@ -187,7 +187,7 @@ func (bt *BloomTokenizer) sendBloom( // so we can advance the iterator only after we're sure the bloom has accepted the line. 
// This is because the _line_ is the atom in Loki's data model and a query must either match (or not) an individual line. // Therefore, we index entire lines into a bloom to ensure a lookups are accurate. -func (bt *BloomTokenizer) addChunkToBloom(bloom *Bloom, ref ChunkRef, entryIter PeekingIterator[push.Entry]) (full bool, bytesAdded int) { +func (bt *BloomTokenizer) addChunkToBloom(bloom *Bloom, ref ChunkRef, entryIter v2iter.PeekIterator[push.Entry]) (full bool, bytesAdded int) { var ( tokenBuf, prefixLn = prefixedToken(bt.lineTokenizer.N(), ref, nil) tokens int @@ -204,7 +204,7 @@ outer: line := entry.Line chunkBytes += len(line) - tokenItrs := []Iterator[[]byte]{ + tokenItrs := []v2iter.Iterator[[]byte]{ // two iterators, one for the raw tokens and one for the chunk prefixed tokens. // Warning: the underlying line tokenizer (used in both iterators) uses the same buffer for tokens. // They are NOT SAFE for concurrent use. @@ -273,13 +273,13 @@ type entryIterAdapter struct { } func (a entryIterAdapter) At() logproto.Entry { - return a.EntryIterator.Entry() + return a.EntryIterator.At() } func (a entryIterAdapter) Err() error { - return a.EntryIterator.Error() + return a.EntryIterator.Err() } -func newPeekingEntryIterAdapter(itr iter.EntryIterator) *PeekIter[logproto.Entry] { - return NewPeekingIter[logproto.Entry](entryIterAdapter{itr}) +func newPeekingEntryIterAdapter(itr iter.EntryIterator) *v2iter.PeekIter[logproto.Entry] { + return v2iter.NewPeekIter[logproto.Entry](entryIterAdapter{itr}) } diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go index 0f837fbee27b..7023958eca11 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go @@ -14,6 +14,7 @@ import ( "github.com/grafana/loki/v3/pkg/chunkenc" "github.com/grafana/loki/v3/pkg/iter" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql/log" @@ -120,8 +121,8 @@ func TestTokenizerPopulate(t *testing.T) { blooms, err := populateAndConsumeBloom( bt, - NewSliceIter([]*Bloom{&bloom}), - NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, + v2.NewSliceIter([]*Bloom{&bloom}), + v2.NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, Itr: itr}}), ) require.NoError(t, err) @@ -155,8 +156,8 @@ func TestBloomTokenizerPopulateWithoutPreexistingBloom(t *testing.T) { blooms, err := populateAndConsumeBloom( bt, - NewEmptyIter[*Bloom](), - NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, + v2.NewEmptyIter[*Bloom](), + v2.NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, Itr: itr}}), ) require.NoError(t, err) @@ -211,12 +212,12 @@ func TestTokenizerPopulateWontExceedMaxSize(t *testing.T) { itr, err := chunkRefItrFromLines(line) require.NoError(t, err) go bt.Populate( - NewSliceIter([]*Bloom{ + v2.NewSliceIter([]*Bloom{ { *filter.NewScalableBloomFilter(1024, 0.01, 0.8), }, }), - NewSliceIter([]ChunkRefWithIter{ + v2.NewSliceIter([]ChunkRefWithIter{ { Ref: ChunkRef{}, Itr: itr, @@ -237,8 +238,8 @@ func TestTokenizerPopulateWontExceedMaxSize(t *testing.T) { func populateAndConsumeBloom( bt *BloomTokenizer, - blooms SizedIterator[*Bloom], - chks Iterator[ChunkRefWithIter], + blooms v2.SizedIterator[*Bloom], + chks v2.Iterator[ChunkRefWithIter], ) (res []*Bloom, err error) { var e multierror.MultiError ch := make(chan *BloomCreation) @@ -280,8 +281,8 @@ func BenchmarkPopulateSeriesWithBloom(b *testing.B) { _, err = populateAndConsumeBloom( bt, - NewSliceIter([]*Bloom{&bloom}), - 
NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, + v2.NewSliceIter([]*Bloom{&bloom}), + v2.NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, Itr: itr}}), ) require.NoError(b, err) @@ -298,8 +299,8 @@ func TestTokenizerClearsCacheBetweenPopulateCalls(t *testing.T) { itr, err := chunkRefItrFromLines(line) require.NoError(t, err) go bt.Populate( - NewEmptyIter[*Bloom](), - NewSliceIter([]ChunkRefWithIter{ + v2.NewEmptyIter[*Bloom](), + v2.NewSliceIter([]ChunkRefWithIter{ { Ref: ChunkRef{}, Itr: itr, diff --git a/pkg/storage/bloom/v1/bounds.go b/pkg/storage/bloom/v1/bounds.go index 1b482e46665b..d33a3dec6aa6 100644 --- a/pkg/storage/bloom/v1/bounds.go +++ b/pkg/storage/bloom/v1/bounds.go @@ -11,6 +11,7 @@ import ( "github.com/prometheus/common/model" "golang.org/x/exp/slices" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/util/encoding" ) @@ -242,7 +243,7 @@ func (mb MultiFingerprintBounds) Union(target FingerprintBounds) MultiFingerprin // unused, but illustrative type BoundedIter[V any] struct { - Iterator[V] + iter.Iterator[V] cmp func(V) BoundsCheck } @@ -260,6 +261,6 @@ func (bi *BoundedIter[V]) Next() bool { return false } -func NewBoundedIter[V any](itr Iterator[V], cmp func(V) BoundsCheck) *BoundedIter[V] { +func NewBoundedIter[V any](itr iter.Iterator[V], cmp func(V) BoundsCheck) *BoundedIter[V] { return &BoundedIter[V]{Iterator: itr, cmp: cmp} } diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go index 4ccd011fd1ec..09a0dc2778f4 100644 --- a/pkg/storage/bloom/v1/builder.go +++ b/pkg/storage/bloom/v1/builder.go @@ -8,6 +8,7 @@ import ( "github.com/pkg/errors" "github.com/grafana/loki/v3/pkg/chunkenc" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/util/encoding" ) @@ -160,23 +161,23 @@ type BloomCreation struct { // from a list of blocks and a store of series. type MergeBuilder struct { // existing blocks - blocks Iterator[*SeriesWithBlooms] + blocks iter.Iterator[*SeriesWithBlooms] // store - store Iterator[*Series] + store iter.Iterator[*Series] // Add chunks to a bloom - populate func(s *Series, srcBlooms SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) + populate func(s *Series, srcBlooms iter.SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) metrics *Metrics } -type BloomPopulatorFunc = func(s *Series, srcBlooms SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) +type BloomPopulatorFunc = func(s *Series, srcBlooms iter.SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) // NewMergeBuilder is a specific builder which does the following: // 1. merges multiple blocks into a single ordered querier, // i) When two blocks have the same series, it will prefer the one with the most chunks already indexed // 2. iterates through the store, adding chunks to the relevant blooms via the `populate` argument func NewMergeBuilder( - blocks Iterator[*SeriesWithBlooms], - store Iterator[*Series], + blocks iter.Iterator[*SeriesWithBlooms], + store iter.Iterator[*Series], populate BloomPopulatorFunc, metrics *Metrics, ) *MergeBuilder { @@ -184,13 +185,13 @@ func NewMergeBuilder( // because blooms dont contain the label-set (only the fingerprint), // in the case of a fingerprint collision we simply treat it as one // series with multiple chunks. 
- combinedSeriesIter := NewDedupingIter[*Series, *Series]( + combinedSeriesIter := iter.NewDedupingIter[*Series, *Series]( // eq func(s1, s2 *Series) bool { return s1.Fingerprint == s2.Fingerprint }, // from - Identity[*Series], + iter.Identity[*Series], // merge func(s1, s2 *Series) *Series { return &Series{ @@ -198,7 +199,7 @@ func NewMergeBuilder( Chunks: s1.Chunks.Union(s2.Chunks), } }, - NewPeekingIter[*Series](store), + iter.NewPeekIter[*Series](store), ) return &MergeBuilder{ @@ -256,8 +257,8 @@ func (mb *MergeBuilder) processNextSeries( var ( offsets []BloomOffset - chunksToAdd = nextInStore.Chunks - preExistingBlooms SizedIterator[*Bloom] = NewEmptyIter[*Bloom]() + chunksToAdd = nextInStore.Chunks + preExistingBlooms iter.SizedIterator[*Bloom] = iter.NewEmptyIter[*Bloom]() ) if nextInBlocks != nil && nextInBlocks.Series.Fingerprint == nextInStore.Fingerprint { diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go index 15f0de0842a9..6abed637d7c7 100644 --- a/pkg/storage/bloom/v1/builder_test.go +++ b/pkg/storage/bloom/v1/builder_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/v3/pkg/chunkenc" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" "github.com/grafana/loki/v3/pkg/util/encoding" "github.com/grafana/loki/v3/pkg/util/mempool" @@ -101,9 +102,9 @@ func TestBlockBuilder_RoundTrip(t *testing.T) { builder, err := NewBlockBuilder(blockOpts, tc.writer) require.Nil(t, err) - itr := NewPeekingIter[SeriesWithBlooms]( - NewMapIter( - NewSliceIter[SeriesWithLiteralBlooms](data), + itr := iter.NewPeekIter[SeriesWithBlooms]( + iter.NewMapIter( + iter.NewSliceIter[SeriesWithLiteralBlooms](data), func(x SeriesWithLiteralBlooms) SeriesWithBlooms { return x.SeriesWithBlooms() }, ), ) @@ -132,7 +133,7 @@ func TestBlockBuilder_RoundTrip(t *testing.T) { for i := 0; i < len(processedData); i++ { require.Equal(t, true, querier.Next(), "on iteration %d with error %v", i, querier.Err()) got := querier.At() - blooms, err := Collect(got.Blooms) + blooms, err := iter.Collect(got.Blooms) require.Nil(t, err) require.Equal(t, processedData[i].Series, got.Series) for _, key := range keys[i] { @@ -159,7 +160,7 @@ func TestBlockBuilder_RoundTrip(t *testing.T) { for j := 0; j < len(halfData); j++ { require.Equal(t, true, querier.Next(), "on iteration %d", j) got := querier.At() - blooms, err := Collect(got.Blooms) + blooms, err := iter.Collect(got.Blooms) require.Nil(t, err) require.Equal(t, halfData[j].Series, got.Series) for _, key := range halfKeys[j] { @@ -183,20 +184,20 @@ func TestBlockBuilder_RoundTrip(t *testing.T) { } } -func dedupedBlocks(blocks []PeekingIterator[*SeriesWithBlooms]) Iterator[*SeriesWithBlooms] { +func dedupedBlocks(blocks []iter.PeekIterator[*SeriesWithBlooms]) iter.Iterator[*SeriesWithBlooms] { orderedBlocks := NewHeapIterForSeriesWithBloom(blocks...) 
- return NewDedupingIter[*SeriesWithBlooms]( + return iter.NewDedupingIter[*SeriesWithBlooms]( func(a *SeriesWithBlooms, b *SeriesWithBlooms) bool { return a.Series.Fingerprint == b.Series.Fingerprint }, - Identity[*SeriesWithBlooms], + iter.Identity[*SeriesWithBlooms], func(a *SeriesWithBlooms, b *SeriesWithBlooms) *SeriesWithBlooms { if len(a.Series.Chunks) > len(b.Series.Chunks) { return a } return b }, - NewPeekingIter[*SeriesWithBlooms](orderedBlocks), + iter.NewPeekIter[*SeriesWithBlooms](orderedBlocks), ) } @@ -205,7 +206,7 @@ func TestMergeBuilder(t *testing.T) { nBlocks := 10 numSeries := 100 - blocks := make([]PeekingIterator[*SeriesWithBlooms], 0, nBlocks) + blocks := make([]iter.PeekIterator[*SeriesWithBlooms], 0, nBlocks) data, _ := MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000) blockOpts := BlockOptions{ Schema: Schema{ @@ -237,14 +238,14 @@ func TestMergeBuilder(t *testing.T) { ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBlooms](data[min:max]) + itr := iter.NewSliceIter[SeriesWithBlooms](data[min:max]) _, err = builder.BuildFrom(itr) require.Nil(t, err) - blocks = append(blocks, NewPeekingIter[*SeriesWithBlooms](NewBlockQuerier(NewBlock(reader, NewMetrics(nil)), &mempool.SimpleHeapAllocator{}, DefaultMaxPageSize).Iter())) + blocks = append(blocks, iter.NewPeekIter[*SeriesWithBlooms](NewBlockQuerier(NewBlock(reader, NewMetrics(nil)), &mempool.SimpleHeapAllocator{}, DefaultMaxPageSize).Iter())) } // We're not testing the ability to extend a bloom in this test - pop := func(s *Series, srcBlooms SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) { + pop := func(s *Series, srcBlooms iter.SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) { for srcBlooms.Next() { bloom := srcBlooms.At() ch <- &BloomCreation{ @@ -257,8 +258,8 @@ func TestMergeBuilder(t *testing.T) { // storage should contain references to all the series we ingested, // regardless of block allocation/overlap. 
- storeItr := NewMapIter[SeriesWithBlooms, *Series]( - NewSliceIter[SeriesWithBlooms](data), + storeItr := iter.NewMapIter[SeriesWithBlooms, *Series]( + iter.NewSliceIter[SeriesWithBlooms](data), func(swb SeriesWithBlooms) *Series { return swb.Series }, @@ -288,7 +289,7 @@ func TestMergeBuilder(t *testing.T) { func(a, b *SeriesWithBlooms) { require.Equal(t, a.Series, b.Series, "expected %+v, got %+v", a, b) }, - NewSliceIter[*SeriesWithBlooms](PointerSlice(data)), + iter.NewSliceIter[*SeriesWithBlooms](PointerSlice(data)), querier.Iter(), ) } @@ -352,7 +353,7 @@ func TestMergeBuilderFingerprintCollision(t *testing.T) { } // We're not testing the ability to extend a bloom in this test - pop := func(s *Series, srcBlooms SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) { + pop := func(s *Series, srcBlooms iter.SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) { ch <- &BloomCreation{ Bloom: &Bloom{ ScalableBloomFilter: *filter.NewScalableBloomFilter(1024, 0.01, 0.8), @@ -363,8 +364,8 @@ func TestMergeBuilderFingerprintCollision(t *testing.T) { require.Nil(t, err) mergeBuilder := NewMergeBuilder( - NewEmptyIter[*SeriesWithBlooms](), - NewSliceIter(data), + iter.NewEmptyIter[*SeriesWithBlooms](), + iter.NewSliceIter(data), pop, NewMetrics(nil), ) @@ -414,7 +415,7 @@ func TestBlockReset(t *testing.T) { ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBlooms](data) + itr := iter.NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.Nil(t, err) block := NewBlock(reader, NewMetrics(nil)) @@ -479,7 +480,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) { require.Nil(t, err) // each set of copies gets a different slice of the data minIdx, maxIdx := i*len(xs)/len(sets), (i+1)*len(xs)/len(sets) - itr := NewSliceIter[SeriesWithBlooms](xs[minIdx:maxIdx]) + itr := iter.NewSliceIter[SeriesWithBlooms](xs[minIdx:maxIdx]) _, err = builder.BuildFrom(itr) require.Nil(t, err) block := NewBlock(reader, NewMetrics(nil)) @@ -497,16 +498,16 @@ func TestMergeBuilder_Roundtrip(t *testing.T) { // we keep 2 copies of the data as iterators. One for the blocks, and one for the "store" // which will force it to reference the same series - var blocks []PeekingIterator[*SeriesWithBlooms] - var store []PeekingIterator[*SeriesWithBlooms] + var blocks []iter.PeekIterator[*SeriesWithBlooms] + var store []iter.PeekIterator[*SeriesWithBlooms] for _, x := range data { - blocks = append(blocks, NewPeekingIter[*SeriesWithBlooms](NewSliceIter[*SeriesWithBlooms](x))) - store = append(store, NewPeekingIter[*SeriesWithBlooms](NewSliceIter[*SeriesWithBlooms](x))) + blocks = append(blocks, iter.NewPeekIter[*SeriesWithBlooms](iter.NewSliceIter[*SeriesWithBlooms](x))) + store = append(store, iter.NewPeekIter[*SeriesWithBlooms](iter.NewSliceIter[*SeriesWithBlooms](x))) } orderedStore := NewHeapIterForSeriesWithBloom(store...) 
- dedupedStore := NewDedupingIter[*SeriesWithBlooms, *Series]( + dedupedStore := iter.NewDedupingIter[*SeriesWithBlooms, *Series]( func(a *SeriesWithBlooms, b *Series) bool { return a.Series.Fingerprint == b.Fingerprint }, @@ -519,11 +520,11 @@ func TestMergeBuilder_Roundtrip(t *testing.T) { } return b }, - NewPeekingIter[*SeriesWithBlooms](orderedStore), + iter.NewPeekIter[*SeriesWithBlooms](orderedStore), ) // We're not testing the ability to extend a bloom in this test - pop := func(s *Series, srcBlooms SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) { + pop := func(s *Series, srcBlooms iter.SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) { for srcBlooms.Next() { bloom := srcBlooms.At() ch <- &BloomCreation{ @@ -554,7 +555,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) { // ensure the new block contains one copy of all the data // by comparing it against an iterator over the source data mergedBlockQuerier := NewBlockQuerier(NewBlock(reader, NewMetrics(nil)), &mempool.SimpleHeapAllocator{}, DefaultMaxPageSize) - sourceItr := NewSliceIter[*SeriesWithBlooms](PointerSlice[SeriesWithBlooms](xs)) + sourceItr := iter.NewSliceIter[*SeriesWithBlooms](PointerSlice[SeriesWithBlooms](xs)) EqualIterators[*SeriesWithBlooms]( t, diff --git a/pkg/storage/bloom/v1/dedupe_test.go b/pkg/storage/bloom/v1/dedupe_test.go index e008bee6834c..8c4dd43629b5 100644 --- a/pkg/storage/bloom/v1/dedupe_test.go +++ b/pkg/storage/bloom/v1/dedupe_test.go @@ -4,6 +4,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + iter "github.com/grafana/loki/v3/pkg/iter/v2" ) func TestMergeDedupeIter(t *testing.T) { @@ -12,11 +14,11 @@ func TestMergeDedupeIter(t *testing.T) { numSeries = 100 data, _ = MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000) dataPtr = PointerSlice(data) - queriers = make([]PeekingIterator[*SeriesWithBlooms], 4) + queriers = make([]iter.PeekIterator[*SeriesWithBlooms], 4) ) for i := 0; i < len(queriers); i++ { - queriers[i] = NewPeekingIter[*SeriesWithBlooms](NewSliceIter[*SeriesWithBlooms](dataPtr)) + queriers[i] = iter.NewPeekIter[*SeriesWithBlooms](iter.NewSliceIter[*SeriesWithBlooms](dataPtr)) } mbq := NewHeapIterForSeriesWithBloom(queriers...) 
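It may help to see the eq/from/merge contract these tests exercise in isolation. A hypothetical, self-contained sketch over ints, assuming only the pkg/iter/v2 helpers introduced above (NewDedupingIter, Identity, NewPeekIter, NewSliceIter, Collect); it sums runs of equal values:

package main

import (
    "fmt"

    v2 "github.com/grafana/loki/v3/pkg/iter/v2"
)

func main() {
    itr := v2.NewDedupingIter[int, int](
        // eq: does the next element belong to the current group?
        func(a, b int) bool { return a == b },
        // from: seed a group from its first element.
        v2.Identity[int],
        // merge: fold a duplicate into the running group value.
        func(a, b int) int { return a + b },
        v2.NewPeekIter[int](v2.NewSliceIter([]int{1, 1, 2, 3, 3, 3})),
    )
    out, err := v2.Collect[int](itr)
    fmt.Println(out, err) // expected under these assumptions: [2 2 9] <nil>
}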
@@ -26,11 +28,11 @@ func TestMergeDedupeIter(t *testing.T) { merge := func(a, _ *SeriesWithBlooms) *SeriesWithBlooms { return a } - deduper := NewDedupingIter[*SeriesWithBlooms, *SeriesWithBlooms]( + deduper := iter.NewDedupingIter[*SeriesWithBlooms, *SeriesWithBlooms]( eq, - Identity[*SeriesWithBlooms], + iter.Identity[*SeriesWithBlooms], merge, - NewPeekingIter[*SeriesWithBlooms](mbq), + iter.NewPeekIter[*SeriesWithBlooms](mbq), ) for i := 0; i < len(data); i++ { diff --git a/pkg/storage/bloom/v1/fuse.go b/pkg/storage/bloom/v1/fuse.go index 435299d12971..3e14e57c5806 100644 --- a/pkg/storage/bloom/v1/fuse.go +++ b/pkg/storage/bloom/v1/fuse.go @@ -9,6 +9,7 @@ import ( "github.com/prometheus/common/model" "go.uber.org/atomic" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/util/spanlogger" ) @@ -125,17 +126,17 @@ type Output struct { // Fuse combines multiple requests into a single loop iteration // over the data set and returns the corresponding outputs // TODO(owen-d): better async control -func (bq *BlockQuerier) Fuse(inputs []PeekingIterator[Request], logger log.Logger) *FusedQuerier { +func (bq *BlockQuerier) Fuse(inputs []iter.PeekIterator[Request], logger log.Logger) *FusedQuerier { return NewFusedQuerier(bq, inputs, logger) } type FusedQuerier struct { bq *BlockQuerier - inputs Iterator[[]Request] + inputs iter.Iterator[[]Request] logger log.Logger } -func NewFusedQuerier(bq *BlockQuerier, inputs []PeekingIterator[Request], logger log.Logger) *FusedQuerier { +func NewFusedQuerier(bq *BlockQuerier, inputs []iter.PeekIterator[Request], logger log.Logger) *FusedQuerier { heap := NewHeapIterator[Request]( func(a, b Request) bool { return a.Fp < b.Fp @@ -143,7 +144,7 @@ func NewFusedQuerier(bq *BlockQuerier, inputs []PeekingIterator[Request], logger inputs..., ) - merging := NewDedupingIter[Request, []Request]( + merging := iter.NewDedupingIter[Request, []Request]( func(a Request, b []Request) bool { return a.Fp == b[0].Fp }, @@ -151,7 +152,7 @@ func NewFusedQuerier(bq *BlockQuerier, inputs []PeekingIterator[Request], logger func(a Request, b []Request) []Request { return append(b, a) }, - NewPeekingIter[Request](heap), + iter.NewPeekIter[Request](heap), ) return &FusedQuerier{ bq: bq, diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go index 745981965893..47c9348b3fe5 100644 --- a/pkg/storage/bloom/v1/fuse_test.go +++ b/pkg/storage/bloom/v1/fuse_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/v3/pkg/chunkenc" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" "github.com/grafana/loki/v3/pkg/util/mempool" ) @@ -31,8 +32,8 @@ type fakeNgramBuilder struct{} func (f fakeNgramBuilder) N() int { return 4 } func (f fakeNgramBuilder) SkipFactor() int { return 0 } -func (f fakeNgramBuilder) Tokens(line string) Iterator[[]byte] { - return NewSliceIter[[]byte]([][]byte{[]byte(line)}) +func (f fakeNgramBuilder) Tokens(line string) v2.Iterator[[]byte] { + return v2.NewSliceIter[[]byte]([][]byte{[]byte(line)}) } func keysToBloomTest(keys [][]byte) BloomTest { @@ -66,7 +67,7 @@ func TestFusedQuerier(t *testing.T) { writer, ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBlooms](data) + itr := v2.NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.NoError(t, err) require.False(t, itr.Next()) @@ -95,9 +96,9 @@ func TestFusedQuerier(t *testing.T) { resChans = append(resChans, ch) } - var itrs 
[]PeekingIterator[Request] + var itrs []v2.PeekIterator[Request] for _, reqs := range inputs { - itrs = append(itrs, NewPeekingIter[Request](NewSliceIter[Request](reqs))) + itrs = append(itrs, v2.NewPeekIter[Request](v2.NewSliceIter[Request](reqs))) } resps := make([][]Output, nReqs) @@ -189,10 +190,10 @@ func TestFuseMultiPage(t *testing.T) { b2.Add(key2) b2.Add(append(buf[:prefixLn], key2...)) - _, err = builder.BuildFrom(NewSliceIter([]SeriesWithBlooms{ + _, err = builder.BuildFrom(v2.NewSliceIter([]SeriesWithBlooms{ { series, - NewSliceIter([]*Bloom{ + v2.NewSliceIter([]*Bloom{ b1, b2, }), }, @@ -231,8 +232,8 @@ func TestFuseMultiPage(t *testing.T) { } fused := querier.Fuse( - []PeekingIterator[Request]{ - NewPeekingIter(NewSliceIter(reqs)), + []v2.PeekIterator[Request]{ + v2.NewPeekIter(v2.NewSliceIter(reqs)), }, log.NewNopLogger(), ) @@ -300,7 +301,7 @@ func TestLazyBloomIter_Seek_ResetError(t *testing.T) { data = append(data, SeriesWithBlooms{ Series: &series, - Blooms: NewSliceIter([]*Bloom{&bloom}), + Blooms: v2.NewSliceIter([]*Bloom{&bloom}), }) } @@ -316,7 +317,7 @@ func TestLazyBloomIter_Seek_ResetError(t *testing.T) { writer, ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBlooms](data) + itr := v2.NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.NoError(t, err) require.False(t, itr.Next()) @@ -375,7 +376,7 @@ func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]Request, []chan Ou writer, ) require.Nil(b, err) - itr := NewSliceIter[SeriesWithBlooms](data) + itr := v2.NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.Nil(b, err) block := NewBlock(reader, NewMetrics(nil)) @@ -432,12 +433,12 @@ func BenchmarkBlockQuerying(b *testing.B) { )) }() - var itrs []PeekingIterator[Request] + var itrs []v2.PeekIterator[Request] for i := 0; i < b.N; i++ { itrs = itrs[:0] for _, reqs := range requestChains { - itrs = append(itrs, NewPeekingIter[Request](NewSliceIter[Request](reqs))) + itrs = append(itrs, v2.NewPeekIter[Request](v2.NewSliceIter[Request](reqs))) } fused := querier.Fuse(itrs, log.NewNopLogger()) _ = fused.Run() diff --git a/pkg/storage/bloom/v1/index_querier.go b/pkg/storage/bloom/v1/index_querier.go index ef270551952b..7fdaa4617571 100644 --- a/pkg/storage/bloom/v1/index_querier.go +++ b/pkg/storage/bloom/v1/index_querier.go @@ -5,10 +5,12 @@ import ( "github.com/efficientgo/core/errors" "github.com/prometheus/common/model" + + iter "github.com/grafana/loki/v3/pkg/iter/v2" ) type SeriesIterator interface { - Iterator[*SeriesWithOffset] + iter.Iterator[*SeriesWithOffset] Reset() } diff --git a/pkg/storage/bloom/v1/iter.go b/pkg/storage/bloom/v1/iter.go deleted file mode 100644 index b1b460fb6420..000000000000 --- a/pkg/storage/bloom/v1/iter.go +++ /dev/null @@ -1,70 +0,0 @@ -package v1 - -type IndexedValue[T any] struct { - idx int - val T -} - -func (iv IndexedValue[T]) Value() T { - return iv.val -} - -func (iv IndexedValue[T]) Index() int { - return iv.idx -} - -type IterWithIndex[T any] struct { - Iterator[T] - zero T // zero value of T - cache IndexedValue[T] -} - -func (it *IterWithIndex[T]) At() IndexedValue[T] { - it.cache.val = it.Iterator.At() - return it.cache -} - -func NewIterWithIndex[T any](iter Iterator[T], idx int) Iterator[IndexedValue[T]] { - return &IterWithIndex[T]{ - Iterator: iter, - cache: IndexedValue[T]{idx: idx}, - } -} - -type SliceIterWithIndex[T any] struct { - xs []T // source slice - pos int // position within the slice - zero T // zero value of T - cache IndexedValue[T] -} - 
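The Fuse changes above only swap the iterator types, so the call shape is unchanged. A hedged sketch of driving the fused querier from outside the package, mirroring the updated tests and benchmark; it assumes Run returns an error, as the discarded return value in BenchmarkBlockQuerying suggests:

    import (
        "github.com/go-kit/log"

        v2 "github.com/grafana/loki/v3/pkg/iter/v2"
        v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
    )

    // runFused wraps each request batch in a v2 peeking iterator and hands
    // the set to the fused querier, which answers all of them in one pass
    // over the block.
    func runFused(querier *v1.BlockQuerier, batches [][]v1.Request) error {
        itrs := make([]v2.PeekIterator[v1.Request], 0, len(batches))
        for _, reqs := range batches {
            itrs = append(itrs, v2.NewPeekIter(v2.NewSliceIter(reqs)))
        }
        return querier.Fuse(itrs, log.NewNopLogger()).Run()
    }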
-func (it *SliceIterWithIndex[T]) Next() bool { - it.pos++ - return it.pos < len(it.xs) -} - -func (it *SliceIterWithIndex[T]) Err() error { - return nil -} - -func (it *SliceIterWithIndex[T]) At() IndexedValue[T] { - it.cache.val = it.xs[it.pos] - return it.cache -} - -func (it *SliceIterWithIndex[T]) Peek() (IndexedValue[T], bool) { - if it.pos+1 >= len(it.xs) { - it.cache.val = it.zero - return it.cache, false - } - it.cache.val = it.xs[it.pos+1] - return it.cache, true -} - -func NewSliceIterWithIndex[T any](xs []T, idx int) PeekingIterator[IndexedValue[T]] { - return &SliceIterWithIndex[T]{ - xs: xs, - pos: -1, - cache: IndexedValue[T]{idx: idx}, - } -} diff --git a/pkg/storage/bloom/v1/iter_test.go b/pkg/storage/bloom/v1/iter_test.go deleted file mode 100644 index 91604dd27868..000000000000 --- a/pkg/storage/bloom/v1/iter_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSliceIterWithIndex(t *testing.T) { - t.Parallel() - t.Run("SliceIterWithIndex implements PeekingIterator interface", func(t *testing.T) { - xs := []string{"a", "b", "c"} - it := NewSliceIterWithIndex(xs, 123) - - // peek at first item - p, ok := it.Peek() - require.True(t, ok) - require.Equal(t, "a", p.val) - require.Equal(t, 123, p.idx) - - // proceed to first item - require.True(t, it.Next()) - require.Equal(t, "a", it.At().val) - require.Equal(t, 123, it.At().idx) - - // proceed to second and third item - require.True(t, it.Next()) - require.True(t, it.Next()) - - // peek at non-existing fourth item - p, ok = it.Peek() - require.False(t, ok) - require.Equal(t, "", p.val) // "" is zero value for type string - require.Equal(t, 123, p.idx) - }) -} diff --git a/pkg/storage/bloom/v1/merge.go b/pkg/storage/bloom/v1/merge.go index d89ca2a7d7f9..0e94d0d50640 100644 --- a/pkg/storage/bloom/v1/merge.go +++ b/pkg/storage/bloom/v1/merge.go @@ -1,11 +1,15 @@ package v1 +import ( + iter "github.com/grafana/loki/v3/pkg/iter/v2" +) + // HeapIterator is a heap implementation of BlockQuerier backed by multiple blocks // It is used to merge multiple blocks into a single ordered querier // NB(owen-d): it uses a custom heap implementation because Pop() only returns a single // value of the top-most iterator, rather than the iterator itself type HeapIterator[T any] struct { - itrs []PeekingIterator[T] + itrs []iter.PeekIterator[T] less func(T, T) bool zero T // zero value of T @@ -13,7 +17,7 @@ type HeapIterator[T any] struct { ok bool } -func NewHeapIterForSeriesWithBloom(queriers ...PeekingIterator[*SeriesWithBlooms]) *HeapIterator[*SeriesWithBlooms] { +func NewHeapIterForSeriesWithBloom(queriers ...iter.PeekIterator[*SeriesWithBlooms]) *HeapIterator[*SeriesWithBlooms] { return NewHeapIterator( func(a, b *SeriesWithBlooms) bool { return a.Series.Fingerprint < b.Series.Fingerprint @@ -22,7 +26,7 @@ func NewHeapIterForSeriesWithBloom(queriers ...PeekingIterator[*SeriesWithBlooms ) } -func NewHeapIterator[T any](less func(T, T) bool, itrs ...PeekingIterator[T]) *HeapIterator[T] { +func NewHeapIterator[T any](less func(T, T) bool, itrs ...iter.PeekIterator[T]) *HeapIterator[T] { res := &HeapIterator[T]{ itrs: itrs, less: less, @@ -65,7 +69,7 @@ func (mbq *HeapIterator[T]) At() T { return mbq.cache } -func (mbq *HeapIterator[T]) push(x PeekingIterator[T]) { +func (mbq *HeapIterator[T]) push(x iter.PeekIterator[T]) { mbq.itrs = append(mbq.itrs, x) mbq.up(mbq.Len() - 1) } diff --git a/pkg/storage/bloom/v1/merge_test.go b/pkg/storage/bloom/v1/merge_test.go 
index 259888ae064d..f57c629d7542 100644 --- a/pkg/storage/bloom/v1/merge_test.go +++ b/pkg/storage/bloom/v1/merge_test.go @@ -4,6 +4,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + v2 "github.com/grafana/loki/v3/pkg/iter/v2" ) func TestMergeBlockQuerier_NonOverlapping(t *testing.T) { @@ -11,7 +13,7 @@ func TestMergeBlockQuerier_NonOverlapping(t *testing.T) { var ( numSeries = 100 numQueriers = 4 - queriers []PeekingIterator[*SeriesWithBlooms] + queriers []v2.PeekIterator[*SeriesWithBlooms] data, _ = MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000) ) for i := 0; i < numQueriers; i++ { @@ -19,7 +21,7 @@ func TestMergeBlockQuerier_NonOverlapping(t *testing.T) { for j := 0; j < numSeries/numQueriers; j++ { ptrs = append(ptrs, &data[i*numSeries/numQueriers+j]) } - queriers = append(queriers, NewPeekingIter[*SeriesWithBlooms](NewSliceIter[*SeriesWithBlooms](ptrs))) + queriers = append(queriers, v2.NewPeekIter[*SeriesWithBlooms](v2.NewSliceIter[*SeriesWithBlooms](ptrs))) } mbq := NewHeapIterForSeriesWithBloom(queriers...) @@ -38,14 +40,14 @@ func TestMergeBlockQuerier_Duplicate(t *testing.T) { var ( numSeries = 100 numQueriers = 2 - queriers []PeekingIterator[*SeriesWithBlooms] + queriers []v2.PeekIterator[*SeriesWithBlooms] data, _ = MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000) ) for i := 0; i < numQueriers; i++ { queriers = append( queriers, - NewPeekingIter[*SeriesWithBlooms]( - NewSliceIter[*SeriesWithBlooms]( + v2.NewPeekIter[*SeriesWithBlooms]( + v2.NewSliceIter[*SeriesWithBlooms]( PointerSlice[SeriesWithBlooms](data), ), ), @@ -69,7 +71,7 @@ func TestMergeBlockQuerier_Overlapping(t *testing.T) { var ( numSeries = 100 numQueriers = 4 - queriers []PeekingIterator[*SeriesWithBlooms] + queriers []v2.PeekIterator[*SeriesWithBlooms] data, _ = MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000) slices = make([][]*SeriesWithBlooms, numQueriers) ) @@ -77,7 +79,7 @@ func TestMergeBlockQuerier_Overlapping(t *testing.T) { slices[i%numQueriers] = append(slices[i%numQueriers], &data[i]) } for i := 0; i < numQueriers; i++ { - queriers = append(queriers, NewPeekingIter[*SeriesWithBlooms](NewSliceIter[*SeriesWithBlooms](slices[i]))) + queriers = append(queriers, v2.NewPeekIter[*SeriesWithBlooms](v2.NewSliceIter[*SeriesWithBlooms](slices[i]))) } mbq := NewHeapIterForSeriesWithBloom(queriers...) 
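merge.go keeps HeapIterator in package v1 but now builds on the v2 PeekIterator interface, and the tests above exercise it as a k-way merge. A minimal sketch of that merge with the new types, assuming the exported v1.NewHeapIterator shown in the hunk above:

    import (
        "fmt"

        v2 "github.com/grafana/loki/v3/pkg/iter/v2"
        v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
    )

    func mergeExample() {
        // Two sorted inputs; the heap always yields the globally smallest head.
        merged := v1.NewHeapIterator[int](
            func(a, b int) bool { return a < b },
            v2.NewPeekIter(v2.NewSliceIter([]int{1, 3, 5})),
            v2.NewPeekIter(v2.NewSliceIter([]int{2, 4, 6})),
        )
        for merged.Next() {
            fmt.Print(merged.At(), " ") // 1 2 3 4 5 6
        }
    }

The less function here is plain int ordering; NewHeapIterForSeriesWithBloom above specializes it to fingerprint order.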
diff --git a/pkg/storage/bloom/v1/reader.go b/pkg/storage/bloom/v1/reader.go index d73ce38c1ca8..d402ee1fd971 100644 --- a/pkg/storage/bloom/v1/reader.go +++ b/pkg/storage/bloom/v1/reader.go @@ -7,12 +7,14 @@ import ( "path/filepath" "github.com/pkg/errors" + + iter "github.com/grafana/loki/v3/pkg/iter/v2" ) type BlockReader interface { Index() (io.ReadSeeker, error) Blooms() (io.ReadSeeker, error) - TarEntries() (Iterator[TarEntry], error) + TarEntries() (iter.Iterator[TarEntry], error) } // In memory reader @@ -32,7 +34,7 @@ func (r *ByteReader) Blooms() (io.ReadSeeker, error) { return bytes.NewReader(r.blooms.Bytes()), nil } -func (r *ByteReader) TarEntries() (Iterator[TarEntry], error) { +func (r *ByteReader) TarEntries() (iter.Iterator[TarEntry], error) { indexLn := r.index.Len() index, err := r.Index() if err != nil { @@ -56,7 +58,7 @@ func (r *ByteReader) TarEntries() (Iterator[TarEntry], error) { }, } - return NewSliceIter[TarEntry](entries), err + return iter.NewSliceIter[TarEntry](entries), err } // File reader @@ -110,7 +112,7 @@ func (r *DirectoryBlockReader) Blooms() (io.ReadSeeker, error) { return r.blooms, nil } -func (r *DirectoryBlockReader) TarEntries() (Iterator[TarEntry], error) { +func (r *DirectoryBlockReader) TarEntries() (iter.Iterator[TarEntry], error) { if !r.initialized { if err := r.Init(); err != nil { return nil, err @@ -140,5 +142,5 @@ func (r *DirectoryBlockReader) TarEntries() (Iterator[TarEntry], error) { }, } - return NewSliceIter[TarEntry](entries), nil + return iter.NewSliceIter[TarEntry](entries), nil } diff --git a/pkg/storage/bloom/v1/test_util.go b/pkg/storage/bloom/v1/test_util.go index 9c1fb6047497..4fbbfa8d7bc1 100644 --- a/pkg/storage/bloom/v1/test_util.go +++ b/pkg/storage/bloom/v1/test_util.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/v3/pkg/chunkenc" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" ) @@ -38,7 +39,7 @@ func MakeBlock(t testing.TB, nth int, fromFp, throughFp model.Fingerprint, fromT writer, ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBlooms](data) + itr := iter.NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.Nil(t, err) block := NewBlock(reader, NewMetrics(nil)) @@ -55,7 +56,7 @@ type SeriesWithLiteralBlooms struct { func (s *SeriesWithLiteralBlooms) SeriesWithBlooms() SeriesWithBlooms { return SeriesWithBlooms{ Series: s.Series, - Blooms: NewSliceIter[*Bloom](s.Blooms), + Blooms: iter.NewSliceIter[*Bloom](s.Blooms), } } @@ -124,7 +125,7 @@ func MkBasicSeriesWithLiteralBlooms(nSeries int, fromFp, throughFp model.Fingerp return } -func EqualIterators[T any](t *testing.T, test func(a, b T), expected, actual Iterator[T]) { +func EqualIterators[T any](t *testing.T, test func(a, b T), expected, actual iter.Iterator[T]) { for expected.Next() { require.True(t, actual.Next()) a, b := expected.At(), actual.At() @@ -142,8 +143,8 @@ func EqualIterators[T any](t *testing.T, test func(a, b T), expected, actual Ite func CompareIterators[A, B any]( t *testing.T, f func(t *testing.T, a A, b B), - a Iterator[A], - b Iterator[B], + a iter.Iterator[A], + b iter.Iterator[B], ) { for a.Next() { require.True(t, b.Next()) diff --git a/pkg/storage/bloom/v1/tokenizer.go b/pkg/storage/bloom/v1/tokenizer.go index 131a1d057edf..dcd7c2146869 100644 --- a/pkg/storage/bloom/v1/tokenizer.go +++ b/pkg/storage/bloom/v1/tokenizer.go @@ -2,6 +2,8 @@ package v1 import ( "unicode/utf8" + + iter 
"github.com/grafana/loki/v3/pkg/iter/v2" ) const ( @@ -51,7 +53,7 @@ func NewNGramTokenizer(n, skip int) *NGramTokenizer { // Token implements the NGramBuilder interface // The Token iterator uses shared buffers for performance. The []byte returned by At() // is not safe for use after subsequent calls to Next() -func (t *NGramTokenizer) Tokens(line string) Iterator[[]byte] { +func (t *NGramTokenizer) Tokens(line string) iter.Iterator[[]byte] { return &NGramTokenIter{ n: t.N(), skip: t.SkipFactor(), @@ -108,17 +110,17 @@ type PrefixedTokenIter struct { buf []byte prefixLen int - Iterator[[]byte] + iter.Iterator[[]byte] } func (t *PrefixedTokenIter) At() []byte { return append(t.buf[:t.prefixLen], t.Iterator.At()...) } -func NewPrefixedTokenIter(buf []byte, prefixLn int, iter Iterator[[]byte]) *PrefixedTokenIter { +func NewPrefixedTokenIter(buf []byte, prefixLn int, itr iter.Iterator[[]byte]) *PrefixedTokenIter { return &PrefixedTokenIter{ buf: buf, prefixLen: prefixLn, - Iterator: iter, + Iterator: itr, } } diff --git a/pkg/storage/bloom/v1/util.go b/pkg/storage/bloom/v1/util.go index ae0a70453098..ec46d2633b7a 100644 --- a/pkg/storage/bloom/v1/util.go +++ b/pkg/storage/bloom/v1/util.go @@ -1,7 +1,6 @@ package v1 import ( - "context" "fmt" "hash" "hash/crc32" @@ -65,181 +64,6 @@ func (p *ChecksumPool) Put(h hash.Hash32) { p.Pool.Put(h) } -type Iterator[T any] interface { - Next() bool - Err() error - At() T -} - -type SizedIterator[T any] interface { - Iterator[T] - Remaining() int // remaining -} - -type PeekingIterator[T any] interface { - Peek() (T, bool) - Iterator[T] -} - -type PeekIter[T any] struct { - itr Iterator[T] - - // the first call to Next() will populate cur & next - init bool - zero T // zero value of T for returning empty Peek's - cur, next *T -} - -func NewPeekingIter[T any](itr Iterator[T]) *PeekIter[T] { - return &PeekIter[T]{itr: itr} -} - -// populates the first element so Peek can be used and subsequent Next() -// calls will work as expected -func (it *PeekIter[T]) ensureInit() { - if it.init { - return - } - if it.itr.Next() { - at := it.itr.At() - it.next = &at - } - it.init = true -} - -// load the next element and return the cached one -func (it *PeekIter[T]) cacheNext() { - it.cur = it.next - if it.cur != nil && it.itr.Next() { - at := it.itr.At() - it.next = &at - } else { - it.next = nil - } -} - -func (it *PeekIter[T]) Next() bool { - it.ensureInit() - it.cacheNext() - return it.cur != nil -} - -func (it *PeekIter[T]) Peek() (T, bool) { - it.ensureInit() - if it.next == nil { - return it.zero, false - } - return *it.next, true -} - -func (it *PeekIter[T]) Err() error { - return it.itr.Err() -} - -func (it *PeekIter[T]) At() T { - return *it.cur -} - -type SeekIter[K, V any] interface { - Seek(K) error - Iterator[V] -} - -type SliceIter[T any] struct { - cur int - xs []T -} - -func NewSliceIter[T any](xs []T) *SliceIter[T] { - return &SliceIter[T]{xs: xs, cur: -1} -} - -func (it *SliceIter[T]) Remaining() int { - return max(0, len(it.xs)-(it.cur+1)) -} - -func (it *SliceIter[T]) Next() bool { - it.cur++ - return it.cur < len(it.xs) -} - -func (it *SliceIter[T]) Err() error { - return nil -} - -func (it *SliceIter[T]) At() T { - return it.xs[it.cur] -} - -type MapIter[A any, B any] struct { - Iterator[A] - f func(A) B -} - -func NewMapIter[A any, B any](src Iterator[A], f func(A) B) *MapIter[A, B] { - return &MapIter[A, B]{Iterator: src, f: f} -} - -func (it *MapIter[A, B]) At() B { - return it.f(it.Iterator.At()) -} - -type EmptyIter[T any] struct { - zero T 
-} - -func (it *EmptyIter[T]) Next() bool { - return false -} - -func (it *EmptyIter[T]) Err() error { - return nil -} - -func (it *EmptyIter[T]) At() T { - return it.zero -} - -func (it *EmptyIter[T]) Peek() (T, bool) { - return it.zero, false -} - -func (it *EmptyIter[T]) Remaining() int { - return 0 -} - -// noop -func (it *EmptyIter[T]) Reset() {} - -func NewEmptyIter[T any]() *EmptyIter[T] { - return &EmptyIter[T]{} -} - -type CancellableIter[T any] struct { - ctx context.Context - Iterator[T] -} - -func (cii *CancellableIter[T]) Next() bool { - select { - case <-cii.ctx.Done(): - return false - default: - return cii.Iterator.Next() - } -} - -func (cii *CancellableIter[T]) Err() error { - if err := cii.ctx.Err(); err != nil { - return err - } - return cii.Iterator.Err() -} - -func NewCancelableIter[T any](ctx context.Context, itr Iterator[T]) *CancellableIter[T] { - return &CancellableIter[T]{ctx: ctx, Iterator: itr} -} - type NoopCloser struct { io.Writer } @@ -259,96 +83,3 @@ func PointerSlice[T any](xs []T) []*T { } return out } - -type CloseableIterator[T any] interface { - Iterator[T] - Close() error -} - -func NewCloseableIterator[T io.Closer](itr Iterator[T]) *CloseIter[T] { - return &CloseIter[T]{itr} -} - -type CloseIter[T io.Closer] struct { - Iterator[T] -} - -func (i *CloseIter[T]) Close() error { - return i.At().Close() -} - -type PeekingCloseableIterator[T any] interface { - PeekingIterator[T] - CloseableIterator[T] -} - -type PeekCloseIter[T any] struct { - *PeekIter[T] - close func() error -} - -func NewPeekCloseIter[T any](itr CloseableIterator[T]) *PeekCloseIter[T] { - return &PeekCloseIter[T]{PeekIter: NewPeekingIter[T](itr), close: itr.Close} -} - -func (it *PeekCloseIter[T]) Close() error { - return it.close() -} - -type ResettableIterator[T any] interface { - Reset() error - Iterator[T] -} - -type CloseableResettableIterator[T any] interface { - CloseableIterator[T] - ResettableIterator[T] -} - -type Predicate[T any] func(T) bool - -func NewFilterIter[T any](it Iterator[T], p Predicate[T]) *FilterIter[T] { - return &FilterIter[T]{ - Iterator: it, - match: p, - } -} - -type FilterIter[T any] struct { - Iterator[T] - match Predicate[T] -} - -func (i *FilterIter[T]) Next() bool { - hasNext := i.Iterator.Next() - for hasNext && !i.match(i.Iterator.At()) { - hasNext = i.Iterator.Next() - } - return hasNext -} - -type CounterIterator[T any] interface { - Iterator[T] - Count() int -} - -type CounterIter[T any] struct { - Iterator[T] // the underlying iterator - count int -} - -func NewCounterIter[T any](itr Iterator[T]) *CounterIter[T] { - return &CounterIter[T]{Iterator: itr} -} - -func (it *CounterIter[T]) Next() bool { - if it.Iterator.Next() { - it.count++ - return true - } - return false -} - -func (it *CounterIter[T]) Count() int { - return it.count -} diff --git a/pkg/storage/bloom/v1/versioned_builder.go b/pkg/storage/bloom/v1/versioned_builder.go index 8b262ee62e55..175d651dc460 100644 --- a/pkg/storage/bloom/v1/versioned_builder.go +++ b/pkg/storage/bloom/v1/versioned_builder.go @@ -1,6 +1,10 @@ package v1 -import "github.com/pkg/errors" +import ( + "github.com/pkg/errors" + + iter "github.com/grafana/loki/v3/pkg/iter/v2" +) /* Each binary format (version) has it's own builder. 
This provides type-safe way to build the binary format @@ -34,7 +38,7 @@ type V2Builder struct { type SeriesWithBlooms struct { Series *Series - Blooms SizedIterator[*Bloom] + Blooms iter.SizedIterator[*Bloom] } func NewBlockBuilderV2(opts BlockOptions, writer BlockWriter) (*V2Builder, error) { @@ -59,7 +63,7 @@ func NewBlockBuilderV2(opts BlockOptions, writer BlockWriter) (*V2Builder, error }, nil } -func (b *V2Builder) BuildFrom(itr Iterator[SeriesWithBlooms]) (uint32, error) { +func (b *V2Builder) BuildFrom(itr iter.Iterator[SeriesWithBlooms]) (uint32, error) { for itr.Next() { at := itr.At() var offsets []BloomOffset @@ -160,7 +164,7 @@ func NewBlockBuilderV1(opts BlockOptions, writer BlockWriter) (*V1Builder, error }, nil } -func (b *V1Builder) BuildFrom(itr Iterator[SeriesWithBloom]) (uint32, error) { +func (b *V1Builder) BuildFrom(itr iter.Iterator[SeriesWithBloom]) (uint32, error) { for itr.Next() { at := itr.At() offset, err := b.AddBloom(at.Bloom) diff --git a/pkg/storage/bloom/v1/versioned_builder_test.go b/pkg/storage/bloom/v1/versioned_builder_test.go index eca86ef7aaa1..4b1103f1bbda 100644 --- a/pkg/storage/bloom/v1/versioned_builder_test.go +++ b/pkg/storage/bloom/v1/versioned_builder_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/v3/pkg/chunkenc" + v2 "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/util/encoding" "github.com/grafana/loki/v3/pkg/util/mempool" ) @@ -47,8 +48,8 @@ func TestV1RoundTrip(t *testing.T) { b, err := NewBlockBuilderV1(opts, writer) require.NoError(t, err) - mapped := NewMapIter[SeriesWithLiteralBlooms]( - NewSliceIter(data), + mapped := v2.NewMapIter[SeriesWithLiteralBlooms]( + v2.NewSliceIter(data), func(s SeriesWithLiteralBlooms) SeriesWithBloom { return SeriesWithBloom{ Series: s.Series, @@ -68,7 +69,7 @@ func TestV1RoundTrip(t *testing.T) { t, func(t *testing.T, a SeriesWithLiteralBlooms, b *SeriesWithBlooms) { require.Equal(t, a.Series, b.Series) // ensure series equality - bs, err := Collect(b.Blooms) + bs, err := v2.Collect(b.Blooms) require.NoError(t, err) // ensure we only have one bloom in v1 @@ -81,7 +82,7 @@ func TestV1RoundTrip(t *testing.T) { require.Equal(t, encA.Get(), encB.Get()) }, - NewSliceIter(data), + v2.NewSliceIter(data), querier, ) } @@ -89,9 +90,9 @@ func TestV1RoundTrip(t *testing.T) { func TestV2Roundtrip(t *testing.T) { opts, data, writer, reader := setup(V2) - data, err := Collect( - NewMapIter[SeriesWithLiteralBlooms, SeriesWithLiteralBlooms]( - NewSliceIter(data), + data, err := v2.Collect( + v2.NewMapIter[SeriesWithLiteralBlooms, SeriesWithLiteralBlooms]( + v2.NewSliceIter(data), func(swlb SeriesWithLiteralBlooms) SeriesWithLiteralBlooms { return SeriesWithLiteralBlooms{ Series: swlb.Series, @@ -107,8 +108,8 @@ func TestV2Roundtrip(t *testing.T) { b, err := NewBlockBuilderV2(opts, writer) require.NoError(t, err) - mapped := NewMapIter[SeriesWithLiteralBlooms]( - NewSliceIter(data), + mapped := v2.NewMapIter[SeriesWithLiteralBlooms]( + v2.NewSliceIter(data), func(s SeriesWithLiteralBlooms) SeriesWithBlooms { return s.SeriesWithBlooms() }, @@ -125,7 +126,7 @@ func TestV2Roundtrip(t *testing.T) { t, func(t *testing.T, a SeriesWithLiteralBlooms, b *SeriesWithBlooms) { require.Equal(t, a.Series, b.Series) // ensure series equality - bs, err := Collect(b.Blooms) + bs, err := v2.Collect(b.Blooms) require.NoError(t, err) // ensure we only have one bloom in v1 @@ -141,7 +142,7 @@ func TestV2Roundtrip(t *testing.T) { encB.Reset() } }, - 
NewSliceIter(data), + v2.NewSliceIter(data), querier, ) } diff --git a/pkg/storage/lazy_chunk.go b/pkg/storage/lazy_chunk.go index 4c741228eee3..3ff1a3c89102 100644 --- a/pkg/storage/lazy_chunk.go +++ b/pkg/storage/lazy_chunk.go @@ -46,7 +46,7 @@ func (c *LazyChunk) Iterator( lokiChunk := c.Chunk.Data.(*chunkenc.Facade).LokiChunk() blocks := lokiChunk.Blocks(from, through) if len(blocks) == 0 { - return iter.NoopIterator, nil + return iter.NoopEntryIterator, nil } its := make([]iter.EntryIterator, 0, len(blocks)) @@ -126,7 +126,7 @@ func (c *LazyChunk) SampleIterator( lokiChunk := c.Chunk.Data.(*chunkenc.Facade).LokiChunk() blocks := lokiChunk.Blocks(from, through) if len(blocks) == 0 { - return iter.NoopIterator, nil + return iter.NoopSampleIterator, nil } its := make([]iter.SampleIterator, 0, len(blocks)) diff --git a/pkg/storage/store.go b/pkg/storage/store.go index 1900803637d7..db4a0a498e17 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -502,7 +502,7 @@ func (s *LokiStore) SelectLogs(ctx context.Context, req logql.SelectLogParams) ( } if len(lazyChunks) == 0 { - return iter.NoopIterator, nil + return iter.NoopEntryIterator, nil } expr, err := req.LogSelector() @@ -549,7 +549,7 @@ func (s *LokiStore) SelectSamples(ctx context.Context, req logql.SelectSamplePar } if len(lazyChunks) == 0 { - return iter.NoopIterator, nil + return iter.NoopSampleIterator, nil } expr, err := req.Expr() diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go index 13bcaa9688a9..666ab241924a 100644 --- a/pkg/storage/store_test.go +++ b/pkg/storage/store_test.go @@ -126,7 +126,7 @@ func Benchmark_store_SelectSample(b *testing.B) { } for iter.Next() { - _ = iter.Sample() + _ = iter.At() sampleCount++ } iter.Close() @@ -168,7 +168,7 @@ func benchmarkStoreQuery(b *testing.B, query *logproto.QueryRequest) { for iter.Next() { j++ printHeap(b, false) - res = append(res, iter.Entry()) + res = append(res, iter.At()) // limit result like the querier would do. 
if j == query.Limit { break @@ -928,7 +928,7 @@ func Test_PipelineWrapper(t *testing.T) { } defer logit.Close() for logit.Next() { - require.NoError(t, logit.Error()) // consume the iterator + require.NoError(t, logit.Err()) // consume the iterator } require.Equal(t, "test-user", wrapper.tenant) @@ -959,7 +959,7 @@ func Test_PipelineWrapper_disabled(t *testing.T) { } defer logit.Close() for logit.Next() { - require.NoError(t, logit.Error()) // consume the iterator + require.NoError(t, logit.Err()) // consume the iterator } require.Equal(t, "", wrapper.tenant) @@ -1044,7 +1044,7 @@ func Test_SampleWrapper(t *testing.T) { } defer it.Close() for it.Next() { - require.NoError(t, it.Error()) // consume the iterator + require.NoError(t, it.Err()) // consume the iterator } require.Equal(t, "test-user", wrapper.tenant) @@ -1074,7 +1074,7 @@ func Test_SampleWrapper_disabled(t *testing.T) { } defer it.Close() for it.Next() { - require.NoError(t, it.Error()) // consume the iterator + require.NoError(t, it.Err()) // consume the iterator } require.Equal(t, "", wrapper.tenant) @@ -1656,13 +1656,13 @@ func Test_OverlappingChunks(t *testing.T) { } defer it.Close() require.True(t, it.Next()) - require.Equal(t, "4", it.Entry().Line) + require.Equal(t, "4", it.At().Line) require.True(t, it.Next()) - require.Equal(t, "3", it.Entry().Line) + require.Equal(t, "3", it.At().Line) require.True(t, it.Next()) - require.Equal(t, "2", it.Entry().Line) + require.Equal(t, "2", it.At().Line) require.True(t, it.Next()) - require.Equal(t, "1", it.Entry().Line) + require.Equal(t, "1", it.At().Line) require.False(t, it.Next()) } @@ -2094,7 +2094,7 @@ func TestQueryReferencingStructuredMetadata(t *testing.T) { }, } } - require.Equal(t, expectedEntry, it.Entry()) + require.Equal(t, expectedEntry, it.At()) } require.False(t, it.Next()) diff --git a/pkg/storage/stores/shipper/bloomshipper/cache.go b/pkg/storage/stores/shipper/bloomshipper/cache.go index eab363866e88..bf533dac25b4 100644 --- a/pkg/storage/stores/shipper/bloomshipper/cache.go +++ b/pkg/storage/stores/shipper/bloomshipper/cache.go @@ -11,6 +11,7 @@ import ( "github.com/grafana/dskit/multierror" "github.com/pkg/errors" + iter "github.com/grafana/loki/v3/pkg/iter/v2" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/chunk/cache" "github.com/grafana/loki/v3/pkg/util" @@ -32,11 +33,11 @@ func (c *CloseableBlockQuerier) Close() error { return err.Err() } -func (c *CloseableBlockQuerier) SeriesIter() (v1.PeekingIterator[*v1.SeriesWithBlooms], error) { +func (c *CloseableBlockQuerier) SeriesIter() (iter.PeekIterator[*v1.SeriesWithBlooms], error) { if err := c.Reset(); err != nil { return nil, err } - return v1.NewPeekingIter[*v1.SeriesWithBlooms](c.BlockQuerier.Iter()), nil + return iter.NewPeekIter[*v1.SeriesWithBlooms](c.BlockQuerier.Iter()), nil } func LoadBlocksDirIntoCache(paths []string, c Cache, logger log.Logger) error {
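Beyond the bloom packages, the store and test hunks above track a rename on the logproto iterators: values now come from At() (formerly Entry() and Sample()) and errors from Err() (formerly Error()), matching the generic v2 naming, while cache.go returns the v2 PeekIterator from SeriesIter after resetting the querier. A hedged sketch of the resulting consumption pattern, assuming the EntryIterator surface implied by the updated tests (Next/At/Err/Close):

    import (
        "github.com/grafana/loki/v3/pkg/iter"
        "github.com/grafana/loki/v3/pkg/logproto"
    )

    // drain consumes an entry iterator with the renamed methods: At() yields
    // the current entry and Err() reports any iteration error.
    func drain(it iter.EntryIterator) ([]logproto.Entry, error) {
        defer it.Close()
        var out []logproto.Entry
        for it.Next() {
            out = append(out, it.At())
        }
        return out, it.Err()
    }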