Rearrange concurrency control and error handling #191

Merged
merged 3 commits on Mar 20, 2024
144 changes: 61 additions & 83 deletions pkg/download/buffer.go
@@ -7,8 +7,6 @@ import (
 	"net/http"
 	"strconv"
 
-	"golang.org/x/sync/errgroup"
-
 	"github.com/replicate/pget/pkg/client"
 	"github.com/replicate/pget/pkg/logging"
 )
@@ -17,25 +15,17 @@ type BufferMode struct {
 	Client *client.HTTPClient
 	Options
 
-	// we use this errgroup as a semaphore (via sem.SetLimit())
-	sem   *errgroup.Group
-	queue *workQueue
-	pool  *bufferPool
+	queue *priorityWorkQueue
 }
 
 func GetBufferMode(opts Options) *BufferMode {
 	client := client.NewHTTPClient(opts.Client)
-	sem := new(errgroup.Group)
-	sem.SetLimit(opts.maxConcurrency())
-	queue := newWorkQueue(opts.maxConcurrency())
-	queue.start()
 	m := &BufferMode{
 		Client:  client,
 		Options: opts,
-		sem:     sem,
-		queue:   queue,
 	}
-	m.pool = newBufferPool(m.chunkSize())
+	m.queue = newWorkQueue(opts.maxConcurrency(), m.chunkSize())
+	m.queue.start()
 	return m
 }
@@ -64,42 +54,34 @@ type firstReqResult struct {
 func (m *BufferMode) Fetch(ctx context.Context, url string) (io.Reader, int64, error) {
 	logger := logging.GetLogger()
 
-	br := newBufferedReader(m.pool)
+	firstChunk := newReaderPromise()
 
 	firstReqResultCh := make(chan firstReqResult)
-	m.queue.submit(func() {
-		m.sem.Go(func() error {
-			defer close(firstReqResultCh)
-			defer br.Done()
-			firstChunkResp, err := m.DoRequest(ctx, 0, m.chunkSize()-1, url)
-			if err != nil {
-				firstReqResultCh <- firstReqResult{err: err}
-				return err
-			}
-
-			defer firstChunkResp.Body.Close()
-
-			trueURL := firstChunkResp.Request.URL.String()
-			if trueURL != url {
-				logger.Info().Str("url", url).Str("redirect_url", trueURL).Msg("Redirect")
-			}
-
-			fileSize, err := m.getFileSizeFromContentRange(firstChunkResp.Header.Get("Content-Range"))
-			if err != nil {
-				firstReqResultCh <- firstReqResult{err: err}
-				return err
-			}
-			firstReqResultCh <- firstReqResult{fileSize: fileSize, trueURL: trueURL}
-
-			contentLength := firstChunkResp.ContentLength
-			n, err := br.ReadFrom(firstChunkResp.Body)
-			if err != nil {
-				return err
-			} else if n != contentLength {
-				return ErrContentLengthMismatch{downloadedBytes: n, contentLength: contentLength}
-			}
-			return nil
-		})
+	m.queue.submitLow(func(buf []byte) {
+		defer close(firstReqResultCh)
+		firstChunkResp, err := m.DoRequest(ctx, 0, m.chunkSize()-1, url)
+		if err != nil {
+			firstReqResultCh <- firstReqResult{err: err}
+			return
+		}
+
+		defer firstChunkResp.Body.Close()
+
+		trueURL := firstChunkResp.Request.URL.String()
+		if trueURL != url {
+			logger.Info().Str("url", url).Str("redirect_url", trueURL).Msg("Redirect")
+		}
+
+		fileSize, err := m.getFileSizeFromContentRange(firstChunkResp.Header.Get("Content-Range"))
+		if err != nil {
+			firstReqResultCh <- firstReqResult{err: err}
+			return
+		}
+		firstReqResultCh <- firstReqResult{fileSize: fileSize, trueURL: trueURL}
+
+		contentLength := firstChunkResp.ContentLength
+		n, err := io.ReadFull(firstChunkResp.Body, buf[0:contentLength])
+		firstChunk.Deliver(buf[0:n], err)
 	})
 
 	firstReqResult, ok := <-firstReqResultCh

Review thread on the m.queue.submitLow(...) block:

Member: I think I probably need to sit down with you to really grok what this code is doing, but there seems to be a pretty significant change between the old code and the new code, which is that the enqueued "work" items here actually executed in parallel before (because m.sem.Go returns immediately), whereas now they execute serially. Doesn't that matter?

Contributor (Author): let's discuss.
@@ -116,62 +98,58 @@ func (m *BufferMode) Fetch(ctx context.Context, url string) (io.Reader, int64, error) {
 
 	if fileSize <= m.chunkSize() {
 		// we only need a single chunk: just download it and finish
-		return br, fileSize, nil
+		return firstChunk, fileSize, nil
 	}
 
 	remainingBytes := fileSize - m.chunkSize()
 	// integer divide rounding up
 	numChunks := int((remainingBytes-1)/m.chunkSize() + 1)
 
-	readersCh := make(chan io.Reader, numChunks+1)
-	readersCh <- br
+	chunks := make([]io.Reader, numChunks+1)
+	chunks[0] = firstChunk
 
 	startOffset := m.chunkSize()
 
-	m.queue.submit(func() {
-		defer close(readersCh)
-		logger.Debug().Str("url", url).
-			Int64("size", fileSize).
-			Int("connections", numChunks).
-			Int64("chunkSize", m.chunkSize()).
-			Msg("Downloading")
-
-		for i := 0; i < numChunks; i++ {
-			start := startOffset + m.chunkSize()*int64(i)
-			end := start + m.chunkSize() - 1
-
-			if i == numChunks-1 {
-				end = fileSize - 1
-			}
-
-			br := newBufferedReader(m.pool)
-			readersCh <- br
-
-			m.sem.Go(func() error {
-				defer br.Done()
-				resp, err := m.DoRequest(ctx, start, end, trueURL)
-				if err != nil {
-					return err
-				}
-				defer resp.Body.Close()
-
-				contentLength := resp.ContentLength
-				n, err := br.ReadFrom(resp.Body)
-				if err != nil {
-					return err
-				} else if n != contentLength {
-					return ErrContentLengthMismatch{downloadedBytes: n, contentLength: contentLength}
-				}
-				return nil
-			})
-		}
-	})
-
-	return newChanMultiReader(readersCh), fileSize, nil
-}
+	logger.Debug().Str("url", url).
+		Int64("size", fileSize).
+		Int("connections", numChunks).
+		Int64("chunkSize", m.chunkSize()).
+		Msg("Downloading")
 
-func (m *BufferMode) Wait() error {
-	return m.sem.Wait()
+	for i := 0; i < numChunks; i++ {
+		chunk := newReaderPromise()
+		chunks[i+1] = chunk
+	}
+	go func(chunks []io.Reader) {
+		for i, reader := range chunks {
+			chunk := reader.(*readerPromise)
+			m.queue.submitHigh(func(buf []byte) {
+				start := startOffset + m.chunkSize()*int64(i)
+				end := start + m.chunkSize() - 1
+
+				if i == numChunks-1 {
+					end = fileSize - 1
+				}
+				logger.Debug().Str("url", url).
+					Int64("size", fileSize).
+					Int("chunk", i).
+					Msg("Downloading chunk")
+
+				resp, err := m.DoRequest(ctx, start, end, trueURL)
+				if err != nil {
+					chunk.Deliver(nil, err)
+					return
+				}
+				defer resp.Body.Close()
+
+				contentLength := resp.ContentLength
+				n, err := io.ReadFull(resp.Body, buf[0:contentLength])
+				chunk.Deliver(buf[0:n], err)
+			})
+		}
+	}(chunks[1:])
+
+	return io.MultiReader(chunks...), fileSize, nil
 }
 
 func (m *BufferMode) DoRequest(ctx context.Context, start, end int64, trueURL string) (*http.Response, error) {
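The new code depends on a priorityWorkQueue type that is not part of this diff. Judging only from its call sites — newWorkQueue(opts.maxConcurrency(), m.chunkSize()), start(), and submitLow/submitHigh callbacks that receive a reusable buffer — a minimal sketch might look like the following; the channel-based structure and every name beyond those call sites are assumptions:

```go
package download

// work is a task that borrows a worker-owned scratch buffer for its duration.
type work func(buf []byte)

// priorityWorkQueue is a sketch inferred from buffer.go's call sites: a fixed
// pool of workers, each owning one preallocated buffer, preferring
// high-priority tasks (later chunks of an in-flight download) over
// low-priority ones (first requests of new downloads).
type priorityWorkQueue struct {
	concurrency int
	bufSize     int64
	high        chan work
	low         chan work
}

func newWorkQueue(concurrency int, bufSize int64) *priorityWorkQueue {
	return &priorityWorkQueue{
		concurrency: concurrency,
		bufSize:     bufSize,
		high:        make(chan work),
		low:         make(chan work),
	}
}

func (q *priorityWorkQueue) submitLow(w work)  { q.low <- w }
func (q *priorityWorkQueue) submitHigh(w work) { q.high <- w }

// start launches the worker pool; each worker reuses a single buffer of
// bufSize (the chunk size) across every task it runs.
func (q *priorityWorkQueue) start() {
	for i := 0; i < q.concurrency; i++ {
		go q.run(make([]byte, q.bufSize))
	}
}

func (q *priorityWorkQueue) run(buf []byte) {
	for {
		select {
		case w := <-q.high: // prefer high-priority work when available
			w(buf)
		default:
			select {
			case w := <-q.high:
				w(buf)
			case w := <-q.low:
				w(buf)
			}
		}
	}
}
```

If this reading is right, it also bears on the review question above: submitted work still runs in parallel across up to maxConcurrency workers; what changed is that the task body now executes directly on a queue worker with a preallocated buffer, instead of being re-spawned into an errgroup.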
9 changes: 3 additions & 6 deletions pkg/download/buffer_unit_test.go
@@ -117,16 +117,14 @@ func TestFileToBufferChunkCountExceedsMaxChunks(t *testing.T) {
 			require.NoError(t, err)
 			data, err := io.ReadAll(download)
 			assert.NoError(t, err)
-			err = bufferMode.Wait()
-			assert.NoError(t, err)
 			assert.Equal(t, contentSize, size)
 			assert.Equal(t, len(content), len(data))
 			assert.Equal(t, content, data)
 		})
 	}
 }
 
-func TestWaitReturnsErrorWhenRequestFails(t *testing.T) {
+func TestReaderReturnsErrorWhenRequestFails(t *testing.T) {
 	mockTransport := httpmock.NewMockTransport()
 	opts := Options{
 		Client: client.Options{Transport: mockTransport},
@@ -160,8 +158,7 @@ func TestWaitReturnsErrorWhenRequestFails(t *testing.T) {
 	download, _, err := bufferMode.Fetch(context.Background(), "http://test.example/hello.txt")
 	// No error here, because the first chunk was fetched successfully
 	require.NoError(t, err)
-	// the read might or might not return an error
-	_, _ = io.ReadAll(download)
-	err = bufferMode.Wait()
+	// the read should return any error we expect
+	_, err = io.ReadAll(download)
 	assert.ErrorIs(t, err, expectedErr)
 }
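The test changes track the API change: BufferMode.Wait() is gone, and errors from any chunk now surface through the returned io.Reader (a short body read via io.ReadFull arrives as io.ErrUnexpectedEOF through the same path). A hypothetical caller, for illustration only — the empty Options and the error handling are placeholders, not the project's documented usage:

```go
package example

import (
	"context"
	"io"
	"log"

	"github.com/replicate/pget/pkg/download"
)

// fetchAll shows the reworked error flow: there is no Wait() call; errors
// from every chunk are returned by the reader itself.
func fetchAll(ctx context.Context, url string) ([]byte, error) {
	mode := download.GetBufferMode(download.Options{}) // options elided
	reader, size, err := mode.Fetch(ctx, url)
	if err != nil {
		return nil, err // the first request itself failed
	}
	log.Printf("downloading %d bytes", size)
	return io.ReadAll(reader) // errors from later chunks surface here
}
```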
104 changes: 0 additions & 104 deletions pkg/download/buffered_reader.go

This file was deleted.
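buffered_reader.go held the bufferedReader and bufferPool types the old code used; their role passes to the readerPromise seen in buffer.go, whose definition is also not in this diff. From its call sites (newReaderPromise, Deliver(buf, err), and use as an io.Reader fed to io.MultiReader), a minimal sketch — the copy-on-deliver and the channel handoff are assumptions:

```go
package download

import (
	"bytes"
	"io"
)

// readerPromise is a sketch inferred from buffer.go: an io.Reader whose
// contents arrive later via Deliver. Read blocks until delivery, then serves
// the bytes and finally the delivered error (or io.EOF on success).
type readerPromise struct {
	ready chan struct{} // closed once Deliver has been called
	buf   bytes.Buffer  // holds a copy of the delivered bytes
	err   error
}

func newReaderPromise() *readerPromise {
	return &readerPromise{ready: make(chan struct{})}
}

// Deliver hands the promise its data and/or a terminal error. buf is the
// worker's reusable scratch buffer, so the bytes must be copied out before
// the worker reuses it for the next task.
func (p *readerPromise) Deliver(buf []byte, err error) {
	p.buf.Write(buf)
	p.err = err
	close(p.ready)
}

func (p *readerPromise) Read(out []byte) (int, error) {
	<-p.ready // block until the chunk has been delivered
	n, readErr := p.buf.Read(out)
	if readErr == io.EOF && p.err != nil {
		return n, p.err // surface the download error after draining the data
	}
	return n, readErr
}
```

Under this reading, io.MultiReader(chunks...) drains each promise in order, and a failed chunk (including a short read from io.ReadFull) terminates the whole stream with that chunk's error, which is exactly what the rewritten tests assert.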
