Add code to handle pagination of parts. Fixes max layer size of 10GB bug by bainsy88 · Pull Request #2815 · distribution/distribution · GitHub
[go: up one dir, main page]
More Web Proxy on the site http://driver.im/
Skip to content

Add code to handle pagination of parts. Fixes max layer size of 10GB bug #2815

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Mar 16, 2021
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 16 additions & 7 deletions registry/storage/driver/s3-aws/s3.go
Original file line number Diff line number Diff line change
Expand Up @@ -549,9 +549,9 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read

// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Setting a variable with the name append overwrites the built-in function which meant I couldn't use it further down.

func (d *driver) Writer(ctx context.Context, path string, appendParam bool) (storagedriver.FileWriter, error) {
key := d.s3Path(path)
if !append {
if !appendParam {
// TODO (brianbland): cancel other uploads at this path
resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
Bucket: aws.String(d.Bucket),
Expand All @@ -574,7 +574,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged
if err != nil {
return nil, parseError(path, err)
}

var allParts []*s3.Part
for _, multi := range resp.Uploads {
if key != *multi.Key {
continue
Expand All @@ -587,11 +587,20 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged
if err != nil {
return nil, parseError(path, err)
}
var multiSize int64
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This multiSize was never used so is redundant code. The size is calculated by the calling function

for _, part := range resp.Parts {
multiSize += *part.Size
allParts = append(allParts, resp.Parts...)
for *resp.IsTruncated {
resp, err = d.S3.ListParts(&s3.ListPartsInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(key),
UploadId: multi.UploadId,
PartNumberMarker: resp.NextPartNumberMarker,
})
if err != nil {
return nil, parseError(path, err)
}
allParts = append(allParts, resp.Parts...)
}
return d.newWriter(key, *multi.UploadId, resp.Parts), nil
return d.newWriter(key, *multi.UploadId, allParts), nil
}
return nil, storagedriver.PathNotFoundError{Path: path}
}
Expand Down
0