
avoid appending directory as file path in s3 driver Walk (#4485)

Milos Gajdos 2024-10-16 21:14:56 +01:00 committed by GitHub
commit bce9fcd135
2 changed files with 91 additions and 0 deletions

@@ -1178,6 +1178,16 @@ func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, from stri
		prevDir = dir
	}

	// in some cases the _uploads dir might be empty. when this happens, it would
	// be appended twice to the walkInfos slice, once as [...]/_uploads and
	// once more erroneously as [...]/_uploads/. the easiest way to avoid this is
	// to skip appending filePath to walkInfos if it ends in "/". the loop through
	// dirs will already have handled it in that case, so it's safe to continue this
	// loop.
	if strings.HasSuffix(filePath, "/") {
		continue
	}

	walkInfos = append(walkInfos, storagedriver.FileInfoInternal{
		FileInfoFields: storagedriver.FileInfoFields{
			IsDir: false,
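
For context on the guard above: an S3 listing can return the zero-byte "directory" object itself (a key ending in "/") alongside the common prefix for the same directory, and path.Split on such a key yields an empty file name. A minimal standalone sketch of that behavior (not part of the commit; the sample keys are illustrative):

    package main

    import (
    	"fmt"
    	"path"
    	"strings"
    )

    func main() {
    	// One real file key and one empty-directory placeholder key, as an
    	// S3 listing might return them.
    	keys := []string{
    		"/docker/registry/v2/blobs/sha256/04/046909",
    		"/docker/registry/v2/repositories/testns/testimg/_uploads/",
    	}
    	for _, k := range keys {
    		if strings.HasSuffix(k, "/") {
    			// The loop over dirs has already recorded this path; appending
    			// it again would produce the duplicate [...]/_uploads entry.
    			fmt.Printf("skipping directory placeholder: %s\n", k)
    			continue
    		}
    		dir, file := path.Split(k)
    		fmt.Printf("dir=%q file=%q\n", dir, file)
    	}
    }

Splitting the placeholder key would give file="", which is exactly the zero-length file part the new test below asserts never reaches a Walk callback.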
@@ -1541,6 +1551,7 @@ func (w *writer) Write(p []byte) (int, error) {
func (w *writer) Size() int64 {
	return w.size
}

func (w *writer) Close() error {
	if w.closed {
		return fmt.Errorf("already closed")

@@ -514,6 +514,86 @@ func TestDelete(t *testing.T) {
	}
}

func TestWalkEmptyUploadsDir(t *testing.T) {
	skipCheck(t)
	ctx := dcontext.Background()
	drvr, err := s3DriverConstructor("s3walktest", s3.StorageClassStandard)
	if err != nil {
		t.Fatalf("unexpected error creating driver with standard storage: %v", err)
	}

	fileset := []string{
		"/docker/registry/v2/blobs/sha256/04/046909",
		"/docker/registry/v2/blobs/sha256/07/071a45",
		"/docker/registry/v2/repositories/testns/testimg/_layers/sha256/2a43dc",
		"/docker/registry/v2/repositories/testns/testimg/_layers/sha256/3ae7e8",
		"/docker/registry/v2/repositories/testns/testimg/_manifests/revisions/sha256/3ae7e8",
		"/docker/registry/v2/repositories/testns/testimg/_uploads/",
	}

	// create file structure matching fileset above.
	// we use the s3 sdk directly because the driver doesn't allow creation
	// of empty directories, which we need to simulate cases when purgeuploads
	// leaves behind empty directories.
	created := make([]string, 0, len(fileset))
	d := drvr.baseEmbed.Base.StorageDriver.(*driver)
	for _, p := range fileset {
		_, err := d.S3.PutObjectWithContext(ctx, &s3.PutObjectInput{
			Bucket:               aws.String(d.Bucket),
			Key:                  aws.String(d.s3Path(p)),
			ContentType:          d.getContentType(),
			ACL:                  d.getACL(),
			ServerSideEncryption: d.getEncryptionMode(),
			SSEKMSKeyId:          d.getSSEKMSKeyID(),
			StorageClass:         d.getStorageClass(),
			Body:                 bytes.NewReader([]byte("content " + p)),
		})
		if err != nil {
			fmt.Printf("unable to create file %s: %s\n", p, err)
			continue
		}
		created = append(created, p)
	}

	// use a custom cleanup here because we create an empty dir during this test's
	// setup, and the regular driver.Delete will error when trying to delete it.
	defer func() {
		s3Objects := make([]*s3.ObjectIdentifier, 0, len(fileset))
		for _, p := range created {
			s3Objects = append(s3Objects, &s3.ObjectIdentifier{
				Key: aws.String(d.s3Path(p)),
			})
		}

		resp, err := d.S3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
			Bucket: aws.String(d.Bucket),
			Delete: &s3.Delete{
				Objects: s3Objects,
				Quiet:   aws.Bool(false),
			},
		})
		if err != nil {
			t.Logf("DeleteObjectsWithContext resp: %+v", resp)
			t.Fatalf("cleanup failed: %s", err)
		}
	}()

	err = drvr.Walk(ctx, "/docker/registry/v2", func(fileInfo storagedriver.FileInfo) error {
		// attempt to split filepath into dir and filename, just like purgeuploads would.
		filePath := fileInfo.Path()
		_, file := path.Split(filePath)
		if len(file) == 0 {
			t.Logf("fileInfo.Path(): %s", fileInfo.Path())
			t.Fatalf("File part of fileInfo.Path() had zero length, this shouldn't happen.")
		}
		return nil
	}, func(*storagedriver.WalkOptions) {})
	if err != nil {
		t.Fatalf("driver.Walk failed: %s", err)
	}
}

func TestWalk(t *testing.T) {
	skipCheck(t)
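
The test above seeds the empty _uploads directory by writing objects through the SDK directly, since the driver itself refuses to create empty directories. As a rough standalone sketch of just that setup step, assuming aws-sdk-go v1 and a hypothetical package and helper name: an empty directory can be simulated with a zero-byte object whose key ends in "/".

    package s3walksketch

    import (
    	"bytes"
    	"context"
    	"strings"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    // putEmptyDir is a hypothetical helper, not part of the commit. It
    // simulates what an interrupted purge can leave behind by writing a
    // zero-byte object whose key ends in "/", which S3 tooling treats as
    // an empty directory placeholder.
    func putEmptyDir(ctx context.Context, client *s3.S3, bucket, dir string) error {
    	if !strings.HasSuffix(dir, "/") {
    		dir += "/"
    	}
    	_, err := client.PutObjectWithContext(ctx, &s3.PutObjectInput{
    		Bucket: aws.String(bucket),
    		Key:    aws.String(dir),
    		Body:   bytes.NewReader(nil),
    	})
    	return err
    }

Walking a tree seeded this way is what used to surface the duplicate [...]/_uploads/ entry that the fix in doWalk now skips.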