seaweedfs/weed/s3api/s3api_objects_list_handlers.go

package s3api
import (
	"context"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
)
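
// ListBucketResultV2 is the response body for the S3 ListObjectsV2 API.
// It reuses the "ListBucketResult" XML element name, which the V2 response shares with V1.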
type ListBucketResultV2 struct {
	XMLName               xml.Name      `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
	Name                  string        `xml:"Name"`
	Prefix                string        `xml:"Prefix"`
	MaxKeys               int           `xml:"MaxKeys"`
	Delimiter             string        `xml:"Delimiter,omitempty"`
	IsTruncated           bool          `xml:"IsTruncated"`
	Contents              []ListEntry   `xml:"Contents,omitempty"`
	CommonPrefixes        []PrefixEntry `xml:"CommonPrefixes,omitempty"`
	ContinuationToken     string        `xml:"ContinuationToken,omitempty"`
	NextContinuationToken string        `xml:"NextContinuationToken,omitempty"`
	KeyCount              int           `xml:"KeyCount"`
	StartAfter            string        `xml:"StartAfter,omitempty"`
}
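
// ListObjectsV2Handler serves the ListObjectsV2 API. It maps the V2 parameters onto the V1
// listing logic: the continuation token (or start-after, when no token is given) becomes the
// marker passed to listFilerEntries, and the V1 result is repackaged as a ListBucketResultV2.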
func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {

	// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html

	// collect parameters
	bucket, _ := xhttp.GetBucketAndObject(r)
	glog.V(3).Infof("ListObjectsV2Handler %s", bucket)

	originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())

	if maxKeys < 0 {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
		return
	}
	if delimiter != "" && delimiter != "/" {
		s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
		return
	}

	marker := continuationToken
	if continuationToken == "" {
		marker = startAfter
	}

	response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}

	if len(response.Contents) == 0 {
		if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
			return
		}
	}

	responseV2 := &ListBucketResultV2{
		XMLName:               response.XMLName,
		Name:                  response.Name,
		CommonPrefixes:        response.CommonPrefixes,
		Contents:              response.Contents,
		ContinuationToken:     continuationToken,
		Delimiter:             response.Delimiter,
		IsTruncated:           response.IsTruncated,
		KeyCount:              len(response.Contents) + len(response.CommonPrefixes),
		MaxKeys:               response.MaxKeys,
		NextContinuationToken: response.NextMarker,
		Prefix:                response.Prefix,
		StartAfter:            startAfter,
	}

	writeSuccessResponseXML(w, r, responseV2)
}
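
// ListObjectsV1Handler serves the original (V1) ListObjects API, validating max-keys and the
// delimiter (only "/" is supported) before delegating to listFilerEntries.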
func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {

	// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html

	// collect parameters
	bucket, _ := xhttp.GetBucketAndObject(r)
	glog.V(3).Infof("ListObjectsV1Handler %s", bucket)

	originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())

	if maxKeys < 0 {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
		return
	}
	if delimiter != "" && delimiter != "/" {
		s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
		return
	}

	response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}

	if len(response.Contents) == 0 {
		if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
			return
		}
	}

	writeSuccessResponseXML(w, r, response)
}
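
// listFilerEntries builds a ListBucketResult by querying the filer. The request prefix is split
// into a directory part and an entry-name prefix; directories become CommonPrefixes when the
// delimiter is "/", and regular entries become Contents with size, ETag, owner and storage class.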
func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, marker string, delimiter string) (response ListBucketResult, err error) {
	// convert full path prefix into directory name and prefix for entry name
	reqDir, prefix := filepath.Split(originalPrefix)
	if strings.HasPrefix(reqDir, "/") {
		reqDir = reqDir[1:]
	}
	bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
	reqDir = fmt.Sprintf("%s%s", bucketPrefix, reqDir)
	if strings.HasSuffix(reqDir, "/") {
		// remove trailing "/"
		reqDir = reqDir[:len(reqDir)-1]
	}

	var contents []ListEntry
	var commonPrefixes []PrefixEntry
	var isTruncated bool
	var doErr error
	var nextMarker string

	// check filer
	err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		_, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, func(dir string, entry *filer_pb.Entry) {
			if entry.IsDirectory {
				if delimiter == "/" {
					commonPrefixes = append(commonPrefixes, PrefixEntry{
						Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
					})
				}
			} else {
				storageClass := "STANDARD"
				if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
					storageClass = string(v)
				}
				contents = append(contents, ListEntry{
					Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
					LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
					ETag:         "\"" + filer.ETag(entry) + "\"",
					Size:         int64(filer.FileSize(entry)),
					Owner: CanonicalUser{
						ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
						DisplayName: entry.Attributes.UserName,
					},
					StorageClass: StorageClass(storageClass),
				})
			}
		})
		if doErr != nil {
			return doErr
		}

		if !isTruncated {
			nextMarker = ""
		}

		response = ListBucketResult{
			Name:           bucket,
			Prefix:         originalPrefix,
			Marker:         marker,
			NextMarker:     nextMarker,
			MaxKeys:        maxKeys,
			Delimiter:      delimiter,
			IsTruncated:    isTruncated,
			Contents:       contents,
			CommonPrefixes: commonPrefixes,
		}

		return nil
	})

	return
}
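
// doListFilerEntries streams entries from the filer for one directory, recursing into
// sub-directories when the marker crosses a "/" boundary or when no delimiter is in effect.
// It reports how many keys were emitted, whether the listing was truncated, and the next marker.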
func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
	// invariants
	//   prefix and marker should be under dir, marker may contain "/"
	//   maxKeys should be updated for each recursion

	if prefix == "/" && delimiter == "/" {
		return
	}
	if maxKeys <= 0 {
		return
	}

	if strings.Contains(marker, "/") {
		sepIndex := strings.Index(marker, "/")
		subDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:]
		// println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys)
		subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, eachEntryFn)
		if subErr != nil {
			err = subErr
			return
		}
		counter += subCounter
		isTruncated = isTruncated || subIsTruncated
		maxKeys -= subCounter
		nextMarker = subDir + "/" + subNextMarker
		// finished processing this sub directory
		marker = subDir
	}
	if maxKeys <= 0 {
		return
	}

	// now marker is also a direct child of dir
	request := &filer_pb.ListEntriesRequest{
		Directory:          dir,
		Prefix:             prefix,
		Limit:              uint32(maxKeys + 1),
		StartFromFileName:  marker,
		InclusiveStartFrom: false,
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	stream, listErr := client.ListEntries(ctx, request)
	if listErr != nil {
		err = fmt.Errorf("list entries %+v: %v", request, listErr)
		return
	}

	for {
		resp, recvErr := stream.Recv()
		if recvErr != nil {
			if recvErr == io.EOF {
				break
			} else {
				err = fmt.Errorf("iterating entries %+v: %v", request, recvErr)
				return
			}
		}
		if counter >= maxKeys {
			isTruncated = true
			return
		}
		entry := resp.Entry
		nextMarker = entry.Name
		if entry.IsDirectory {
			// println("ListEntries", dir, "dir:", entry.Name)
			if entry.Name != ".uploads" { // FIXME no need to apply to all directories. this extra also affects maxKeys
				if delimiter != "/" {
					eachEntryFn(dir, entry)
					// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter)
					subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, eachEntryFn)
					if subErr != nil {
						err = fmt.Errorf("doListFilerEntries2: %v", subErr)
						return
					}
					// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated)
					counter += subCounter
					nextMarker = entry.Name + "/" + subNextMarker
					if subIsTruncated {
						isTruncated = true
						return
					}
				} else {
					var isEmpty bool
					if !s3a.option.AllowEmptyFolder {
						if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
							glog.Errorf("check empty folder %s: %v", dir, err)
						}
					}
					if !isEmpty {
						eachEntryFn(dir, entry)
						counter++
					}
				}
			}
		} else {
			// println("ListEntries", dir, "file:", entry.Name)
			eachEntryFn(dir, entry)
			counter++
		}
	}
	return
}
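
// getListObjectsV2Args extracts the ListObjectsV2 query parameters. For example, a request such
// as "?prefix=photos/&continuation-token=abc&max-keys=100" would yield prefix "photos/", token
// "abc" and maxkeys 100; when max-keys is absent, maxkeys falls back to maxObjectListSizeLimit.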
func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) {
	prefix = values.Get("prefix")
	token = values.Get("continuation-token")
	startAfter = values.Get("start-after")
	delimiter = values.Get("delimiter")
	if values.Get("max-keys") != "" {
		maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
	} else {
		maxkeys = maxObjectListSizeLimit
	}
	fetchOwner = values.Get("fetch-owner") == "true"
	return
}
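
// getListObjectsV1Args extracts the ListObjects (V1) query parameters, defaulting max-keys to
// maxObjectListSizeLimit when it is not supplied.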
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int) {
	prefix = values.Get("prefix")
	marker = values.Get("marker")
	delimiter = values.Get("delimiter")
	if values.Get("max-keys") != "" {
		maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
	} else {
		maxkeys = maxObjectListSizeLimit
	}
	return
}
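
// isDirectoryAllEmpty reports whether parentDir/name contains no files at any depth, paging
// through the filer in batches of 8 entries and recursing into sub-directories. When the
// directory turns out to be empty, it is also deleted via doDeleteEntry.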
func (s3a *S3ApiServer) isDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) {
	// println("+ isDirectoryAllEmpty", dir, name)
	glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name)
	defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty)

	var fileCounter int
	var subDirs []string
	currentDir := parentDir + "/" + name
	var startFrom string
	var isExhausted bool
	var foundEntry bool
	for fileCounter == 0 && !isExhausted && err == nil {
		err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error {
			foundEntry = true
			if entry.IsDirectory {
				subDirs = append(subDirs, entry.Name)
			} else {
				fileCounter++
			}
			startFrom = entry.Name
			isExhausted = isExhausted || isLast
			glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast)
			return nil
		}, startFrom, false, 8)
		if !foundEntry {
			break
		}
	}

	if err != nil {
		return false, err
	}

	if fileCounter > 0 {
		return false, nil
	}

	for _, subDir := range subDirs {
		isSubEmpty, subErr := s3a.isDirectoryAllEmpty(filerClient, currentDir, subDir)
		if subErr != nil {
			return false, subErr
		}
		if !isSubEmpty {
			return false, nil
		}
	}

	glog.V(1).Infof("deleting empty folder %s", currentDir)
	if err = doDeleteEntry(filerClient, parentDir, name, true, true); err != nil {
		return
	}

	return true, nil
}