2018-07-22 16:14:36 +08:00
|
|
|
package s3api
|
|
|
|
|
|
|
|
import (
|
2018-07-22 16:15:11 +08:00
|
|
|
"context"
|
2020-08-10 05:35:53 +08:00
|
|
|
"encoding/xml"
|
2018-07-22 16:15:11 +08:00
|
|
|
"fmt"
|
2022-07-29 15:17:28 +08:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/glog"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
|
2019-12-13 16:22:37 +08:00
|
|
|
"io"
|
2018-07-22 16:14:36 +08:00
|
|
|
"net/http"
|
|
|
|
"net/url"
|
2018-07-22 16:15:11 +08:00
|
|
|
"strconv"
|
2019-07-09 03:37:20 +08:00
|
|
|
"strings"
|
2018-07-22 16:14:36 +08:00
|
|
|
"time"
|
2018-07-24 16:38:08 +08:00
|
|
|
|
2022-07-29 15:17:28 +08:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/filer"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
|
2018-07-22 16:14:36 +08:00
|
|
|
)
|
|
|
|
|
2020-08-10 05:35:53 +08:00
|
|
|
// ListBucketResultV2 is the XML response body for the ListObjectsV2 API.
// Note the XML element name is the same as for v1 ("ListBucketResult");
// only the pagination fields differ (ContinuationToken/StartAfter/KeyCount
// instead of v1's Marker/NextMarker).
type ListBucketResultV2 struct {
	XMLName   xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
	Name      string   `xml:"Name"`   // bucket name
	Prefix    string   `xml:"Prefix"` // prefix the listing was restricted to
	MaxKeys   int      `xml:"MaxKeys"`
	Delimiter string   `xml:"Delimiter,omitempty"`
	// IsTruncated is true when more results are available than MaxKeys allowed.
	IsTruncated    bool          `xml:"IsTruncated"`
	Contents       []ListEntry   `xml:"Contents,omitempty"`
	CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"`
	// ContinuationToken echoes the token the client sent; NextContinuationToken
	// is what the client should send to fetch the next page.
	ContinuationToken     string `xml:"ContinuationToken,omitempty"`
	NextContinuationToken string `xml:"NextContinuationToken,omitempty"`
	// KeyCount is len(Contents) + len(CommonPrefixes) for this page.
	KeyCount   int    `xml:"KeyCount"`
	StartAfter string `xml:"StartAfter,omitempty"`
}
|
|
|
|
|
2018-07-22 17:04:07 +08:00
|
|
|
func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
|
|
|
// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
|
|
|
|
|
|
|
|
// collect parameters
|
2022-05-31 13:57:41 +08:00
|
|
|
bucket, _ := s3_constants.GetBucketAndObject(r)
|
2021-09-19 15:18:59 +08:00
|
|
|
glog.V(3).Infof("ListObjectsV2Handler %s", bucket)
|
2018-07-23 16:55:26 +08:00
|
|
|
|
2020-08-10 05:35:53 +08:00
|
|
|
originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
|
2018-07-22 17:04:07 +08:00
|
|
|
|
|
|
|
if maxKeys < 0 {
|
2021-11-01 09:05:34 +08:00
|
|
|
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
|
2018-07-22 17:04:07 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-08-10 05:35:53 +08:00
|
|
|
marker := continuationToken
|
|
|
|
if continuationToken == "" {
|
2018-07-22 17:04:07 +08:00
|
|
|
marker = startAfter
|
|
|
|
}
|
|
|
|
|
2020-08-01 01:08:30 +08:00
|
|
|
response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
|
2018-07-22 17:04:07 +08:00
|
|
|
|
|
|
|
if err != nil {
|
2021-11-01 09:05:34 +08:00
|
|
|
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
|
2018-07-22 17:04:07 +08:00
|
|
|
return
|
|
|
|
}
|
2021-05-03 12:30:37 +08:00
|
|
|
|
|
|
|
if len(response.Contents) == 0 {
|
|
|
|
if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
|
2021-11-01 09:05:34 +08:00
|
|
|
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
|
2021-05-03 12:30:37 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-10 05:35:53 +08:00
|
|
|
responseV2 := &ListBucketResultV2{
|
|
|
|
XMLName: response.XMLName,
|
|
|
|
Name: response.Name,
|
|
|
|
CommonPrefixes: response.CommonPrefixes,
|
|
|
|
Contents: response.Contents,
|
|
|
|
ContinuationToken: continuationToken,
|
|
|
|
Delimiter: response.Delimiter,
|
|
|
|
IsTruncated: response.IsTruncated,
|
2021-01-11 19:50:45 +08:00
|
|
|
KeyCount: len(response.Contents) + len(response.CommonPrefixes),
|
2020-08-10 05:35:53 +08:00
|
|
|
MaxKeys: response.MaxKeys,
|
|
|
|
NextContinuationToken: response.NextMarker,
|
|
|
|
Prefix: response.Prefix,
|
|
|
|
StartAfter: startAfter,
|
|
|
|
}
|
2018-07-22 17:04:07 +08:00
|
|
|
|
2021-11-01 09:02:08 +08:00
|
|
|
writeSuccessResponseXML(w, r, responseV2)
|
2018-07-22 17:04:07 +08:00
|
|
|
}
|
|
|
|
|
2018-07-22 16:14:36 +08:00
|
|
|
func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
|
|
|
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
|
|
|
|
|
|
|
|
// collect parameters
|
2022-05-31 13:57:41 +08:00
|
|
|
bucket, _ := s3_constants.GetBucketAndObject(r)
|
2021-09-19 15:18:59 +08:00
|
|
|
glog.V(3).Infof("ListObjectsV1Handler %s", bucket)
|
2018-07-22 16:14:36 +08:00
|
|
|
|
|
|
|
originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
|
|
|
|
|
|
|
|
if maxKeys < 0 {
|
2021-11-01 09:05:34 +08:00
|
|
|
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
|
2018-07-22 16:14:36 +08:00
|
|
|
return
|
|
|
|
}
|
2018-07-22 17:04:07 +08:00
|
|
|
|
2020-08-01 01:08:30 +08:00
|
|
|
response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
|
2018-07-22 17:04:07 +08:00
|
|
|
|
|
|
|
if err != nil {
|
2021-11-01 09:05:34 +08:00
|
|
|
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
|
2018-07-22 17:04:07 +08:00
|
|
|
return
|
2018-07-22 16:14:36 +08:00
|
|
|
}
|
|
|
|
|
2021-05-03 12:30:37 +08:00
|
|
|
if len(response.Contents) == 0 {
|
|
|
|
if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
|
2021-11-01 09:05:34 +08:00
|
|
|
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
|
2021-05-03 12:30:37 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-01 09:02:08 +08:00
|
|
|
writeSuccessResponseXML(w, r, response)
|
2018-07-22 17:04:07 +08:00
|
|
|
}
|
|
|
|
|
2022-08-15 15:30:19 +08:00
|
|
|
// listFilerEntries produces a v1 ListBucketResult for the given bucket by
// paging through the filer. originalPrefix restricts keys, originalMarker is
// the exclusive starting point, and a delimiter (when non-empty) rolls keys
// up into CommonPrefixes. Each emitted object or common prefix consumes one
// unit of maxKeys.
func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, originalMarker string, delimiter string) (response ListBucketResult, err error) {
	// convert full path prefix into directory name and prefix for entry name
	requestDir, prefix, marker := normalizePrefixMarker(originalPrefix, originalMarker)
	bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
	// reqDir is the absolute filer directory the listing starts in
	reqDir := bucketPrefix[:len(bucketPrefix)-1]
	if requestDir != "" {
		reqDir = fmt.Sprintf("%s%s", bucketPrefix, requestDir)
	}

	var contents []ListEntry
	var commonPrefixes []PrefixEntry
	var doErr error
	var nextMarker string
	cursor := &ListingCursor{
		maxKeys:               maxKeys,
		prefixEndsOnDelimiter: strings.HasSuffix(originalPrefix, "/") && len(originalMarker) == 0,
	}

	// check filer
	err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		// keep fetching pages until the result is truncated or exhausted
		for {
			empty := true
			nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, cursor, marker, delimiter, false, func(dir string, entry *filer_pb.Entry) {
				empty = false
				if entry.IsDirectory {
					if entry.IsDirectoryKeyObject() {
						// a directory that is itself an S3 key (e.g. "dir/") is listed as an object
						contents = append(contents, ListEntry{
							Key:          fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
							LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
							ETag:         "\"" + filer.ETag(entry) + "\"",
							Owner: CanonicalUser{
								ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
								DisplayName: entry.Attributes.UserName,
							},
							StorageClass: "STANDARD",
						})
						cursor.maxKeys--
						// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
					} else if delimiter == "/" { // A response can contain CommonPrefixes only if you specify a delimiter.
						commonPrefixes = append(commonPrefixes, PrefixEntry{
							Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
						})
						//All of the keys (up to 1,000) rolled up into a common prefix count as a single return when calculating the number of returns.
						cursor.maxKeys--
					}
				} else {
					var delimiterFound bool
					if delimiter != "" {
						// keys that contain the same string between the prefix and the first occurrence of the delimiter are grouped together as a commonPrefix.
						// extract the string between the prefix and the delimiter and add it to the commonPrefixes if it's unique.
						undelimitedPath := fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):]

						// take into account a prefix if supplied while delimiting.
						undelimitedPath = strings.TrimPrefix(undelimitedPath, originalPrefix)

						delimitedPath := strings.SplitN(undelimitedPath, delimiter, 2)

						if len(delimitedPath) == 2 {
							// S3 clients expect the delimited prefix to contain the delimiter and prefix.
							delimitedPrefix := originalPrefix + delimitedPath[0] + delimiter

							// only add the common prefix once
							for i := range commonPrefixes {
								if commonPrefixes[i].Prefix == delimitedPrefix {
									delimiterFound = true
									break
								}
							}

							if !delimiterFound {
								commonPrefixes = append(commonPrefixes, PrefixEntry{
									Prefix: delimitedPrefix,
								})
								cursor.maxKeys--
								delimiterFound = true
							}
						}
					}
					if !delimiterFound {
						// plain object entry; storage class comes from the extended
						// attributes when present, otherwise defaults to STANDARD
						storageClass := "STANDARD"
						if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok {
							storageClass = string(v)
						}
						contents = append(contents, ListEntry{
							Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
							LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
							ETag:         "\"" + filer.ETag(entry) + "\"",
							Size:         int64(filer.FileSize(entry)),
							Owner: CanonicalUser{
								ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
								DisplayName: entry.Attributes.UserName,
							},
							StorageClass: StorageClass(storageClass),
						})
						cursor.maxKeys--
					}
				}
			})
			if doErr != nil {
				return doErr
			}

			if cursor.isTruncated {
				// nextMarker is relative to reqDir; re-anchor it to the bucket root
				if requestDir != "" {
					nextMarker = requestDir + "/" + nextMarker
				}
				break
			} else if empty || strings.HasSuffix(originalPrefix, "/") {
				nextMarker = ""
				break
			} else {
				// start next loop
				marker = nextMarker
			}
		}

		response = ListBucketResult{
			Name:           bucket,
			Prefix:         originalPrefix,
			Marker:         originalMarker,
			NextMarker:     nextMarker,
			MaxKeys:        maxKeys,
			Delimiter:      delimiter,
			IsTruncated:    cursor.isTruncated,
			Contents:       contents,
			CommonPrefixes: commonPrefixes,
		}

		return nil
	})

	return
}
|
2018-07-22 16:14:36 +08:00
|
|
|
|
2022-08-15 15:30:19 +08:00
|
|
|
// ListingCursor carries the mutable listing state shared across the
// recursive doListFilerEntries calls.
type ListingCursor struct {
	maxKeys               int  // remaining entries/prefixes to emit; decremented by the caller's callback
	isTruncated           bool // set once maxKeys is exhausted while entries remain
	prefixEndsOnDelimiter bool // true when the request prefix ends with "/" and no marker was given
}
|
|
|
|
|
|
|
|
// the prefix and marker may be in different directories
|
|
|
|
// normalizePrefixMarker ensures the prefix and marker both starts from the same directory
|
|
|
|
func normalizePrefixMarker(prefix, marker string) (alignedDir, alignedPrefix, alignedMarker string) {
|
|
|
|
// alignedDir should not end with "/"
|
|
|
|
// alignedDir, alignedPrefix, alignedMarker should only have "/" in middle
|
2023-04-13 07:53:49 +08:00
|
|
|
if len(marker) == 0 {
|
|
|
|
prefix = strings.Trim(prefix, "/")
|
|
|
|
} else {
|
|
|
|
prefix = strings.TrimLeft(prefix, "/")
|
|
|
|
}
|
2022-08-15 15:30:19 +08:00
|
|
|
marker = strings.TrimLeft(marker, "/")
|
|
|
|
if prefix == "" {
|
|
|
|
return "", "", marker
|
|
|
|
}
|
|
|
|
if marker == "" {
|
|
|
|
alignedDir, alignedPrefix = toDirAndName(prefix)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if !strings.HasPrefix(marker, prefix) {
|
|
|
|
// something wrong
|
|
|
|
return "", prefix, marker
|
|
|
|
}
|
|
|
|
if strings.HasPrefix(marker, prefix+"/") {
|
|
|
|
alignedDir = prefix
|
|
|
|
alignedPrefix = ""
|
|
|
|
alignedMarker = marker[len(alignedDir)+1:]
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
alignedDir, alignedPrefix = toDirAndName(prefix)
|
|
|
|
if alignedDir != "" {
|
|
|
|
alignedMarker = marker[len(alignedDir)+1:]
|
|
|
|
} else {
|
|
|
|
alignedMarker = marker
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2023-04-12 14:52:35 +08:00
|
|
|
|
2022-08-15 15:30:19 +08:00
|
|
|
// toDirAndName splits a slash-separated path at its LAST "/" into the
// parent directory and the final path element. A path without "/" is all
// name and no dir.
func toDirAndName(dirAndName string) (dir, name string) {
	sep := strings.LastIndex(dirAndName, "/")
	if sep < 0 {
		return "", dirAndName
	}
	return dirAndName[:sep], dirAndName[sep+1:]
}
|
2023-04-12 14:52:35 +08:00
|
|
|
|
2022-08-15 15:30:19 +08:00
|
|
|
// toParentAndDescendants splits a slash-separated path at its FIRST "/"
// into the top-level parent and everything below it. A path without "/"
// is all name and no dir.
func toParentAndDescendants(dirAndName string) (dir, name string) {
	if before, after, found := strings.Cut(dirAndName, "/"); found {
		return before, after
	}
	return "", dirAndName
}
|
|
|
|
|
|
|
|
func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, cursor *ListingCursor, marker, delimiter string, inclusiveStartFrom bool, eachEntryFn func(dir string, entry *filer_pb.Entry)) (nextMarker string, err error) {
|
2020-08-10 05:35:53 +08:00
|
|
|
// invariants
|
|
|
|
// prefix and marker should be under dir, marker may contain "/"
|
|
|
|
// maxKeys should be updated for each recursion
|
2023-04-12 14:52:35 +08:00
|
|
|
// glog.V(4).Infof("doListFilerEntries dir: %s, prefix: %s, marker %s, maxKeys: %d, prefixEndsOnDelimiter: %+v", dir, prefix, marker, cursor.maxKeys, cursor.prefixEndsOnDelimiter)
|
2020-08-10 05:35:53 +08:00
|
|
|
if prefix == "/" && delimiter == "/" {
|
|
|
|
return
|
|
|
|
}
|
2022-08-15 15:30:19 +08:00
|
|
|
if cursor.maxKeys <= 0 {
|
2020-08-10 05:35:53 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if strings.Contains(marker, "/") {
|
2022-08-15 15:30:19 +08:00
|
|
|
subDir, subMarker := toParentAndDescendants(marker)
|
|
|
|
// println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker)
|
|
|
|
subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", cursor, subMarker, delimiter, false, eachEntryFn)
|
|
|
|
if subErr != nil {
|
|
|
|
err = subErr
|
|
|
|
return
|
2020-08-10 05:35:53 +08:00
|
|
|
}
|
2022-08-15 15:30:19 +08:00
|
|
|
nextMarker = subDir + "/" + subNextMarker
|
2023-12-23 03:29:54 +08:00
|
|
|
// finished processing this subdirectory
|
2022-08-15 15:30:19 +08:00
|
|
|
marker = subDir
|
2020-08-10 05:35:53 +08:00
|
|
|
}
|
2022-10-25 12:15:37 +08:00
|
|
|
if cursor.isTruncated {
|
2021-11-08 04:39:36 +08:00
|
|
|
return
|
|
|
|
}
|
2020-08-10 05:35:53 +08:00
|
|
|
|
|
|
|
// now marker is also a direct child of dir
|
|
|
|
request := &filer_pb.ListEntriesRequest{
|
|
|
|
Directory: dir,
|
|
|
|
Prefix: prefix,
|
2022-08-15 15:30:19 +08:00
|
|
|
Limit: uint32(cursor.maxKeys + 2), // bucket root directory needs to skip additional s3_constants.MultipartUploadsFolder folder
|
2020-08-10 05:35:53 +08:00
|
|
|
StartFromFileName: marker,
|
2022-05-30 21:09:08 +08:00
|
|
|
InclusiveStartFrom: inclusiveStartFrom,
|
2020-08-10 05:35:53 +08:00
|
|
|
}
|
2023-04-12 14:52:35 +08:00
|
|
|
if cursor.prefixEndsOnDelimiter {
|
|
|
|
request.Limit = uint32(1)
|
|
|
|
}
|
2020-08-10 05:35:53 +08:00
|
|
|
|
2020-09-10 03:07:15 +08:00
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
stream, listErr := client.ListEntries(ctx, request)
|
2020-08-10 05:35:53 +08:00
|
|
|
if listErr != nil {
|
|
|
|
err = fmt.Errorf("list entires %+v: %v", request, listErr)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
resp, recvErr := stream.Recv()
|
|
|
|
if recvErr != nil {
|
|
|
|
if recvErr == io.EOF {
|
|
|
|
break
|
|
|
|
} else {
|
|
|
|
err = fmt.Errorf("iterating entires %+v: %v", request, recvErr)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
2022-08-15 15:30:19 +08:00
|
|
|
if cursor.maxKeys <= 0 {
|
|
|
|
cursor.isTruncated = true
|
2020-08-10 05:35:53 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
entry := resp.Entry
|
2022-08-15 15:30:19 +08:00
|
|
|
nextMarker = entry.Name
|
2023-04-12 14:52:35 +08:00
|
|
|
if cursor.prefixEndsOnDelimiter {
|
|
|
|
if entry.Name == prefix && entry.IsDirectory {
|
2023-04-12 23:47:20 +08:00
|
|
|
if delimiter != "/" {
|
|
|
|
cursor.prefixEndsOnDelimiter = false
|
|
|
|
}
|
2023-04-12 14:52:35 +08:00
|
|
|
} else {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
2020-08-10 05:35:53 +08:00
|
|
|
if entry.IsDirectory {
|
2023-04-12 14:52:35 +08:00
|
|
|
// glog.V(4).Infof("List Dir Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys)
|
2022-06-29 19:21:16 +08:00
|
|
|
if entry.Name == s3_constants.MultipartUploadsFolder { // FIXME no need to apply to all directories. this extra also affects maxKeys
|
2022-05-30 21:09:08 +08:00
|
|
|
continue
|
|
|
|
}
|
2023-04-12 23:47:20 +08:00
|
|
|
if delimiter != "/" || cursor.prefixEndsOnDelimiter {
|
|
|
|
if cursor.prefixEndsOnDelimiter {
|
|
|
|
cursor.prefixEndsOnDelimiter = false
|
2023-04-13 07:53:49 +08:00
|
|
|
if entry.IsDirectoryKeyObject() {
|
|
|
|
eachEntryFn(dir, entry)
|
|
|
|
}
|
2023-04-12 23:47:20 +08:00
|
|
|
} else {
|
|
|
|
eachEntryFn(dir, entry)
|
|
|
|
}
|
2022-08-15 15:30:19 +08:00
|
|
|
subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", cursor, "", delimiter, false, eachEntryFn)
|
2022-05-30 21:09:08 +08:00
|
|
|
if subErr != nil {
|
|
|
|
err = fmt.Errorf("doListFilerEntries2: %v", subErr)
|
|
|
|
return
|
|
|
|
}
|
2022-08-15 15:30:19 +08:00
|
|
|
// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "subNextMarker", subNextMarker)
|
|
|
|
nextMarker = entry.Name + "/" + subNextMarker
|
|
|
|
if cursor.isTruncated {
|
2022-05-30 21:09:08 +08:00
|
|
|
return
|
|
|
|
}
|
2022-08-15 15:30:19 +08:00
|
|
|
// println("doListFilerEntries2 nextMarker", nextMarker)
|
|
|
|
} else {
|
2022-05-30 21:09:08 +08:00
|
|
|
var isEmpty bool
|
2023-04-25 23:31:14 +08:00
|
|
|
if !s3a.option.AllowEmptyFolder && entry.IsOlderDir() {
|
2022-08-03 15:10:47 +08:00
|
|
|
if isEmpty, err = s3a.ensureDirectoryAllEmpty(client, dir, entry.Name); err != nil {
|
2022-05-30 21:09:08 +08:00
|
|
|
glog.Errorf("check empty folder %s: %v", dir, err)
|
2022-05-30 18:03:27 +08:00
|
|
|
}
|
2020-08-10 05:35:53 +08:00
|
|
|
}
|
2022-05-30 21:09:08 +08:00
|
|
|
if !isEmpty {
|
|
|
|
eachEntryFn(dir, entry)
|
|
|
|
}
|
2020-08-10 05:35:53 +08:00
|
|
|
}
|
2022-08-15 15:30:19 +08:00
|
|
|
} else {
|
2020-08-10 05:35:53 +08:00
|
|
|
eachEntryFn(dir, entry)
|
2023-04-12 14:52:35 +08:00
|
|
|
// glog.V(4).Infof("List File Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys)
|
2020-08-10 05:35:53 +08:00
|
|
|
}
|
2023-04-12 23:47:20 +08:00
|
|
|
if cursor.prefixEndsOnDelimiter {
|
|
|
|
cursor.prefixEndsOnDelimiter = false
|
|
|
|
}
|
2020-08-10 05:35:53 +08:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-07-22 17:04:07 +08:00
|
|
|
func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) {
|
|
|
|
prefix = values.Get("prefix")
|
|
|
|
token = values.Get("continuation-token")
|
|
|
|
startAfter = values.Get("start-after")
|
|
|
|
delimiter = values.Get("delimiter")
|
|
|
|
if values.Get("max-keys") != "" {
|
|
|
|
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
|
|
|
|
} else {
|
|
|
|
maxkeys = maxObjectListSizeLimit
|
|
|
|
}
|
|
|
|
fetchOwner = values.Get("fetch-owner") == "true"
|
|
|
|
return
|
2018-07-22 16:14:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int) {
|
|
|
|
prefix = values.Get("prefix")
|
|
|
|
marker = values.Get("marker")
|
|
|
|
delimiter = values.Get("delimiter")
|
|
|
|
if values.Get("max-keys") != "" {
|
|
|
|
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
|
|
|
|
} else {
|
|
|
|
maxkeys = maxObjectListSizeLimit
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2020-12-12 19:38:34 +08:00
|
|
|
|
2022-08-03 15:10:47 +08:00
|
|
|
// ensureDirectoryAllEmpty reports whether parentDir/name contains no files,
// recursing into subdirectories. As a side effect, a directory found to be
// completely empty is DELETED from the filer before returning true.
func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) {
	// println("+ ensureDirectoryAllEmpty", dir, name)
	glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name)
	defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty)
	var fileCounter int
	var subDirs []string
	currentDir := parentDir + "/" + name
	var startFrom string
	var isExhausted bool
	var foundEntry bool
	// page through the directory (8 entries at a time) until a file is found,
	// the directory is exhausted, or an error occurs
	for fileCounter == 0 && !isExhausted && err == nil {
		err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error {
			foundEntry = true
			if entry.IsOlderDir() {
				subDirs = append(subDirs, entry.Name)
			} else {
				fileCounter++
			}
			// continue the next page after the last entry seen
			startFrom = entry.Name
			isExhausted = isExhausted || isLast
			glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast)
			return nil
		}, startFrom, false, 8)
		// NOTE(review): foundEntry is never reset between pages, so this break
		// only fires when the very first page is empty — confirm intended
		if !foundEntry {
			break
		}
	}

	if err != nil {
		return false, err
	}

	// any direct file makes the directory non-empty
	if fileCounter > 0 {
		return false, nil
	}

	// otherwise every subdirectory must be (recursively) empty too
	for _, subDir := range subDirs {
		isSubEmpty, subErr := s3a.ensureDirectoryAllEmpty(filerClient, currentDir, subDir)
		if subErr != nil {
			return false, subErr
		}
		if !isSubEmpty {
			return false, nil
		}
	}

	// the directory is empty: remove it so it no longer shows up in listings
	glog.V(1).Infof("deleting empty folder %s", currentDir)
	if err = doDeleteEntry(filerClient, parentDir, name, true, false); err != nil {
		return
	}

	return true, nil
}
|