2018-07-22 16:14:36 +08:00
|
|
|
package s3api
|
|
|
|
|
|
|
|
import (
|
2018-07-22 16:15:11 +08:00
|
|
|
"context"
|
2020-08-10 05:35:53 +08:00
|
|
|
"encoding/xml"
|
2018-07-22 16:15:11 +08:00
|
|
|
"fmt"
|
2019-12-13 16:22:37 +08:00
|
|
|
"io"
|
2018-07-22 16:14:36 +08:00
|
|
|
"net/http"
|
|
|
|
"net/url"
|
|
|
|
"path/filepath"
|
2018-07-22 16:15:11 +08:00
|
|
|
"strconv"
|
2019-07-09 03:37:20 +08:00
|
|
|
"strings"
|
2018-07-22 16:14:36 +08:00
|
|
|
"time"
|
2018-07-24 16:38:08 +08:00
|
|
|
|
2020-09-01 15:21:19 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
2018-07-24 16:38:08 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
2020-10-29 16:05:40 +08:00
|
|
|
xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
|
2018-07-22 16:14:36 +08:00
|
|
|
)
|
|
|
|
|
2020-08-10 05:35:53 +08:00
|
|
|
// ListBucketResultV2 is the XML response body for the S3 ListObjectsV2 API.
// It reuses the V1 "ListBucketResult" element name (per the AWS schema) but
// carries the V2-specific pagination fields (continuation tokens, KeyCount,
// StartAfter). Field order is significant: it fixes the XML element order.
type ListBucketResultV2 struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
	// Name is the bucket name.
	Name string `xml:"Name"`
	// Prefix is the request's key prefix, echoed back.
	Prefix string `xml:"Prefix"`
	// MaxKeys is the request's maximum number of keys, echoed back.
	MaxKeys int `xml:"MaxKeys"`
	// Delimiter groups keys sharing a common prefix (omitted when unset).
	Delimiter string `xml:"Delimiter,omitempty"`
	// IsTruncated indicates more results are available beyond this page.
	IsTruncated bool `xml:"IsTruncated"`
	// Contents lists the object entries on this page.
	Contents []ListEntry `xml:"Contents,omitempty"`
	// CommonPrefixes lists the rolled-up "directory" prefixes.
	CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"`
	// ContinuationToken echoes the token the client supplied.
	ContinuationToken string `xml:"ContinuationToken,omitempty"`
	// NextContinuationToken is the token to use for the next page.
	NextContinuationToken string `xml:"NextContinuationToken,omitempty"`
	// KeyCount is the number of keys returned in this response.
	KeyCount int `xml:"KeyCount"`
	// StartAfter echoes the request's start-after key.
	StartAfter string `xml:"StartAfter,omitempty"`
}
|
|
|
|
|
2018-07-22 17:04:07 +08:00
|
|
|
func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
|
|
|
// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
|
|
|
|
|
|
|
|
// collect parameters
|
2020-07-27 03:58:58 +08:00
|
|
|
bucket, _ := getBucketAndObject(r)
|
2018-07-23 16:55:26 +08:00
|
|
|
|
2020-08-10 05:35:53 +08:00
|
|
|
originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
|
2018-07-22 17:04:07 +08:00
|
|
|
|
|
|
|
if maxKeys < 0 {
|
2020-09-20 05:09:58 +08:00
|
|
|
writeErrorResponse(w, s3err.ErrInvalidMaxKeys, r.URL)
|
2018-07-22 17:04:07 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
if delimiter != "" && delimiter != "/" {
|
2020-09-20 05:09:58 +08:00
|
|
|
writeErrorResponse(w, s3err.ErrNotImplemented, r.URL)
|
2018-07-22 17:04:07 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-08-10 05:35:53 +08:00
|
|
|
marker := continuationToken
|
|
|
|
if continuationToken == "" {
|
2018-07-22 17:04:07 +08:00
|
|
|
marker = startAfter
|
|
|
|
}
|
|
|
|
|
2020-08-01 01:08:30 +08:00
|
|
|
response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
|
2018-07-22 17:04:07 +08:00
|
|
|
|
|
|
|
if err != nil {
|
2020-09-20 05:09:58 +08:00
|
|
|
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
|
2018-07-22 17:04:07 +08:00
|
|
|
return
|
|
|
|
}
|
2020-08-10 05:35:53 +08:00
|
|
|
responseV2 := &ListBucketResultV2{
|
|
|
|
XMLName: response.XMLName,
|
|
|
|
Name: response.Name,
|
|
|
|
CommonPrefixes: response.CommonPrefixes,
|
|
|
|
Contents: response.Contents,
|
|
|
|
ContinuationToken: continuationToken,
|
|
|
|
Delimiter: response.Delimiter,
|
|
|
|
IsTruncated: response.IsTruncated,
|
|
|
|
KeyCount: len(response.Contents),
|
|
|
|
MaxKeys: response.MaxKeys,
|
|
|
|
NextContinuationToken: response.NextMarker,
|
|
|
|
Prefix: response.Prefix,
|
|
|
|
StartAfter: startAfter,
|
|
|
|
}
|
2018-07-22 17:04:07 +08:00
|
|
|
|
2020-08-10 05:35:53 +08:00
|
|
|
writeSuccessResponseXML(w, encodeResponse(responseV2))
|
2018-07-22 17:04:07 +08:00
|
|
|
}
|
|
|
|
|
2018-07-22 16:14:36 +08:00
|
|
|
func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
|
|
|
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
|
|
|
|
|
|
|
|
// collect parameters
|
2020-07-27 03:58:58 +08:00
|
|
|
bucket, _ := getBucketAndObject(r)
|
2018-07-22 16:14:36 +08:00
|
|
|
|
|
|
|
originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
|
|
|
|
|
|
|
|
if maxKeys < 0 {
|
2020-09-20 05:09:58 +08:00
|
|
|
writeErrorResponse(w, s3err.ErrInvalidMaxKeys, r.URL)
|
2018-07-22 16:14:36 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
if delimiter != "" && delimiter != "/" {
|
2020-09-20 05:09:58 +08:00
|
|
|
writeErrorResponse(w, s3err.ErrNotImplemented, r.URL)
|
2018-07-22 17:04:07 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-08-01 01:08:30 +08:00
|
|
|
response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
|
2018-07-22 17:04:07 +08:00
|
|
|
|
|
|
|
if err != nil {
|
2020-09-20 05:09:58 +08:00
|
|
|
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
|
2018-07-22 17:04:07 +08:00
|
|
|
return
|
2018-07-22 16:14:36 +08:00
|
|
|
}
|
|
|
|
|
2018-07-22 17:04:07 +08:00
|
|
|
writeSuccessResponseXML(w, encodeResponse(response))
|
|
|
|
}
|
|
|
|
|
2020-08-10 05:35:53 +08:00
|
|
|
// listFilerEntries lists bucket contents by querying the filer, and builds the
// V1-shaped ListBucketResult (the V2 handler converts it afterwards).
//
// originalPrefix is the full key prefix from the request; it is split into a
// directory part and an entry-name prefix. marker is the exclusive start key.
// Only delimiter == "" or "/" is supported (validated by the callers).
func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, marker string, delimiter string) (response ListBucketResult, err error) {
	// convert full path prefix into directory name and prefix for entry name
	reqDir, prefix := filepath.Split(originalPrefix)
	if strings.HasPrefix(reqDir, "/") {
		reqDir = reqDir[1:]
	}
	// bucketPrefix (with trailing "/") is later stripped from filer paths to
	// recover bucket-relative object keys
	bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
	reqDir = fmt.Sprintf("%s%s", bucketPrefix, reqDir)
	if strings.HasSuffix(reqDir, "/") {
		// remove trailing "/"
		reqDir = reqDir[:len(reqDir)-1]
	}

	// accumulators filled in by the per-entry callback below
	var contents []ListEntry
	var commonPrefixes []PrefixEntry
	var isTruncated bool
	var doErr error
	var nextMarker string

	// check filer
	err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		_, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, func(dir string, entry *filer_pb.Entry) {
			if entry.IsDirectory {
				// with the "/" delimiter, directories become CommonPrefixes;
				// otherwise doListFilerEntries recurses into them instead
				if delimiter == "/" {
					commonPrefixes = append(commonPrefixes, PrefixEntry{
						Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
					})
				}
			} else {
				// storage class defaults to STANDARD unless recorded on the
				// entry's extended attributes at upload time
				storageClass := "STANDARD"
				if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
					storageClass = string(v)
				}
				contents = append(contents, ListEntry{
					// strip the buckets path + bucket name to get the object key
					Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
					LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
					ETag:         "\"" + filer.ETag(entry) + "\"",
					Size:         int64(filer.FileSize(entry)),
					Owner: CanonicalUser{
						ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
						DisplayName: entry.Attributes.UserName,
					},
					StorageClass: StorageClass(storageClass),
				})
			}
		})
		if doErr != nil {
			return doErr
		}

		// NextMarker is only meaningful when the listing was cut short
		if !isTruncated {
			nextMarker = ""
		}

		response = ListBucketResult{
			Name:           bucket,
			Prefix:         originalPrefix,
			Marker:         marker,
			NextMarker:     nextMarker,
			MaxKeys:        maxKeys,
			Delimiter:      delimiter,
			IsTruncated:    isTruncated,
			Contents:       contents,
			CommonPrefixes: commonPrefixes,
		}

		return nil
	})

	return
}
|
2018-07-22 16:14:36 +08:00
|
|
|
|
2020-08-10 05:35:53 +08:00
|
|
|
func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
|
|
|
|
// invariants
|
|
|
|
// prefix and marker should be under dir, marker may contain "/"
|
|
|
|
// maxKeys should be updated for each recursion
|
|
|
|
|
|
|
|
if prefix == "/" && delimiter == "/" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if maxKeys <= 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if strings.Contains(marker, "/") {
|
|
|
|
sepIndex := strings.Index(marker, "/")
|
|
|
|
subDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:]
|
|
|
|
// println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys)
|
2020-12-10 08:47:34 +08:00
|
|
|
subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, eachEntryFn)
|
2020-08-10 05:35:53 +08:00
|
|
|
if subErr != nil {
|
|
|
|
err = subErr
|
|
|
|
return
|
|
|
|
}
|
2020-12-10 08:47:34 +08:00
|
|
|
isTruncated = isTruncated || subIsTruncated
|
2020-08-10 05:35:53 +08:00
|
|
|
maxKeys -= subCounter
|
|
|
|
nextMarker = subDir + "/" + subNextMarker
|
|
|
|
counter += subCounter
|
|
|
|
// finished processing this sub directory
|
|
|
|
marker = subDir
|
|
|
|
}
|
|
|
|
|
|
|
|
// now marker is also a direct child of dir
|
|
|
|
request := &filer_pb.ListEntriesRequest{
|
|
|
|
Directory: dir,
|
|
|
|
Prefix: prefix,
|
|
|
|
Limit: uint32(maxKeys + 1),
|
|
|
|
StartFromFileName: marker,
|
|
|
|
InclusiveStartFrom: false,
|
|
|
|
}
|
|
|
|
|
2020-09-10 03:07:15 +08:00
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
stream, listErr := client.ListEntries(ctx, request)
|
2020-08-10 05:35:53 +08:00
|
|
|
if listErr != nil {
|
|
|
|
err = fmt.Errorf("list entires %+v: %v", request, listErr)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
resp, recvErr := stream.Recv()
|
|
|
|
if recvErr != nil {
|
|
|
|
if recvErr == io.EOF {
|
|
|
|
break
|
|
|
|
} else {
|
|
|
|
err = fmt.Errorf("iterating entires %+v: %v", request, recvErr)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if counter >= maxKeys {
|
|
|
|
isTruncated = true
|
|
|
|
return
|
|
|
|
}
|
|
|
|
entry := resp.Entry
|
|
|
|
nextMarker = entry.Name
|
|
|
|
if entry.IsDirectory {
|
2020-08-10 05:42:25 +08:00
|
|
|
// println("ListEntries", dir, "dir:", entry.Name)
|
2020-08-10 05:35:53 +08:00
|
|
|
if entry.Name != ".uploads" { // FIXME no need to apply to all directories. this extra also affects maxKeys
|
|
|
|
if delimiter != "/" {
|
2020-12-12 19:38:34 +08:00
|
|
|
eachEntryFn(dir, entry)
|
2020-08-10 05:35:53 +08:00
|
|
|
// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter)
|
|
|
|
subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, eachEntryFn)
|
|
|
|
if subErr != nil {
|
|
|
|
err = fmt.Errorf("doListFilerEntries2: %v", subErr)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated)
|
|
|
|
counter += subCounter
|
|
|
|
nextMarker = entry.Name + "/" + subNextMarker
|
|
|
|
if subIsTruncated {
|
|
|
|
isTruncated = true
|
|
|
|
return
|
|
|
|
}
|
|
|
|
} else {
|
2020-12-12 19:38:34 +08:00
|
|
|
var isEmpty bool
|
2020-12-13 05:25:19 +08:00
|
|
|
if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
|
2020-12-12 19:38:34 +08:00
|
|
|
return
|
|
|
|
}
|
2020-12-13 05:25:19 +08:00
|
|
|
if !isEmpty {
|
2020-12-12 19:38:34 +08:00
|
|
|
eachEntryFn(dir, entry)
|
|
|
|
counter++
|
|
|
|
}
|
2020-08-10 05:35:53 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// println("ListEntries", dir, "file:", entry.Name)
|
|
|
|
eachEntryFn(dir, entry)
|
|
|
|
counter++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-07-22 17:04:07 +08:00
|
|
|
func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) {
|
|
|
|
prefix = values.Get("prefix")
|
|
|
|
token = values.Get("continuation-token")
|
|
|
|
startAfter = values.Get("start-after")
|
|
|
|
delimiter = values.Get("delimiter")
|
|
|
|
if values.Get("max-keys") != "" {
|
|
|
|
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
|
|
|
|
} else {
|
|
|
|
maxkeys = maxObjectListSizeLimit
|
|
|
|
}
|
|
|
|
fetchOwner = values.Get("fetch-owner") == "true"
|
|
|
|
return
|
2018-07-22 16:14:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int) {
|
|
|
|
prefix = values.Get("prefix")
|
|
|
|
marker = values.Get("marker")
|
|
|
|
delimiter = values.Get("delimiter")
|
|
|
|
if values.Get("max-keys") != "" {
|
|
|
|
maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
|
|
|
|
} else {
|
|
|
|
maxkeys = maxObjectListSizeLimit
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2020-12-12 19:38:34 +08:00
|
|
|
|
2020-12-13 05:25:19 +08:00
|
|
|
func (s3a *S3ApiServer) isDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) {
|
|
|
|
// println("+ isDirectoryAllEmpty", dir, name)
|
|
|
|
var fileCounter int
|
|
|
|
var subDirs []string
|
|
|
|
currentDir := parentDir+"/"+name
|
|
|
|
err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error {
|
|
|
|
if entry.IsDirectory {
|
|
|
|
subDirs = append(subDirs, entry.Name)
|
|
|
|
} else {
|
|
|
|
println("existing file", currentDir, entry.Name)
|
|
|
|
fileCounter++
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}, "",false, 32)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
2020-12-12 19:38:34 +08:00
|
|
|
}
|
2020-12-13 05:25:19 +08:00
|
|
|
|
|
|
|
if fileCounter > 0 {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, subDir := range subDirs {
|
|
|
|
isSubEmpty, subErr := s3a.isDirectoryAllEmpty(filerClient, currentDir, subDir)
|
|
|
|
if subErr != nil {
|
|
|
|
return false, subErr
|
|
|
|
}
|
|
|
|
if !isSubEmpty {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
println("deleting empty", currentDir)
|
|
|
|
if err = doDeleteEntry(filerClient, parentDir, name, true, true); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
return true, nil
|
2020-12-12 19:38:34 +08:00
|
|
|
}
|