seaweedfs/weed/s3api/s3api_objects_list_handlers.go

package s3api

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/gorilla/mux"
)

const (
	maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse.
)

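// ListObjectsV2Handler serves GET Bucket (List Objects) Version 2.
// It accepts the v2 query parameters (continuation-token, start-after,
// fetch-owner) but reuses the same filer listing as the v1 handler.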
func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
	// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html

	// collect parameters
	vars := mux.Vars(r)
	bucket := vars["bucket"]

	glog.V(4).Infof("read v2: %v", vars)

	originalPrefix, marker, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
	if maxKeys < 0 {
		writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
		return
	}
	if delimiter != "" && delimiter != "/" {
		writeErrorResponse(w, ErrNotImplemented, r.URL)
		return
	}

	// start-after is only honored when no continuation token is given
	if marker == "" {
		marker = startAfter
	}

	ctx := context.Background()

	response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker)
	if err != nil {
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	writeSuccessResponseXML(w, encodeResponse(response))
}

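// ListObjectsV1Handler serves the original GET Bucket (List Objects) API.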
func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
	// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html

	// collect parameters
	vars := mux.Vars(r)
	bucket := vars["bucket"]

	ctx := context.Background()

	originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
	if maxKeys < 0 {
		writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
		return
	}
	if delimiter != "" && delimiter != "/" {
		writeErrorResponse(w, ErrNotImplemented, r.URL)
		return
	}

	response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker)
	if err != nil {
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	writeSuccessResponseXML(w, encodeResponse(response))
}

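// listFilerEntries translates an S3 list request into a filer ListEntries call.
// It asks the filer for maxKeys+1 entries so it can tell whether the result is
// truncated, reports subdirectories as CommonPrefixes, and hides the internal
// .uploads folder used for multipart uploads.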
func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) {

	// convert full path prefix into directory name and prefix for entry name
	dir, prefix := filepath.Split(originalPrefix)
	if strings.HasPrefix(dir, "/") {
		dir = dir[1:]
	}

	// check filer
	err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.ListEntriesRequest{
			Directory:          fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir),
			Prefix:             prefix,
			Limit:              uint32(maxKeys + 1), // ask for one extra entry to detect truncation
			StartFromFileName:  marker,
			InclusiveStartFrom: false,
		}

		stream, err := client.ListEntries(ctx, request)
		if err != nil {
			return fmt.Errorf("list buckets: %v", err)
		}

		var contents []ListEntry
		var commonPrefixes []PrefixEntry
		var counter int
		var lastEntryName string
		var isTruncated bool

		for {
			resp, recvErr := stream.Recv()
			if recvErr != nil {
				if recvErr == io.EOF {
					break
				} else {
					return recvErr
				}
			}

			entry := resp.Entry

			counter++
			if counter > maxKeys {
				isTruncated = true
				break
			}
			lastEntryName = entry.Name

			if entry.IsDirectory {
				if entry.Name != ".uploads" { // hide the internal multipart upload folder
					commonPrefixes = append(commonPrefixes, PrefixEntry{
						Prefix: fmt.Sprintf("%s%s/", dir, entry.Name),
					})
				}
			} else {
				contents = append(contents, ListEntry{
					Key:          fmt.Sprintf("%s%s", dir, entry.Name),
					LastModified: time.Unix(entry.Attributes.Mtime, 0),
					ETag:         "\"" + filer2.ETag(entry.Chunks) + "\"",
					Size:         int64(filer2.TotalSize(entry.Chunks)),
					Owner: CanonicalUser{
						ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
						DisplayName: entry.Attributes.UserName,
					},
					StorageClass: "STANDARD",
				})
			}
		}

		response = ListBucketResult{
			Name:           bucket,
			Prefix:         originalPrefix,
			Marker:         marker,
			NextMarker:     lastEntryName,
			MaxKeys:        maxKeys,
			Delimiter:      "/",
			IsTruncated:    isTruncated,
			Contents:       contents,
			CommonPrefixes: commonPrefixes,
		}

		glog.V(4).Infof("read directory: %v, found: %v, %+v", request, counter, response)

		return nil
	})

	return
}

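// getListObjectsV2Args parses the ListObjectsV2 query parameters,
// defaulting max-keys to maxObjectListSizeLimit when it is not set.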
func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) {
	prefix = values.Get("prefix")
	token = values.Get("continuation-token")
	startAfter = values.Get("start-after")
	delimiter = values.Get("delimiter")
	if values.Get("max-keys") != "" {
		maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
	} else {
		maxkeys = maxObjectListSizeLimit
	}
	fetchOwner = values.Get("fetch-owner") == "true"
	return
}

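// getListObjectsV1Args parses the ListObjects (v1) query parameters,
// defaulting max-keys to maxObjectListSizeLimit when it is not set.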
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int) {
	prefix = values.Get("prefix")
	marker = values.Get("marker")
	delimiter = values.Get("delimiter")
	if values.Get("max-keys") != "" {
		maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
	} else {
		maxkeys = maxObjectListSizeLimit
	}
	return
}