2018-07-22 01:39:02 +08:00
|
|
|
package s3api
|
|
|
|
|
|
|
|
import (
|
2018-09-12 16:00:57 +08:00
|
|
|
"crypto/md5"
|
2018-07-22 08:39:10 +08:00
|
|
|
"encoding/json"
|
2020-02-26 06:38:36 +08:00
|
|
|
"encoding/xml"
|
2018-07-22 01:39:02 +08:00
|
|
|
"fmt"
|
2018-07-22 16:15:11 +08:00
|
|
|
"io"
|
2018-07-22 01:39:02 +08:00
|
|
|
"io/ioutil"
|
2018-07-22 08:39:10 +08:00
|
|
|
"net/http"
|
2018-07-23 16:15:59 +08:00
|
|
|
"strings"
|
2018-09-12 15:46:12 +08:00
|
|
|
|
2020-01-31 16:11:08 +08:00
|
|
|
"github.com/gorilla/mux"
|
|
|
|
|
2018-09-12 15:46:12 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
2020-03-21 05:17:31 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
2020-06-11 20:00:47 +08:00
|
|
|
weed_server "github.com/chrislusf/seaweedfs/weed/server"
|
2020-02-15 01:46:36 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
2018-07-22 01:39:02 +08:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// client is the shared HTTP client used for all traffic proxied to the
	// filer (see proxyToFiler and putToFiler). Initialized once in init().
	client *http.Client
)
|
|
|
|
|
|
|
|
func init() {
	// Every S3 request is proxied to the single filer address, so allow a
	// large idle-connection pool per host to enable connection reuse.
	// NOTE(review): no Timeout is set on this client — a timeout would cut
	// off long uploads, but as written a hung filer connection never times
	// out; confirm this trade-off is intentional.
	client = &http.Client{Transport: &http.Transport{
		MaxIdleConnsPerHost: 1024,
	}}
}
|
|
|
|
|
|
|
|
// PutObjectHandler handles the S3 PutObject API: it validates the optional
// Content-Md5 header, authenticates the request when IAM is enabled, then
// either creates a directory (key ending in "/") or streams the body to the
// filer. On success it sets the ETag header and replies 200 with empty body.
func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) {

	// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html

	bucket, object := getBucketAndObject(r)

	// Reject a malformed Content-Md5 header up front (the digest value
	// itself is not used here).
	_, err := validateContentMd5(r.Header)
	if err != nil {
		writeErrorResponse(w, ErrInvalidDigest, r.URL)
		return
	}

	dataReader := r.Body
	if s3a.iam.isEnabled() {
		rAuthType := getRequestAuthType(r)
		var s3ErrCode ErrorCode
		switch rAuthType {
		case authTypeStreamingSigned:
			// AWS chunked uploads: replace the body with a reader that
			// strips and verifies the per-chunk V4 signatures.
			dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
		case authTypeSignedV2, authTypePresignedV2:
			_, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
		case authTypePresigned, authTypeSigned:
			_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
		}
		// NOTE(review): unrecognized auth types fall through with the zero
		// ErrorCode — presumably ErrNone — and are accepted; confirm that
		// anonymous requests are intended to pass when IAM is enabled.
		if s3ErrCode != ErrNone {
			writeErrorResponse(w, s3ErrCode, r.URL)
			return
		}
	}
	defer dataReader.Close()

	if strings.HasSuffix(object, "/") {
		// A key with a trailing slash denotes a directory placeholder.
		if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
			writeErrorResponse(w, ErrInternalError, r.URL)
			return
		}
	} else {
		// object already carries a leading "/" (see getBucketAndObject).
		uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object)

		etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)

		if errCode != ErrNone {
			writeErrorResponse(w, errCode, r.URL)
			return
		}

		setEtag(w, etag)
	}

	writeSuccessResponseEmpty(w)
}
|
2018-07-22 09:49:47 +08:00
|
|
|
|
|
|
|
func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
2020-07-27 03:58:58 +08:00
|
|
|
bucket, object := getBucketAndObject(r)
|
2018-09-20 13:01:41 +08:00
|
|
|
|
2018-07-23 16:15:59 +08:00
|
|
|
if strings.HasSuffix(r.URL.Path, "/") {
|
|
|
|
writeErrorResponse(w, ErrNotImplemented, r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-09-20 13:01:41 +08:00
|
|
|
destUrl := fmt.Sprintf("http://%s%s/%s%s",
|
2018-11-09 13:41:02 +08:00
|
|
|
s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
|
2018-07-22 09:49:47 +08:00
|
|
|
|
2019-01-02 18:38:32 +08:00
|
|
|
s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
|
2018-07-22 09:49:47 +08:00
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
2020-07-27 03:58:58 +08:00
|
|
|
bucket, object := getBucketAndObject(r)
|
2018-09-20 13:01:41 +08:00
|
|
|
|
|
|
|
destUrl := fmt.Sprintf("http://%s%s/%s%s",
|
2018-11-09 13:41:02 +08:00
|
|
|
s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
|
2018-07-22 09:49:47 +08:00
|
|
|
|
2019-01-02 18:38:32 +08:00
|
|
|
s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
|
2018-07-22 09:49:47 +08:00
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
|
2020-07-27 03:58:58 +08:00
|
|
|
bucket, object := getBucketAndObject(r)
|
2018-09-20 13:01:41 +08:00
|
|
|
|
2020-06-23 01:01:00 +08:00
|
|
|
destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true",
|
2018-11-09 13:41:02 +08:00
|
|
|
s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
|
2018-07-22 09:49:47 +08:00
|
|
|
|
2020-06-11 20:00:47 +08:00
|
|
|
s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) {
|
|
|
|
for k, v := range proxyResponse.Header {
|
2018-07-22 10:12:44 +08:00
|
|
|
w.Header()[k] = v
|
|
|
|
}
|
2020-06-12 00:00:34 +08:00
|
|
|
w.WriteHeader(http.StatusNoContent)
|
2018-07-22 10:12:44 +08:00
|
|
|
})
|
2018-07-22 09:49:47 +08:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2020-07-27 03:58:58 +08:00
|
|
|
// ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct {
	ObjectName string `xml:"Key"`
}
|
|
|
|
|
|
|
|
// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted.
type DeleteObjectsRequest struct {
	// Element to enable quiet mode for the request; in quiet mode the
	// response omits the list of successfully deleted objects.
	Quiet bool
	// List of objects to be deleted
	Objects []ObjectIdentifier `xml:"Object"`
}
|
|
|
|
|
|
|
|
// DeleteError structure. One entry is emitted per object that could not be
// deleted in a DeleteMultipleObjects request.
type DeleteError struct {
	Code    string
	Message string
	Key     string
}
|
|
|
|
|
|
|
|
// DeleteObjectsResponse container for multiple object deletes.
type DeleteObjectsResponse struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`

	// Collection of all deleted objects
	DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`

	// Collection of errors deleting certain objects.
	Errors []DeleteError `xml:"Error,omitempty"`
}
|
|
|
|
|
2018-09-04 15:42:44 +08:00
|
|
|
// DeleteMultipleObjectsHandler - Delete multiple objects
// Parses the DeleteObjects XML body, deletes each listed key through a single
// filer client connection, and responds with per-key success/failure results.
func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {

	bucket, _ := getBucketAndObject(r)

	deleteXMLBytes, err := ioutil.ReadAll(r.Body)
	if err != nil {
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	deleteObjects := &DeleteObjectsRequest{}
	if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
		writeErrorResponse(w, ErrMalformedXML, r.URL)
		return
	}

	// Captured by the closure below and filled in as deletions proceed.
	var deletedObjects []ObjectIdentifier
	var deleteErrors []DeleteError

	// NOTE(review): the error returned by WithFilerClient is discarded — if
	// the filer connection itself fails, the response still reports success
	// with empty results; confirm whether that is acceptable.
	s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		for _, object := range deleteObjects.Objects {
			// Split the key into parent directory and entry name.
			lastSeparator := strings.LastIndex(object.ObjectName, "/")
			parentDirectoryPath, entryName, isDeleteData, isRecursive := "/", object.ObjectName, true, true
			// Only split when the "/" is interior; keys with a leading or
			// trailing slash keep the whole name as the entry under "/".
			if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
				entryName = object.ObjectName[lastSeparator+1:]
				parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
			}
			// Anchor the parent directory under this bucket's path.
			parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath)

			err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
			if err == nil {
				deletedObjects = append(deletedObjects, object)
			} else {
				// NOTE(review): Code is left empty rather than an S3 error
				// code string — clients keying on Code see "".
				deleteErrors = append(deleteErrors, DeleteError{
					Code:    "",
					Message: err.Error(),
					Key:     object.ObjectName,
				})
			}
		}
		return nil
	})

	deleteResp := DeleteObjectsResponse{}
	// Quiet mode suppresses the per-object success entries.
	if !deleteObjects.Quiet {
		deleteResp.DeletedObjects = deletedObjects
	}
	deleteResp.Errors = deleteErrors

	writeSuccessResponseXML(w, encodeResponse(deleteResp))
}
|
|
|
|
|
2020-06-12 01:53:25 +08:00
|
|
|
func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) {
|
2018-07-22 09:49:47 +08:00
|
|
|
|
|
|
|
glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl)
|
|
|
|
|
|
|
|
proxyReq, err := http.NewRequest(r.Method, destUrl, r.Body)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
glog.Errorf("NewRequest %s: %v", destUrl, err)
|
|
|
|
writeErrorResponse(w, ErrInternalError, r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
proxyReq.Header.Set("Host", s3a.option.Filer)
|
|
|
|
proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
|
|
|
|
|
|
|
|
for header, values := range r.Header {
|
|
|
|
for _, value := range values {
|
|
|
|
proxyReq.Header.Add(header, value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
resp, postErr := client.Do(proxyReq)
|
|
|
|
|
2020-08-01 01:08:30 +08:00
|
|
|
if resp.ContentLength == -1 {
|
|
|
|
writeErrorResponse(w, ErrNoSuchKey, r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-07-22 09:49:47 +08:00
|
|
|
if postErr != nil {
|
|
|
|
glog.Errorf("post to filer: %v", postErr)
|
|
|
|
writeErrorResponse(w, ErrInternalError, r.URL)
|
|
|
|
return
|
|
|
|
}
|
2020-02-15 01:46:36 +08:00
|
|
|
defer util.CloseResponse(resp)
|
2018-07-22 09:49:47 +08:00
|
|
|
|
2018-07-22 10:12:44 +08:00
|
|
|
responseFn(resp, w)
|
2020-02-15 01:46:36 +08:00
|
|
|
|
2018-07-22 10:12:44 +08:00
|
|
|
}
|
2020-06-12 01:53:25 +08:00
|
|
|
func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
|
|
|
|
for k, v := range proxyResponse.Header {
|
2018-07-22 09:49:47 +08:00
|
|
|
w.Header()[k] = v
|
|
|
|
}
|
2020-06-12 01:53:25 +08:00
|
|
|
w.WriteHeader(proxyResponse.StatusCode)
|
|
|
|
io.Copy(w, proxyResponse.Body)
|
2018-07-22 09:49:47 +08:00
|
|
|
}
|
2018-09-04 15:42:44 +08:00
|
|
|
|
2020-02-15 01:09:15 +08:00
|
|
|
func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code ErrorCode) {
|
2018-09-04 15:42:44 +08:00
|
|
|
|
2018-09-12 15:46:12 +08:00
|
|
|
hash := md5.New()
|
2020-02-25 14:28:45 +08:00
|
|
|
var body = io.TeeReader(dataReader, hash)
|
2018-09-12 15:46:12 +08:00
|
|
|
|
|
|
|
proxyReq, err := http.NewRequest("PUT", uploadUrl, body)
|
2018-09-04 15:42:44 +08:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
glog.Errorf("NewRequest %s: %v", uploadUrl, err)
|
|
|
|
return "", ErrInternalError
|
|
|
|
}
|
|
|
|
|
|
|
|
proxyReq.Header.Set("Host", s3a.option.Filer)
|
|
|
|
proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
|
|
|
|
|
|
|
|
for header, values := range r.Header {
|
|
|
|
for _, value := range values {
|
|
|
|
proxyReq.Header.Add(header, value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
resp, postErr := client.Do(proxyReq)
|
|
|
|
|
|
|
|
if postErr != nil {
|
|
|
|
glog.Errorf("post to filer: %v", postErr)
|
|
|
|
return "", ErrInternalError
|
|
|
|
}
|
|
|
|
defer resp.Body.Close()
|
|
|
|
|
2018-09-12 15:46:12 +08:00
|
|
|
etag = fmt.Sprintf("%x", hash.Sum(nil))
|
2018-09-04 15:42:44 +08:00
|
|
|
|
|
|
|
resp_body, ra_err := ioutil.ReadAll(resp.Body)
|
|
|
|
if ra_err != nil {
|
|
|
|
glog.Errorf("upload to filer response read: %v", ra_err)
|
|
|
|
return etag, ErrInternalError
|
|
|
|
}
|
2018-09-12 15:46:12 +08:00
|
|
|
var ret weed_server.FilerPostResult
|
2018-09-04 15:42:44 +08:00
|
|
|
unmarshal_err := json.Unmarshal(resp_body, &ret)
|
|
|
|
if unmarshal_err != nil {
|
|
|
|
glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
|
2018-09-12 15:46:12 +08:00
|
|
|
return "", ErrInternalError
|
2018-09-04 15:42:44 +08:00
|
|
|
}
|
|
|
|
if ret.Error != "" {
|
|
|
|
glog.Errorf("upload to filer error: %v", ret.Error)
|
2018-09-12 15:46:12 +08:00
|
|
|
return "", ErrInternalError
|
2018-09-04 15:42:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return etag, ErrNone
|
|
|
|
}
|
|
|
|
|
|
|
|
func setEtag(w http.ResponseWriter, etag string) {
|
|
|
|
if etag != "" {
|
2018-09-12 15:46:12 +08:00
|
|
|
if strings.HasPrefix(etag, "\"") {
|
|
|
|
w.Header().Set("ETag", etag)
|
|
|
|
} else {
|
|
|
|
w.Header().Set("ETag", "\""+etag+"\"")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-27 03:58:58 +08:00
|
|
|
func getBucketAndObject(r *http.Request) (bucket, object string) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
bucket = vars["bucket"]
|
|
|
|
object = vars["object"]
|
2018-09-12 15:46:12 +08:00
|
|
|
if !strings.HasPrefix(object, "/") {
|
|
|
|
object = "/" + object
|
|
|
|
}
|
2020-07-27 03:58:58 +08:00
|
|
|
|
|
|
|
return
|
2018-09-12 15:46:12 +08:00
|
|
|
}
|