seaweedfs/weed/storage/backend/s3_backend/s3_upload.go

package s3_backend

import (
    "fmt"
    "os"
    "sync/atomic"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/s3/s3iface"
    "github.com/aws/aws-sdk-go/service/s3/s3manager"

    "github.com/chrislusf/seaweedfs/weed/glog"
)

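// uploadToS3 streams the local file at filename to destBucket/destKey via the
// AWS SDK multipart uploader, attaching attributes as object tags and calling
// fn (if non-nil) with cumulative progress after each part read.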
func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string,
    attributes map[string]string,
    fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {

    // open the file
    f, err := os.Open(filename)
    if err != nil {
        return 0, fmt.Errorf("failed to open file %q, %v", filename, err)
    }
    defer f.Close()

    info, err := f.Stat()
    if err != nil {
        return 0, fmt.Errorf("failed to stat file %q, %v", filename, err)
    }
    fileSize = info.Size()

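    // S3 allows at most 10,000 parts per multipart upload; grow the part size
    // until the file fits in at most 1,000 parts, leaving ample headroom.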
    partSize := int64(64 * 1024 * 1024) // the minimum/default allowed part size is 5MB
    for partSize*1000 < fileSize {
        partSize *= 4
    }

    // Create an uploader with the session and custom options
    uploader := s3manager.NewUploaderWithClient(sess, func(u *s3manager.Uploader) {
        u.PartSize = partSize
        u.Concurrency = 5
    })

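    // Wrap the file in a progress-reporting reader. read starts at -fileSize,
    // presumably to offset an extra pass the SDK makes over the data (e.g. for
    // request signing), so the reported progress nets out at 100%.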
    fileReader := &s3UploadProgressedReader{
        fp:   f,
        size: fileSize,
        read: -fileSize,
        fn:   fn,
    }

    // build the tag set in "k1=v1&k2=v2" form for the Tagging field
    tags := ""
    for k, v := range attributes {
        if len(tags) > 0 {
            tags = tags + "&"
        }
        tags = tags + k + "=" + v
    }

    // Upload the file to S3.
    var result *s3manager.UploadOutput
    result, err = uploader.Upload(&s3manager.UploadInput{
        Bucket:               aws.String(destBucket),
        Key:                  aws.String(destKey),
        Body:                 fileReader,
        ACL:                  aws.String("private"),
        ServerSideEncryption: aws.String("AES256"),
        StorageClass:         aws.String("STANDARD_IA"),
        Tagging:              aws.String(tags),
    })

    // in case it fails to upload
    if err != nil {
        return 0, fmt.Errorf("failed to upload file %s: %v", filename, err)
    }
    glog.V(1).Infof("file %s uploaded to %s\n", filename, result.Location)

    return
}

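// s3UploadProgressedReader wraps an *os.File so that cumulative read (and
// therefore upload) progress can be reported through the fn callback.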
// adapted from https://github.com/aws/aws-sdk-go/pull/1868
type s3UploadProgressedReader struct {
    fp   *os.File
    size int64
    read int64
    fn   func(progressed int64, percentage float32) error
}

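// Read implements io.Reader; progress is tracked in ReadAt instead, since the
// s3manager uploader reads parts through io.ReaderAt when the body supports it.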
func (r *s3UploadProgressedReader) Read(p []byte) (int, error) {
    return r.fp.Read(p)
}

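// ReadAt implements io.ReaderAt, delegating to the underlying file and
// reporting cumulative progress to fn after each successful read.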
func (r *s3UploadProgressedReader) ReadAt(p []byte, off int64) (int, error) {
    n, err := r.fp.ReadAt(p, off)
    if err != nil {
        return n, err
    }

    // track the cumulative number of bytes read, i.e. uploaded so far
    atomic.AddInt64(&r.read, int64(n))

    if r.fn != nil {
        read := r.read
        if err := r.fn(read, float32(read*100)/float32(r.size)); err != nil {
            return n, err
        }
    }

    return n, err
}

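// Seek implements io.Seeker by delegating to the underlying file.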
func (r *s3UploadProgressedReader) Seek(offset int64, whence int) (int64, error) {
    return r.fp.Seek(offset, whence)
}