2019-11-27 19:09:42 +08:00
|
|
|
package s3_backend
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"os"
|
2019-12-03 07:08:28 +08:00
|
|
|
"sync/atomic"
|
2019-11-27 19:09:42 +08:00
|
|
|
|
|
|
|
"github.com/aws/aws-sdk-go/aws"
|
|
|
|
"github.com/aws/aws-sdk-go/service/s3/s3iface"
|
|
|
|
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
2019-12-26 13:37:21 +08:00
|
|
|
|
2019-12-03 07:08:28 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
2019-11-27 19:09:42 +08:00
|
|
|
)
|
|
|
|
|
2021-10-30 03:39:19 +08:00
|
|
|
func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
|
2019-11-27 19:09:42 +08:00
|
|
|
|
|
|
|
//open the file
|
|
|
|
f, err := os.Open(filename)
|
|
|
|
if err != nil {
|
2019-12-03 07:08:28 +08:00
|
|
|
return 0, fmt.Errorf("failed to open file %q, %v", filename, err)
|
2019-11-27 19:09:42 +08:00
|
|
|
}
|
|
|
|
defer f.Close()
|
|
|
|
|
|
|
|
info, err := f.Stat()
|
|
|
|
if err != nil {
|
2019-12-03 07:08:28 +08:00
|
|
|
return 0, fmt.Errorf("failed to stat file %q, %v", filename, err)
|
2019-11-27 19:09:42 +08:00
|
|
|
}
|
|
|
|
|
2019-12-03 07:08:28 +08:00
|
|
|
fileSize = info.Size()
|
2019-11-27 19:09:42 +08:00
|
|
|
|
|
|
|
partSize := int64(64 * 1024 * 1024) // The minimum/default allowed part size is 5MB
|
|
|
|
for partSize*1000 < fileSize {
|
|
|
|
partSize *= 4
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create an uploader with the session and custom options
|
|
|
|
uploader := s3manager.NewUploaderWithClient(sess, func(u *s3manager.Uploader) {
|
|
|
|
u.PartSize = partSize
|
2019-12-03 07:08:28 +08:00
|
|
|
u.Concurrency = 5
|
2019-11-27 19:09:42 +08:00
|
|
|
})
|
|
|
|
|
2019-12-03 07:08:28 +08:00
|
|
|
fileReader := &s3UploadProgressedReader{
|
2019-12-03 12:49:58 +08:00
|
|
|
fp: f,
|
|
|
|
size: fileSize,
|
|
|
|
read: -fileSize,
|
|
|
|
fn: fn,
|
2019-12-03 07:08:28 +08:00
|
|
|
}
|
|
|
|
|
2019-11-27 19:09:42 +08:00
|
|
|
// Upload the file to S3.
|
2019-12-03 07:08:28 +08:00
|
|
|
var result *s3manager.UploadOutput
|
|
|
|
result, err = uploader.Upload(&s3manager.UploadInput{
|
2021-11-03 14:39:16 +08:00
|
|
|
Bucket: aws.String(destBucket),
|
|
|
|
Key: aws.String(destKey),
|
|
|
|
Body: fileReader,
|
|
|
|
StorageClass: aws.String("STANDARD_IA"),
|
2019-11-27 19:09:42 +08:00
|
|
|
})
|
|
|
|
|
|
|
|
//in case it fails to upload
|
|
|
|
if err != nil {
|
2019-12-03 07:08:28 +08:00
|
|
|
return 0, fmt.Errorf("failed to upload file %s: %v", filename, err)
|
|
|
|
}
|
|
|
|
glog.V(1).Infof("file %s uploaded to %s\n", filename, result.Location)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// adapted from https://github.com/aws/aws-sdk-go/pull/1868
//
// s3UploadProgressedReader wraps an *os.File so that upload progress can be
// reported through fn while the file is being read. It provides Read, ReadAt
// and Seek (see the methods below); the byte counter is updated only in
// ReadAt, atomically, since parts may be read concurrently.
type s3UploadProgressedReader struct {
	fp *os.File // underlying file being uploaded
	size int64 // total file size in bytes, used to compute the percentage
	read int64 // running byte count; updated atomically in ReadAt (starts negative — see uploadToS3)
	fn func(progressed int64, percentage float32) error // progress callback; its error is propagated from ReadAt
}
|
2019-12-03 12:49:58 +08:00
|
|
|
|
2019-12-03 07:08:28 +08:00
|
|
|
func (r *s3UploadProgressedReader) Read(p []byte) (int, error) {
|
|
|
|
return r.fp.Read(p)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (r *s3UploadProgressedReader) ReadAt(p []byte, off int64) (int, error) {
|
|
|
|
n, err := r.fp.ReadAt(p, off)
|
|
|
|
if err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Got the length have read( or means has uploaded), and you can construct your message
|
|
|
|
atomic.AddInt64(&r.read, int64(n))
|
|
|
|
|
|
|
|
if r.fn != nil {
|
|
|
|
read := r.read
|
|
|
|
if err := r.fn(read, float32(read*100)/float32(r.size)); err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
2019-11-27 19:09:42 +08:00
|
|
|
}
|
|
|
|
|
2019-12-03 07:08:28 +08:00
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (r *s3UploadProgressedReader) Seek(offset int64, whence int) (int64, error) {
|
|
|
|
return r.fp.Seek(offset, whence)
|
2019-11-27 19:09:42 +08:00
|
|
|
}
|