package s3_backend

import (
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
	"github.com/google/uuid"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
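
// init registers this backend under the "s3" storage type, making it
// selectable from the storage configuration.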
func init() {
	backend.BackendStorageFactories["s3"] = &S3BackendFactory{}
}
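
// S3BackendFactory builds S3BackendStorage instances for the "s3" storage type.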
type S3BackendFactory struct{}

func (factory *S3BackendFactory) StorageType() backend.StorageType {
	return backend.StorageType("s3")
}

func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, configPrefix string, id string) (backend.BackendStorage, error) {
	return newS3BackendStorage(configuration, configPrefix, id)
}
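
// S3BackendStorage stores volume .dat files as objects in a single S3 bucket,
// sharing one S3 connection across all files on this backend.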
type S3BackendStorage struct {
	id                    string
	aws_access_key_id     string
	aws_secret_access_key string
	region                string
	bucket                string
	endpoint              string
	storageClass          string
	conn                  s3iface.S3API
}
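
// newS3BackendStorage reads the credentials, region, bucket, endpoint, and
// storage class for this backend from the configuration, using keys of the
// form configPrefix + "aws_access_key_id" and so on; the storage class
// defaults to STANDARD_IA. A minimal configuration sketch (section and key
// names assume the usual master.toml layout):
//
//	[storage.backend.s3.default]
//	enabled = true
//	aws_access_key_id = ""     # if empty, credentials are resolved from the environment
//	aws_secret_access_key = ""
//	region = "us-east-2"
//	bucket = "your_bucket_name"
//	storage_class = "STANDARD_IA"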
func newS3BackendStorage(configuration backend.StringProperties, configPrefix string, id string) (s *S3BackendStorage, err error) {
	s = &S3BackendStorage{}
	s.id = id
	s.aws_access_key_id = configuration.GetString(configPrefix + "aws_access_key_id")
	s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key")
	s.region = configuration.GetString(configPrefix + "region")
	s.bucket = configuration.GetString(configPrefix + "bucket")
	s.endpoint = configuration.GetString(configPrefix + "endpoint")
	s.storageClass = configuration.GetString(configPrefix + "storage_class")
	if s.storageClass == "" {
		s.storageClass = "STANDARD_IA"
	}

	s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint)

	glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
	return
}
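
// ToProperties exports this backend's settings as a flat key/value map,
// including the credentials.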
func (s *S3BackendStorage) ToProperties() map[string]string {
	m := make(map[string]string)
	m["aws_access_key_id"] = s.aws_access_key_id
	m["aws_secret_access_key"] = s.aws_secret_access_key
	m["region"] = s.region
	m["bucket"] = s.bucket
	m["endpoint"] = s.endpoint
	m["storage_class"] = s.storageClass
	return m
}
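
// NewStorageFile wraps an S3 object (with any leading "/" stripped from the
// key) as a backend.BackendStorageFile.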
func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) backend.BackendStorageFile {
	if strings.HasPrefix(key, "/") {
		key = key[1:]
	}

	f := &S3BackendStorageFile{
		backendStorage: s,
		key:            key,
		tierInfo:       tierInfo,
	}

	return f
}
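
// CopyFile uploads the local file to the bucket under a random UUID key,
// retrying the upload on failure, and returns the key and the uploaded size.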
func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
	randomUuid, _ := uuid.NewRandom()
	key = randomUuid.String()

	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)

	util.Retry("upload to S3", func() error {
		size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, s.storageClass, fn)
		return err
	})

	return
}
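
// DownloadFile fetches the object for key from the bucket into the local file
// fileName, reporting progress through fn.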
func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) {
	glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)

	size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn)

	return
}
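
// DeleteFile removes the object for key from the bucket.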
func (s *S3BackendStorage) DeleteFile(key string) (err error) {
	glog.V(1).Infof("delete dat file %s from remote", key)

	err = deleteFromS3(s.conn, s.bucket, key)

	return
}
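
// S3BackendStorageFile is a read-only view of one S3 object; its size and
// modification time come from the volume's tier info rather than from S3.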
type S3BackendStorageFile struct {
	backendStorage *S3BackendStorage
	key            string
	tierInfo       *volume_server_pb.VolumeInfo
}
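
// ReadAt issues a ranged GetObject request for bytes [off, off+len(p)) and
// drains the response body into p, since a single Read may return fewer bytes
// than the range covers.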
func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) {

	bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1)

	getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{
		Bucket: &s3backendStorageFile.backendStorage.bucket,
		Key:    &s3backendStorageFile.key,
		Range:  &bytesRange,
	})

	if getObjectErr != nil {
		return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr)
	}
	defer getObjectOutput.Body.Close()

	// glog.V(3).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
	// glog.V(3).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)

	var readCount int
	for {
		p = p[readCount:]
		readCount, err = getObjectOutput.Body.Read(p)
		n += readCount
		if err != nil {
			break
		}
	}

	if err == io.EOF {
		err = nil
	}

	return
}
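
// WriteAt and Truncate are unsupported: once uploaded, this backend treats
// the S3 object as immutable.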
func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) {
	panic("not implemented")
}

func (s3backendStorageFile S3BackendStorageFile) Truncate(off int64) error {
	panic("not implemented")
}
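
// Close is a no-op: each ranged read opens and closes its own response body,
// so there is nothing held open to release here.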
func (s3backendStorageFile S3BackendStorageFile) Close() error {
	return nil
}
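
// GetStat reports the size and modification time recorded in the volume's
// tier info; it does not issue a HEAD request to S3.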
func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) {
	files := s3backendStorageFile.tierInfo.GetFiles()

	if len(files) == 0 {
		err = fmt.Errorf("remote file info not found")
		return
	}

	datSize = int64(files[0].FileSize)
	modTime = time.Unix(int64(files[0].ModifiedTime), 0)

	return
}
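
// Name returns the S3 object key backing this file.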
func (s3backendStorageFile S3BackendStorageFile) Name() string {
	return s3backendStorageFile.key
}
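
// Sync is a no-op for the S3 backend.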
func (s3backendStorageFile S3BackendStorageFile) Sync() error {
	return nil
}