Mirror of https://github.com/seaweedfs/seaweedfs.git
s3 backend support customizing storage class
parent d71a1fd077
commit 192983b464
@@ -28,3 +28,4 @@ sleep_minutes = 17 # sleep minutes between each script execution
 region = "us-east-2"
 bucket = "volume_bucket" # an existing bucket
 endpoint = "http://server2:8333"
+storage_class = "STANDARD_IA"
@@ -36,6 +36,7 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials fil
 region = "us-east-2"
 bucket = "your_bucket_name" # an existing bucket
 endpoint = ""
+storage_class = "STANDARD_IA"
 
 # create this number of logical volumes if no more writable volumes
 # count_x means how many copies of data.
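Taken together, the two scaffold hunks above add a storage_class key next to the existing S3 tier settings, defaulting to STANDARD_IA when left empty (see the constructor hunk further down). A minimal sketch of a resulting config section follows; the [storage.backend.s3.default] section name is assumed from the scaffold layout, only storage_class is new here, and the listed class values are just common AWS examples:

[storage.backend.s3.default]
aws_access_key_id = ""         # if empty, loads from the shared credentials file (~/.aws/credentials)
aws_secret_access_key = ""     # if empty, loads from the shared credentials file (~/.aws/credentials)
region = "us-east-2"
bucket = "your_bucket_name"    # an existing bucket
endpoint = ""
storage_class = "STANDARD_IA"  # e.g. STANDARD, STANDARD_IA, ONEZONE_IA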
@@ -37,6 +37,7 @@ type S3BackendStorage struct {
 	region string
 	bucket string
 	endpoint string
+	storageClass string
 	conn s3iface.S3API
 }
 
@@ -48,6 +49,10 @@ func newS3BackendStorage(configuration backend.StringProperties, configPrefix st
 	s.region = configuration.GetString(configPrefix + "region")
 	s.bucket = configuration.GetString(configPrefix + "bucket")
 	s.endpoint = configuration.GetString(configPrefix + "endpoint")
+	s.storageClass = configuration.GetString(configPrefix + "storageClass")
+	if s.storageClass == "" {
+		s.storageClass = "STANDARD_IA"
+	}
 
 	s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint)
 
@@ -62,6 +67,7 @@ func (s *S3BackendStorage) ToProperties() map[string]string {
 	m["region"] = s.region
 	m["bucket"] = s.bucket
 	m["endpoint"] = s.endpoint
+	m["storage_class"] = s.storageClass
 	return m
 }
 
@@ -85,7 +91,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percen
 
 	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
 
-	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn)
+	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, s.storageClass, fn)
 
 	return
 }
@@ -12,7 +12,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 )
 
-func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
+func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, storageClass string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
 
 	//open the file
 	f, err := os.Open(filename)
@@ -52,7 +52,7 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey
 		Bucket:       aws.String(destBucket),
 		Key:          aws.String(destKey),
 		Body:         fileReader,
-		StorageClass: aws.String("STANDARD_IA"),
+		StorageClass: aws.String(storageClass),
 	})
 
 	//in case it fails to upload
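For readers less familiar with the AWS SDK side of the last two hunks: the storage class is simply a string field on the upload input, so making it configurable only requires threading the value through to the call. Below is a self-contained sketch of that pattern using aws-sdk-go's s3manager uploader; it is an illustration, not SeaweedFS's uploadToS3 (the package and function names, session handling, and error wrapping are invented for the example, and it omits the progress callback):

package s3sketch

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// uploadWithStorageClass uploads a local file to S3 and stores it under the
// given storage class (e.g. "STANDARD", "STANDARD_IA", "ONEZONE_IA").
func uploadWithStorageClass(sess *session.Session, filename, bucket, key, storageClass string) error {
	f, err := os.Open(filename)
	if err != nil {
		return fmt.Errorf("failed to open %s: %v", filename, err)
	}
	defer f.Close()

	uploader := s3manager.NewUploader(sess)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Body:   f,
		// Before this commit, the equivalent SeaweedFS call hard-coded "STANDARD_IA" here.
		StorageClass: aws.String(storageClass),
	})
	return err
}

A caller would build the session with session.Must(session.NewSession()) plus any region/endpoint options, and pass in the class string read from its configuration, mirroring how the constructor hunk loads storageClass and falls back to "STANDARD_IA".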