2013-08-14 14:26:51 +08:00
|
|
|
package operation
|
|
|
|
|
|
|
|
import (
|
2013-11-19 13:47:31 +08:00
|
|
|
"bytes"
|
2013-08-14 14:26:51 +08:00
|
|
|
"io"
|
|
|
|
"mime"
|
2015-12-15 14:38:58 +08:00
|
|
|
"net/url"
|
2013-08-14 14:26:51 +08:00
|
|
|
"os"
|
|
|
|
"path"
|
|
|
|
"strconv"
|
|
|
|
"strings"
|
2014-10-27 02:34:55 +08:00
|
|
|
|
2016-06-03 09:09:14 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/security"
|
2013-08-14 14:26:51 +08:00
|
|
|
)
|
|
|
|
|
2013-08-15 01:07:42 +08:00
|
|
|
// FilePart describes one file to be uploaded to a volume server.
// The caller fills in the content fields; SubmitFiles fills in the
// assignment fields (Server, Fid) from the master's assign result
// before calling Upload.
type FilePart struct {
	Reader    io.Reader // content source; closed by Upload when it implements io.Closer
	FileName  string
	FileSize  int64
	IsGzipped bool   // set when the file extension is ".gz"
	MimeType  string // derived from the file extension; may be empty
	ModTime   int64  //in seconds
	// Placement parameters forwarded to volume assignment.
	Replication string
	Collection  string
	DataCenter  string
	Ttl         string
	Server      string //this comes from assign result
	Fid         string //this comes from assign result, but customizable
}
|
|
|
|
|
2013-08-14 14:26:51 +08:00
|
|
|
// SubmitResult reports the outcome of uploading one file in a
// SubmitFiles batch. On failure Error is set and the other fields
// may be partially filled.
type SubmitResult struct {
	FileName string `json:"fileName,omitempty"`
	FileUrl  string `json:"fileUrl,omitempty"`
	Fid      string `json:"fid,omitempty"`
	Size     uint32 `json:"size,omitempty"`
	Error    string `json:"error,omitempty"`
}
|
|
|
|
|
2015-02-08 07:35:28 +08:00
|
|
|
func SubmitFiles(master string, files []FilePart,
|
2017-01-09 23:34:07 +08:00
|
|
|
replication string, collection string, dataCenter string, ttl string, maxMB int,
|
2015-02-08 07:35:28 +08:00
|
|
|
secret security.Secret,
|
|
|
|
) ([]SubmitResult, error) {
|
2013-08-14 14:26:51 +08:00
|
|
|
results := make([]SubmitResult, len(files))
|
|
|
|
for index, file := range files {
|
2013-08-15 01:07:42 +08:00
|
|
|
results[index].FileName = file.FileName
|
2013-08-14 14:26:51 +08:00
|
|
|
}
|
2016-06-26 10:50:18 +08:00
|
|
|
ar := &VolumeAssignRequest{
|
|
|
|
Count: uint64(len(files)),
|
|
|
|
Replication: replication,
|
|
|
|
Collection: collection,
|
2017-01-09 23:34:07 +08:00
|
|
|
DataCenter: dataCenter,
|
2016-06-26 10:50:18 +08:00
|
|
|
Ttl: ttl,
|
|
|
|
}
|
|
|
|
ret, err := Assign(master, ar)
|
2013-08-14 14:26:51 +08:00
|
|
|
if err != nil {
|
|
|
|
for index, _ := range files {
|
|
|
|
results[index].Error = err.Error()
|
|
|
|
}
|
|
|
|
return results, err
|
|
|
|
}
|
|
|
|
for index, file := range files {
|
2014-03-10 09:50:45 +08:00
|
|
|
file.Fid = ret.Fid
|
2013-08-14 14:26:51 +08:00
|
|
|
if index > 0 {
|
2014-03-10 09:50:45 +08:00
|
|
|
file.Fid = file.Fid + "_" + strconv.Itoa(index)
|
2013-08-14 14:26:51 +08:00
|
|
|
}
|
2015-03-09 16:10:01 +08:00
|
|
|
file.Server = ret.Url
|
2014-03-10 14:54:07 +08:00
|
|
|
file.Replication = replication
|
|
|
|
file.Collection = collection
|
2017-01-09 23:34:07 +08:00
|
|
|
file.DataCenter = dataCenter
|
2015-02-08 07:35:28 +08:00
|
|
|
results[index].Size, err = file.Upload(maxMB, master, secret)
|
2013-08-14 14:26:51 +08:00
|
|
|
if err != nil {
|
|
|
|
results[index].Error = err.Error()
|
|
|
|
}
|
2014-03-10 09:50:45 +08:00
|
|
|
results[index].Fid = file.Fid
|
2015-04-17 02:37:05 +08:00
|
|
|
results[index].FileUrl = ret.PublicUrl + "/" + file.Fid
|
2013-08-14 14:26:51 +08:00
|
|
|
}
|
|
|
|
return results, nil
|
|
|
|
}
|
|
|
|
|
2013-08-15 01:07:42 +08:00
|
|
|
func NewFileParts(fullPathFilenames []string) (ret []FilePart, err error) {
|
|
|
|
ret = make([]FilePart, len(fullPathFilenames))
|
|
|
|
for index, file := range fullPathFilenames {
|
2013-11-19 13:47:31 +08:00
|
|
|
if ret[index], err = newFilePart(file); err != nil {
|
2013-08-15 01:07:42 +08:00
|
|
|
return
|
|
|
|
}
|
2013-08-14 14:26:51 +08:00
|
|
|
}
|
2013-08-15 01:07:42 +08:00
|
|
|
return
|
|
|
|
}
|
2013-11-19 13:47:31 +08:00
|
|
|
func newFilePart(fullPathFilename string) (ret FilePart, err error) {
|
2013-08-15 01:07:42 +08:00
|
|
|
fh, openErr := os.Open(fullPathFilename)
|
|
|
|
if openErr != nil {
|
|
|
|
glog.V(0).Info("Failed to open file: ", fullPathFilename)
|
|
|
|
return ret, openErr
|
|
|
|
}
|
2013-08-15 02:31:39 +08:00
|
|
|
ret.Reader = fh
|
2013-08-15 01:07:42 +08:00
|
|
|
|
2017-01-04 11:23:40 +08:00
|
|
|
fi, fiErr := fh.Stat()
|
|
|
|
if fiErr != nil {
|
2013-08-15 01:07:42 +08:00
|
|
|
glog.V(0).Info("Failed to stat file:", fullPathFilename)
|
|
|
|
return ret, fiErr
|
|
|
|
}
|
2017-01-04 11:23:40 +08:00
|
|
|
ret.ModTime = fi.ModTime().UTC().Unix()
|
|
|
|
ret.FileSize = fi.Size()
|
2013-08-15 01:07:42 +08:00
|
|
|
ext := strings.ToLower(path.Ext(fullPathFilename))
|
|
|
|
ret.IsGzipped = ext == ".gz"
|
2018-02-27 00:01:23 +08:00
|
|
|
ret.FileName = fi.Name()
|
2013-08-15 01:07:42 +08:00
|
|
|
if ext != "" {
|
|
|
|
ret.MimeType = mime.TypeByExtension(ext)
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret, nil
|
|
|
|
}
|
|
|
|
|
2015-02-08 07:35:28 +08:00
|
|
|
func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (retSize uint32, err error) {
|
|
|
|
jwt := security.GenJwt(secret, fi.Fid)
|
2014-03-10 09:50:45 +08:00
|
|
|
fileUrl := "http://" + fi.Server + "/" + fi.Fid
|
2013-08-15 01:07:42 +08:00
|
|
|
if fi.ModTime != 0 {
|
|
|
|
fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
|
2013-08-14 14:26:51 +08:00
|
|
|
}
|
2013-09-02 14:58:21 +08:00
|
|
|
if closer, ok := fi.Reader.(io.Closer); ok {
|
|
|
|
defer closer.Close()
|
2013-08-15 02:31:39 +08:00
|
|
|
}
|
2015-12-02 16:35:16 +08:00
|
|
|
baseName := path.Base(fi.FileName)
|
2013-11-19 13:47:31 +08:00
|
|
|
if maxMB > 0 && fi.FileSize > int64(maxMB*1024*1024) {
|
|
|
|
chunkSize := int64(maxMB * 1024 * 1024)
|
|
|
|
chunks := fi.FileSize/chunkSize + 1
|
2015-12-02 15:00:46 +08:00
|
|
|
cm := ChunkManifest{
|
2015-12-02 16:35:16 +08:00
|
|
|
Name: baseName,
|
2015-12-02 15:00:46 +08:00
|
|
|
Size: fi.FileSize,
|
|
|
|
Mime: fi.MimeType,
|
|
|
|
Chunks: make([]*ChunkInfo, 0, chunks),
|
|
|
|
}
|
|
|
|
|
2017-01-09 23:34:07 +08:00
|
|
|
var ret *AssignResult
|
|
|
|
var id string
|
|
|
|
if fi.DataCenter != "" {
|
|
|
|
ar := &VolumeAssignRequest{
|
|
|
|
Count: uint64(chunks),
|
|
|
|
Replication: fi.Replication,
|
|
|
|
Collection: fi.Collection,
|
|
|
|
Ttl: fi.Ttl,
|
|
|
|
}
|
|
|
|
ret, err = Assign(master, ar)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
2013-11-19 13:47:31 +08:00
|
|
|
for i := int64(0); i < chunks; i++ {
|
2017-01-09 23:34:07 +08:00
|
|
|
if fi.DataCenter == "" {
|
|
|
|
ar := &VolumeAssignRequest{
|
|
|
|
Count: 1,
|
|
|
|
Replication: fi.Replication,
|
|
|
|
Collection: fi.Collection,
|
|
|
|
Ttl: fi.Ttl,
|
|
|
|
}
|
|
|
|
ret, err = Assign(master, ar)
|
|
|
|
if err != nil {
|
|
|
|
// delete all uploaded chunks
|
|
|
|
cm.DeleteChunks(master)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
id = ret.Fid
|
|
|
|
} else {
|
|
|
|
id = ret.Fid
|
|
|
|
if i > 0 {
|
|
|
|
id += "_" + strconv.FormatInt(i, 10)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fileUrl := "http://" + ret.Url + "/" + id
|
|
|
|
count, e := upload_one_chunk(
|
2015-12-02 16:35:16 +08:00
|
|
|
baseName+"-"+strconv.FormatInt(i+1, 10),
|
2015-02-08 07:35:28 +08:00
|
|
|
io.LimitReader(fi.Reader, chunkSize),
|
2017-01-09 23:34:07 +08:00
|
|
|
master, fileUrl,
|
2015-02-08 07:35:28 +08:00
|
|
|
jwt)
|
2013-11-19 13:47:31 +08:00
|
|
|
if e != nil {
|
2015-12-02 15:00:46 +08:00
|
|
|
// delete all uploaded chunks
|
|
|
|
cm.DeleteChunks(master)
|
2013-11-19 13:47:31 +08:00
|
|
|
return 0, e
|
|
|
|
}
|
2015-12-02 15:00:46 +08:00
|
|
|
cm.Chunks = append(cm.Chunks,
|
|
|
|
&ChunkInfo{
|
|
|
|
Offset: i * chunkSize,
|
|
|
|
Size: int64(count),
|
|
|
|
Fid: id,
|
|
|
|
},
|
|
|
|
)
|
2013-11-19 13:47:31 +08:00
|
|
|
retSize += count
|
|
|
|
}
|
2015-12-02 15:00:46 +08:00
|
|
|
err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
|
|
|
|
if err != nil {
|
|
|
|
// delete all uploaded chunks
|
|
|
|
cm.DeleteChunks(master)
|
|
|
|
}
|
2013-11-19 13:47:31 +08:00
|
|
|
} else {
|
2017-01-08 09:16:29 +08:00
|
|
|
ret, e := Upload(fileUrl, baseName, fi.Reader, fi.IsGzipped, fi.MimeType, nil, jwt)
|
2013-11-19 13:47:31 +08:00
|
|
|
if e != nil {
|
|
|
|
return 0, e
|
|
|
|
}
|
|
|
|
return ret.Size, e
|
2013-08-14 14:26:51 +08:00
|
|
|
}
|
2013-11-19 15:03:06 +08:00
|
|
|
return
|
2013-11-19 13:47:31 +08:00
|
|
|
}
|
|
|
|
|
2015-02-08 07:35:28 +08:00
|
|
|
func upload_one_chunk(filename string, reader io.Reader, master,
|
2017-01-09 23:34:07 +08:00
|
|
|
fileUrl string, jwt security.EncodedJwt,
|
|
|
|
) (size uint32, e error) {
|
2013-11-19 13:47:31 +08:00
|
|
|
glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")
|
2015-02-08 07:35:28 +08:00
|
|
|
uploadResult, uploadError := Upload(fileUrl, filename, reader, false,
|
2017-01-08 09:16:29 +08:00
|
|
|
"application/octet-stream", nil, jwt)
|
2013-11-19 18:12:56 +08:00
|
|
|
if uploadError != nil {
|
2017-01-09 23:34:07 +08:00
|
|
|
return 0, uploadError
|
2013-11-19 18:12:56 +08:00
|
|
|
}
|
2017-01-09 23:34:07 +08:00
|
|
|
return uploadResult.Size, nil
|
2013-11-19 13:47:31 +08:00
|
|
|
}
|
|
|
|
|
2015-12-02 15:00:46 +08:00
|
|
|
func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt security.EncodedJwt) error {
|
2015-12-14 22:01:30 +08:00
|
|
|
buf, e := manifest.Marshal()
|
2015-12-02 15:00:46 +08:00
|
|
|
if e != nil {
|
|
|
|
return e
|
|
|
|
}
|
|
|
|
bufReader := bytes.NewReader(buf)
|
|
|
|
glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
|
|
|
|
u, _ := url.Parse(fileUrl)
|
|
|
|
q := u.Query()
|
2015-12-15 14:38:58 +08:00
|
|
|
q.Set("cm", "true")
|
2015-12-02 15:00:46 +08:00
|
|
|
u.RawQuery = q.Encode()
|
2017-01-08 09:16:29 +08:00
|
|
|
_, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", nil, jwt)
|
2013-11-19 13:47:31 +08:00
|
|
|
return e
|
2013-08-14 14:26:51 +08:00
|
|
|
}
|