// seaweedfs/weed/operation/submit.go
package operation

import (
	"bytes"
	"io"
	"mime"
	"net/url"
	"os"
	"path"
	"strconv"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/security"
)
// FilePart describes a single file to be uploaded, together with the
// assignment results (Server, Fid) that tell it where to go.
type FilePart struct {
	Reader      io.Reader
	FileName    string
	FileSize    int64
	IsGzipped   bool
	MimeType    string
	ModTime     int64 // in seconds
	Replication string
	Collection  string
	Ttl         string
	Server      string // this comes from assign result
	Fid         string // this comes from assign result, but customizable
}
// SubmitResult reports the outcome of uploading one file.
type SubmitResult struct {
	FileName string `json:"fileName,omitempty"`
	FileUrl  string `json:"fileUrl,omitempty"`
	Fid      string `json:"fid,omitempty"`
	Size     uint32 `json:"size,omitempty"`
	Error    string `json:"error,omitempty"`
}
2015-02-08 07:35:28 +08:00
func SubmitFiles(master string, files []FilePart,
replication string, collection string, ttl string, maxMB int,
secret security.Secret,
) ([]SubmitResult, error) {
results := make([]SubmitResult, len(files))
for index, file := range files {
2013-08-15 01:07:42 +08:00
results[index].FileName = file.FileName
}
ret, err := Assign(master, uint64(len(files)), replication, collection, ttl)
if err != nil {
for index, _ := range files {
results[index].Error = err.Error()
}
return results, err
}
for index, file := range files {
2014-03-10 09:50:45 +08:00
file.Fid = ret.Fid
if index > 0 {
2014-03-10 09:50:45 +08:00
file.Fid = file.Fid + "_" + strconv.Itoa(index)
}
file.Server = ret.Url
file.Replication = replication
file.Collection = collection
2015-02-08 07:35:28 +08:00
results[index].Size, err = file.Upload(maxMB, master, secret)
if err != nil {
results[index].Error = err.Error()
}
2014-03-10 09:50:45 +08:00
results[index].Fid = file.Fid
results[index].FileUrl = ret.PublicUrl + "/" + file.Fid
}
return results, nil
}
2013-08-15 01:07:42 +08:00
func NewFileParts(fullPathFilenames []string) (ret []FilePart, err error) {
ret = make([]FilePart, len(fullPathFilenames))
for index, file := range fullPathFilenames {
if ret[index], err = newFilePart(file); err != nil {
2013-08-15 01:07:42 +08:00
return
}
}
2013-08-15 01:07:42 +08:00
return
}
func newFilePart(fullPathFilename string) (ret FilePart, err error) {
2013-08-15 01:07:42 +08:00
fh, openErr := os.Open(fullPathFilename)
if openErr != nil {
glog.V(0).Info("Failed to open file: ", fullPathFilename)
return ret, openErr
}
ret.Reader = fh
2013-08-15 01:07:42 +08:00
if fi, fiErr := fh.Stat(); fiErr != nil {
glog.V(0).Info("Failed to stat file:", fullPathFilename)
return ret, fiErr
} else {
ret.ModTime = fi.ModTime().UTC().Unix()
ret.FileSize = fi.Size()
2013-08-15 01:07:42 +08:00
}
ext := strings.ToLower(path.Ext(fullPathFilename))
ret.IsGzipped = ext == ".gz"
if ret.IsGzipped {
ret.FileName = fullPathFilename[0 : len(fullPathFilename)-3]
}
2013-08-15 01:07:42 +08:00
ret.FileName = fullPathFilename
if ext != "" {
ret.MimeType = mime.TypeByExtension(ext)
}
return ret, nil
}
2015-02-08 07:35:28 +08:00
func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (retSize uint32, err error) {
jwt := security.GenJwt(secret, fi.Fid)
2014-03-10 09:50:45 +08:00
fileUrl := "http://" + fi.Server + "/" + fi.Fid
2013-08-15 01:07:42 +08:00
if fi.ModTime != 0 {
fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
}
if closer, ok := fi.Reader.(io.Closer); ok {
defer closer.Close()
}
baseName := path.Base(fi.FileName)
if maxMB > 0 && fi.FileSize > int64(maxMB*1024*1024) {
chunkSize := int64(maxMB * 1024 * 1024)
chunks := fi.FileSize/chunkSize + 1
2015-12-02 15:00:46 +08:00
cm := ChunkManifest{
Name: baseName,
2015-12-02 15:00:46 +08:00
Size: fi.FileSize,
Mime: fi.MimeType,
Chunks: make([]*ChunkInfo, 0, chunks),
}
for i := int64(0); i < chunks; i++ {
2015-02-08 07:35:28 +08:00
id, count, e := upload_one_chunk(
baseName+"-"+strconv.FormatInt(i+1, 10),
2015-02-08 07:35:28 +08:00
io.LimitReader(fi.Reader, chunkSize),
master, fi.Replication, fi.Collection, fi.Ttl,
jwt)
if e != nil {
2015-12-02 15:00:46 +08:00
// delete all uploaded chunks
cm.DeleteChunks(master)
return 0, e
}
2015-12-02 15:00:46 +08:00
cm.Chunks = append(cm.Chunks,
&ChunkInfo{
Offset: i * chunkSize,
Size: int64(count),
Fid: id,
},
)
retSize += count
}
2015-12-02 15:00:46 +08:00
err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
if err != nil {
// delete all uploaded chunks
cm.DeleteChunks(master)
}
} else {
ret, e := Upload(fileUrl, baseName, fi.Reader, fi.IsGzipped, fi.MimeType, jwt)
if e != nil {
return 0, e
}
return ret.Size, e
}
return
}
2015-02-08 07:35:28 +08:00
func upload_one_chunk(filename string, reader io.Reader, master,
replication string, collection string, ttl string, jwt security.EncodedJwt,
) (fid string, size uint32, e error) {
ret, err := Assign(master, 1, replication, collection, ttl)
if err != nil {
return "", 0, err
}
fileUrl, fid := "http://"+ret.Url+"/"+ret.Fid, ret.Fid
glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")
2015-02-08 07:35:28 +08:00
uploadResult, uploadError := Upload(fileUrl, filename, reader, false,
"application/octet-stream", jwt)
2013-11-19 18:12:56 +08:00
if uploadError != nil {
return fid, 0, uploadError
}
return fid, uploadResult.Size, nil
}
2015-12-02 15:00:46 +08:00
func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt security.EncodedJwt) error {
buf, e := manifest.Marshal()
2015-12-02 15:00:46 +08:00
if e != nil {
return e
}
bufReader := bytes.NewReader(buf)
glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
u, _ := url.Parse(fileUrl)
q := u.Query()
2015-12-15 14:38:58 +08:00
q.Set("cm", "true")
2015-12-02 15:00:46 +08:00
u.RawQuery = q.Encode()
2015-12-02 18:25:18 +08:00
_, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", jwt)
return e
}