seaweedfs/weed/operation/submit.go

package operation

import (
	"context"
	"io"
	"math/rand/v2"
	"mime"
	"net/url"
	"os"
	"path"
	"strconv"
	"strings"

	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/security"
)
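
// FilePart describes a single file to upload: its content reader and
// metadata, plus the volume server and file id filled in from an assign result.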
type FilePart struct {
	Reader   io.Reader
	FileName string
	FileSize int64
	MimeType string
	ModTime  int64 // in seconds
	Pref     StoragePreference
	Server   string // this comes from assign result
	Fid      string // this comes from assign result, but customizable
	Fsync    bool
}
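
// SubmitResult reports the outcome of uploading one FilePart.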
type SubmitResult struct {
	FileName string `json:"fileName,omitempty"`
	FileUrl  string `json:"url,omitempty"`
	Fid      string `json:"fid,omitempty"`
	Size     uint32 `json:"size,omitempty"`
	Error    string `json:"error,omitempty"`
}
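
// StoragePreference carries the placement and storage options applied to an upload.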
type StoragePreference struct {
	Replication string
	Collection  string
	DataCenter  string
	Ttl         string
	DiskType    string
	MaxMB       int
}
type GetMasterFn func(ctx context.Context) pb.ServerAddress
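
// SubmitFiles assigns file ids from the master for the whole batch, then
// uploads each file to its assigned volume server, returning one SubmitResult
// per input file.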
func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []*FilePart, pref StoragePreference, usePublicUrl bool) ([]SubmitResult, error) {
	results := make([]SubmitResult, len(files))
	for index, file := range files {
		results[index].FileName = file.FileName
	}
	ar := &VolumeAssignRequest{
		Count:       uint64(len(files)),
		Replication: pref.Replication,
		Collection:  pref.Collection,
		DataCenter:  pref.DataCenter,
		Ttl:         pref.Ttl,
		DiskType:    pref.DiskType,
	}
	ret, err := Assign(masterFn, grpcDialOption, ar)
	if err != nil {
		for index := range files {
			results[index].Error = err.Error()
		}
		return results, err
	}
	for index, file := range files {
		file.Fid = ret.Fid
		if index > 0 {
			file.Fid = file.Fid + "_" + strconv.Itoa(index)
		}
		file.Server = ret.Url
		if usePublicUrl {
			file.Server = ret.PublicUrl
		}
		file.Pref = pref
		results[index].Size, err = file.Upload(pref.MaxMB, masterFn, usePublicUrl, ret.Auth, grpcDialOption)
		if err != nil {
			results[index].Error = err.Error()
		}
		results[index].Fid = file.Fid
		results[index].FileUrl = ret.PublicUrl + "/" + file.Fid
	}
	return results, nil
}
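
// NewFileParts builds a FilePart for each of the given local file paths.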
func NewFileParts(fullPathFilenames []string) (ret []*FilePart, err error) {
	ret = make([]*FilePart, len(fullPathFilenames))
	for index, file := range fullPathFilenames {
		if ret[index], err = newFilePart(file); err != nil {
			return
		}
	}
	return
}
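
// newFilePart opens the local file and fills in its size, modification time,
// name, and MIME type (derived from the file extension).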
func newFilePart(fullPathFilename string) (ret *FilePart, err error) {
	ret = &FilePart{}
	fh, openErr := os.Open(fullPathFilename)
	if openErr != nil {
		glog.V(0).Info("Failed to open file: ", fullPathFilename)
		return ret, openErr
	}
	ret.Reader = fh

	fi, fiErr := fh.Stat()
	if fiErr != nil {
		glog.V(0).Info("Failed to stat file: ", fullPathFilename)
		return ret, fiErr
	}
	ret.ModTime = fi.ModTime().UTC().Unix()
	ret.FileSize = fi.Size()
	ext := strings.ToLower(path.Ext(fullPathFilename))
	ret.FileName = fi.Name()
	if ext != "" {
		ret.MimeType = mime.TypeByExtension(ext)
	}
	return ret, nil
}
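
// Upload sends the file to its assigned volume server. Files larger than
// maxMB megabytes are split into chunks that are uploaded individually,
// followed by a chunk manifest; already uploaded chunks are deleted if any
// step fails.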
func (fi *FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
	fileUrl := "http://" + fi.Server + "/" + fi.Fid
	if fi.ModTime != 0 {
		fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
	}
	if fi.Fsync {
		// append with "&" when a query string is already present
		if strings.Contains(fileUrl, "?") {
			fileUrl += "&fsync=true"
		} else {
			fileUrl += "?fsync=true"
		}
	}
	if closer, ok := fi.Reader.(io.Closer); ok {
		defer closer.Close()
	}
	baseName := path.Base(fi.FileName)
	if maxMB > 0 && fi.FileSize > int64(maxMB*1024*1024) {
		chunkSize := int64(maxMB * 1024 * 1024)
		chunks := fi.FileSize/chunkSize + 1
		cm := ChunkManifest{
			Name:   baseName,
			Size:   fi.FileSize,
			Mime:   fi.MimeType,
			Chunks: make([]*ChunkInfo, 0, chunks),
		}

		var ret *AssignResult
		var id string
		if fi.Pref.DataCenter != "" {
			ar := &VolumeAssignRequest{
				Count:       uint64(chunks),
				Replication: fi.Pref.Replication,
				Collection:  fi.Pref.Collection,
				Ttl:         fi.Pref.Ttl,
				DiskType:    fi.Pref.DiskType,
			}
			ret, err = Assign(masterFn, grpcDialOption, ar)
			if err != nil {
				return
			}
		}
		for i := int64(0); i < chunks; i++ {
			if fi.Pref.DataCenter == "" {
				ar := &VolumeAssignRequest{
					Count:       1,
					Replication: fi.Pref.Replication,
					Collection:  fi.Pref.Collection,
					Ttl:         fi.Pref.Ttl,
					DiskType:    fi.Pref.DiskType,
				}
				ret, err = Assign(masterFn, grpcDialOption, ar)
				if err != nil {
					// delete all uploaded chunks
					cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
					return
				}
				id = ret.Fid
			} else {
				id = ret.Fid
				if i > 0 {
					id += "_" + strconv.FormatInt(i, 10)
				}
			}
			fileUrl := genFileUrl(ret, id, usePublicUrl)
			count, e := uploadOneChunk(
				baseName+"-"+strconv.FormatInt(i+1, 10),
				io.LimitReader(fi.Reader, chunkSize),
				masterFn, fileUrl,
				ret.Auth)
			if e != nil {
				// delete all uploaded chunks
				cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
				return 0, e
			}
			cm.Chunks = append(cm.Chunks,
				&ChunkInfo{
					Offset: i * chunkSize,
					Size:   int64(count),
					Fid:    id,
				},
			)
			retSize += count
		}
		err = uploadChunkedFileManifest(fileUrl, &cm, jwt)
		if err != nil {
			// delete all uploaded chunks
			cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption)
		}
	} else {
		uploadOption := &UploadOption{
			UploadUrl:         fileUrl,
			Filename:          baseName,
			Cipher:            false,
			IsInputCompressed: false,
			MimeType:          fi.MimeType,
			PairMap:           nil,
			Jwt:               jwt,
		}
		uploader, e := NewUploader()
		if e != nil {
			return 0, e
		}
		ret, e, _ := uploader.Upload(fi.Reader, uploadOption)
		if e != nil {
			return 0, e
		}
		return ret.Size, e
	}
	return
}
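
// genFileUrl builds the upload URL for a file id, randomly switching to one
// of the replicas (if any) to spread the write load.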
func genFileUrl(ret *AssignResult, id string, usePublicUrl bool) string {
	fileUrl := "http://" + ret.Url + "/" + id
	if usePublicUrl {
		fileUrl = "http://" + ret.PublicUrl + "/" + id
	}
	for _, replica := range ret.Replicas {
		if rand.IntN(len(ret.Replicas)+1) == 0 {
			fileUrl = "http://" + replica.Url + "/" + id
			if usePublicUrl {
				fileUrl = "http://" + replica.PublicUrl + "/" + id
			}
		}
	}
	return fileUrl
}
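
// uploadOneChunk uploads a single chunk of a large file to the given URL.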
func uploadOneChunk(filename string, reader io.Reader, masterFn GetMasterFn,
	fileUrl string, jwt security.EncodedJwt,
) (size uint32, e error) {
	glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")
	uploadOption := &UploadOption{
		UploadUrl:         fileUrl,
		Filename:          filename,
		Cipher:            false,
		IsInputCompressed: false,
		MimeType:          "",
		PairMap:           nil,
		Jwt:               jwt,
	}
	uploader, uploaderError := NewUploader()
	if uploaderError != nil {
		return 0, uploaderError
	}
	uploadResult, uploadError, _ := uploader.Upload(reader, uploadOption)
	if uploadError != nil {
		return 0, uploadError
	}
	return uploadResult.Size, nil
}
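
// uploadChunkedFileManifest uploads the JSON chunk manifest to fileUrl with
// the query parameter cm=true to mark it as a chunk manifest.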
func uploadChunkedFileManifest(fileUrl string, manifest *ChunkManifest, jwt security.EncodedJwt) error {
	buf, e := manifest.Marshal()
	if e != nil {
		return e
	}
	glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
	u, _ := url.Parse(fileUrl)
	q := u.Query()
	q.Set("cm", "true")
	u.RawQuery = q.Encode()
	uploadOption := &UploadOption{
		UploadUrl:         u.String(),
		Filename:          manifest.Name,
		Cipher:            false,
		IsInputCompressed: false,
		MimeType:          "application/json",
		PairMap:           nil,
		Jwt:               jwt,
	}
	uploader, e := NewUploader()
	if e != nil {
		return e
	}
	_, e = uploader.UploadData(buf, uploadOption)
	return e
}