2016-07-21 14:45:55 +08:00
|
|
|
package command
|
|
|
|
|
|
|
|
import (
|
2019-04-05 15:04:00 +08:00
|
|
|
"context"
|
2016-07-21 14:45:55 +08:00
|
|
|
"fmt"
|
2019-04-05 15:04:00 +08:00
|
|
|
"io"
|
2016-07-21 14:45:55 +08:00
|
|
|
"io/ioutil"
|
2019-04-05 15:04:00 +08:00
|
|
|
"net/http"
|
2016-07-21 14:45:55 +08:00
|
|
|
"net/url"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2018-05-30 15:54:56 +08:00
|
|
|
"strconv"
|
2019-04-05 15:04:00 +08:00
|
|
|
"strings"
|
2019-04-06 14:35:30 +08:00
|
|
|
"sync"
|
2018-05-30 15:54:56 +08:00
|
|
|
"time"
|
2019-06-05 16:30:24 +08:00
|
|
|
|
2020-01-30 01:09:55 +08:00
|
|
|
"google.golang.org/grpc"
|
|
|
|
|
2019-06-05 16:30:24 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/security"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/wdclient"
|
2016-07-21 14:45:55 +08:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// copy holds the flag-backed options shared by every copy worker.
	// NOTE(review): this package-level name shadows the builtin copy()
	// within this package — confirm no code here needs the builtin.
	copy CopyOptions
	// waitGroup tracks the pool of concurrent file-copy worker goroutines
	// started in runCopy.
	waitGroup sync.WaitGroup
)
|
|
|
|
|
|
|
|
// CopyOptions bundles the command-line flags of the filer.copy command
// together with runtime state (masters list, gRPC credentials) that is
// resolved from the filer at startup.
type CopyOptions struct {
	include           *string // glob pattern of file names to copy; empty means all files
	replication       *string // replication setting; falls back to the filer's default when empty
	collection        *string // collection name; falls back to the filer's default when empty
	ttl               *string // time to live, e.g. "1m", "1h", "1d", "1M", "1y"
	maxMB             *int    // files larger than this many MB are split into chunks
	masterClient      *wdclient.MasterClient
	concurrenctFiles  *int // number of concurrent file-copy goroutines (field name typo kept; siblings reference it)
	concurrenctChunks *int // number of concurrent chunk uploads per file
	compressionLevel  *int // local compression level 1-9 for whole-file uploads
	grpcDialOption    grpc.DialOption
	masters           []string // master server addresses reported by the filer
}
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
cmdCopy.Run = runCopy // break init cycle
|
|
|
|
cmdCopy.IsDebug = cmdCopy.Flag.Bool("debug", false, "verbose debug information")
|
|
|
|
copy.include = cmdCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
|
|
|
|
copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
|
|
|
|
copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
|
|
|
|
copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
|
2019-07-11 12:48:27 +08:00
|
|
|
copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
|
2019-10-24 22:26:23 +08:00
|
|
|
copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
|
|
|
|
copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
|
2019-04-19 11:21:28 +08:00
|
|
|
copy.compressionLevel = cmdCopy.Flag.Int("compressionLevel", 9, "local file compression level 1 ~ 9")
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
var cmdCopy = &Command{
|
2016-07-22 06:00:07 +08:00
|
|
|
UsageLine: "filer.copy file_or_dir1 [file_or_dir2 file_or_dir3] http://localhost:8888/path/to/a/folder/",
|
2016-07-21 14:45:55 +08:00
|
|
|
Short: "copy one or a list of files to a filer folder",
|
|
|
|
Long: `copy one or a list of files, or batch copy one whole folder recursively, to a filer folder
|
|
|
|
|
|
|
|
It can copy one or a list of files or folders.
|
|
|
|
|
|
|
|
If copying a whole folder recursively:
|
|
|
|
All files under the folder and subfolders will be copyed.
|
|
|
|
Optional parameter "-include" allows you to specify the file name patterns.
|
|
|
|
|
2018-09-28 16:58:34 +08:00
|
|
|
If "maxMB" is set to a positive number, files larger than it would be split into chunks.
|
2016-07-21 14:45:55 +08:00
|
|
|
|
|
|
|
`,
|
|
|
|
}
|
|
|
|
|
|
|
|
func runCopy(cmd *Command, args []string) bool {
|
2019-02-19 04:11:52 +08:00
|
|
|
|
2019-06-05 16:30:24 +08:00
|
|
|
util.LoadConfiguration("security", false)
|
2019-02-19 04:11:52 +08:00
|
|
|
|
2016-07-21 14:45:55 +08:00
|
|
|
if len(args) <= 1 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
filerDestination := args[len(args)-1]
|
2018-07-22 08:39:10 +08:00
|
|
|
fileOrDirs := args[0 : len(args)-1]
|
2016-07-21 14:45:55 +08:00
|
|
|
|
|
|
|
filerUrl, err := url.Parse(filerDestination)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("The last argument should be a URL on filer: %v\n", err)
|
|
|
|
return false
|
|
|
|
}
|
2018-05-30 14:46:45 +08:00
|
|
|
urlPath := filerUrl.Path
|
|
|
|
if !strings.HasSuffix(urlPath, "/") {
|
2018-12-24 10:20:11 +08:00
|
|
|
fmt.Printf("The last argument should be a folder and end with \"/\": %v\n", err)
|
|
|
|
return false
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
|
|
|
|
2018-06-06 14:37:41 +08:00
|
|
|
if filerUrl.Port() == "" {
|
|
|
|
fmt.Printf("The filer port should be specified.\n")
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
filerPort, parseErr := strconv.ParseUint(filerUrl.Port(), 10, 64)
|
|
|
|
if parseErr != nil {
|
|
|
|
fmt.Printf("The filer port parse error: %v\n", parseErr)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
filerGrpcPort := filerPort + 10000
|
|
|
|
filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
|
2020-01-30 01:09:55 +08:00
|
|
|
copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
|
2018-06-06 14:37:41 +08:00
|
|
|
|
2020-02-26 14:23:59 +08:00
|
|
|
masters, collection, replication, maxMB, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress)
|
2019-06-23 16:57:35 +08:00
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if *copy.collection == "" {
|
|
|
|
*copy.collection = collection
|
|
|
|
}
|
|
|
|
if *copy.replication == "" {
|
|
|
|
*copy.replication = replication
|
|
|
|
}
|
|
|
|
if *copy.maxMB == 0 {
|
|
|
|
*copy.maxMB = int(maxMB)
|
|
|
|
}
|
|
|
|
copy.masters = masters
|
|
|
|
|
2019-04-08 00:13:24 +08:00
|
|
|
if *cmdCopy.IsDebug {
|
|
|
|
util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
|
|
|
|
}
|
|
|
|
|
2019-10-24 22:26:23 +08:00
|
|
|
fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrenctFiles)
|
2019-04-06 14:35:30 +08:00
|
|
|
|
|
|
|
go func() {
|
|
|
|
defer close(fileCopyTaskChan)
|
|
|
|
for _, fileOrDir := range fileOrDirs {
|
|
|
|
if err := genFileCopyTask(fileOrDir, urlPath, fileCopyTaskChan); err != nil {
|
|
|
|
fmt.Fprintf(os.Stderr, "gen file list error: %v\n", err)
|
|
|
|
break
|
|
|
|
}
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
2019-04-06 14:35:30 +08:00
|
|
|
}()
|
2019-10-24 22:26:23 +08:00
|
|
|
for i := 0; i < *copy.concurrenctFiles; i++ {
|
2019-04-06 14:35:30 +08:00
|
|
|
waitGroup.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer waitGroup.Done()
|
|
|
|
worker := FileCopyWorker{
|
|
|
|
options: ©,
|
|
|
|
filerHost: filerUrl.Host,
|
|
|
|
filerGrpcAddress: filerGrpcAddress,
|
|
|
|
}
|
2020-02-26 14:23:59 +08:00
|
|
|
if err := worker.copyFiles(fileCopyTaskChan); err != nil {
|
2019-04-06 14:35:30 +08:00
|
|
|
fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}()
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
2019-04-06 14:35:30 +08:00
|
|
|
waitGroup.Wait()
|
|
|
|
|
2016-07-21 14:45:55 +08:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2020-02-26 14:23:59 +08:00
|
|
|
func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) {
|
2020-02-26 13:50:12 +08:00
|
|
|
err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
2020-02-26 14:23:59 +08:00
|
|
|
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
|
2019-06-23 16:57:35 +08:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
|
|
|
|
}
|
|
|
|
masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-04-06 14:35:30 +08:00
|
|
|
func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan FileCopyTask) error {
|
2016-07-21 14:45:55 +08:00
|
|
|
|
2019-04-06 14:35:30 +08:00
|
|
|
fi, err := os.Stat(fileOrDir)
|
2016-07-21 14:45:55 +08:00
|
|
|
if err != nil {
|
2019-04-06 14:35:30 +08:00
|
|
|
fmt.Fprintf(os.Stderr, "Failed to get stat for file %s: %v\n", fileOrDir, err)
|
|
|
|
return nil
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
mode := fi.Mode()
|
|
|
|
if mode.IsDir() {
|
|
|
|
files, _ := ioutil.ReadDir(fileOrDir)
|
|
|
|
for _, subFileOrDir := range files {
|
2019-04-06 14:35:30 +08:00
|
|
|
if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
|
|
|
|
return err
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
|
|
|
}
|
2019-04-06 14:35:30 +08:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-04-08 02:31:50 +08:00
|
|
|
uid, gid := util.GetFileUidGid(fi)
|
|
|
|
|
2019-04-06 14:35:30 +08:00
|
|
|
fileCopyTaskChan <- FileCopyTask{
|
|
|
|
sourceLocation: fileOrDir,
|
|
|
|
destinationUrlPath: destPath,
|
|
|
|
fileSize: fi.Size(),
|
|
|
|
fileMode: fi.Mode(),
|
2019-04-08 02:31:50 +08:00
|
|
|
uid: uid,
|
|
|
|
gid: gid,
|
2019-04-06 14:35:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// FileCopyWorker consumes FileCopyTasks from a shared channel and uploads
// each file to one filer.
type FileCopyWorker struct {
	options          *CopyOptions // shared flag values, masters, and gRPC credentials
	filerHost        string       // filer HTTP host:port, used in progress messages
	filerGrpcAddress string       // filer gRPC host:port for metadata operations
}
|
|
|
|
|
2020-02-26 14:23:59 +08:00
|
|
|
func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
|
2019-04-06 14:35:30 +08:00
|
|
|
for task := range fileCopyTaskChan {
|
2020-02-26 14:23:59 +08:00
|
|
|
if err := worker.doEachCopy(task); err != nil {
|
2019-04-06 14:35:30 +08:00
|
|
|
return err
|
|
|
|
}
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
2019-04-06 14:35:30 +08:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// FileCopyTask describes one local file to upload and where it goes on
// the filer.
type FileCopyTask struct {
	sourceLocation     string      // local path of the source file
	destinationUrlPath string      // destination folder path on the filer, ends with "/"
	fileSize           int64       // size in bytes, captured when the file was stat'ed
	fileMode           os.FileMode // permission bits copied onto the filer entry
	uid                uint32      // owner user id of the source file
	gid                uint32      // owner group id of the source file
}
|
|
|
|
|
2020-02-26 14:23:59 +08:00
|
|
|
func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error {
|
2019-04-06 14:35:30 +08:00
|
|
|
|
|
|
|
f, err := os.Open(task.sourceLocation)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Failed to open file %s: %v\n", task.sourceLocation, err)
|
|
|
|
if _, ok := err.(*os.PathError); ok {
|
|
|
|
fmt.Printf("skipping %s\n", task.sourceLocation)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer f.Close()
|
2016-07-21 14:45:55 +08:00
|
|
|
|
|
|
|
// this is a regular file
|
2019-04-06 14:35:30 +08:00
|
|
|
if *worker.options.include != "" {
|
|
|
|
if ok, _ := filepath.Match(*worker.options.include, filepath.Base(task.sourceLocation)); !ok {
|
|
|
|
return nil
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-30 14:46:45 +08:00
|
|
|
// find the chunk count
|
2019-04-06 14:35:30 +08:00
|
|
|
chunkSize := int64(*worker.options.maxMB * 1024 * 1024)
|
2018-05-30 14:46:45 +08:00
|
|
|
chunkCount := 1
|
2019-04-06 14:35:30 +08:00
|
|
|
if chunkSize > 0 && task.fileSize > chunkSize {
|
|
|
|
chunkCount = int(task.fileSize/chunkSize) + 1
|
2018-05-30 14:46:45 +08:00
|
|
|
}
|
|
|
|
|
2018-05-30 16:05:26 +08:00
|
|
|
if chunkCount == 1 {
|
2020-02-26 13:50:12 +08:00
|
|
|
return worker.uploadFileAsOne(task, f)
|
2018-05-30 16:05:26 +08:00
|
|
|
}
|
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
return worker.uploadFileInChunks(task, f, chunkCount, chunkSize)
|
2018-05-30 16:05:26 +08:00
|
|
|
}
|
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error {
|
2018-05-30 16:05:26 +08:00
|
|
|
|
2018-05-30 14:46:45 +08:00
|
|
|
// upload the file content
|
2018-05-31 11:48:13 +08:00
|
|
|
fileName := filepath.Base(f.Name())
|
2018-05-30 14:52:27 +08:00
|
|
|
mimeType := detectMimeType(f)
|
2018-05-30 14:46:45 +08:00
|
|
|
|
2018-05-31 13:28:14 +08:00
|
|
|
var chunks []*filer_pb.FileChunk
|
2020-02-25 14:28:45 +08:00
|
|
|
var assignResult *filer_pb.AssignVolumeResponse
|
|
|
|
var assignError error
|
2016-07-21 14:45:55 +08:00
|
|
|
|
2019-04-06 14:35:30 +08:00
|
|
|
if task.fileSize > 0 {
|
2018-05-31 13:28:14 +08:00
|
|
|
|
|
|
|
// assign a volume
|
2020-02-26 13:50:12 +08:00
|
|
|
err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
2020-02-25 14:28:45 +08:00
|
|
|
|
|
|
|
request := &filer_pb.AssignVolumeRequest{
|
|
|
|
Count: 1,
|
|
|
|
Replication: *worker.options.replication,
|
|
|
|
Collection: *worker.options.collection,
|
|
|
|
TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
|
|
|
|
ParentPath: task.destinationUrlPath,
|
|
|
|
}
|
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
assignResult, assignError = client.AssignVolume(context.Background(), request)
|
2020-02-25 14:28:45 +08:00
|
|
|
if assignError != nil {
|
|
|
|
return fmt.Errorf("assign volume failure %v: %v", request, assignError)
|
|
|
|
}
|
2020-02-26 09:15:09 +08:00
|
|
|
if assignResult.Error != "" {
|
|
|
|
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
|
|
|
|
}
|
2020-02-25 14:28:45 +08:00
|
|
|
return nil
|
2018-05-31 13:28:14 +08:00
|
|
|
})
|
|
|
|
if err != nil {
|
2019-06-23 16:57:35 +08:00
|
|
|
fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
|
2018-05-31 13:28:14 +08:00
|
|
|
}
|
|
|
|
|
2020-02-25 14:28:45 +08:00
|
|
|
targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
|
2018-05-31 13:28:14 +08:00
|
|
|
|
2020-02-25 14:28:45 +08:00
|
|
|
uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, security.EncodedJwt(assignResult.Auth), *worker.options.compressionLevel)
|
2018-05-31 13:28:14 +08:00
|
|
|
if err != nil {
|
2019-04-06 14:35:30 +08:00
|
|
|
return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
|
2018-05-31 13:28:14 +08:00
|
|
|
}
|
|
|
|
if uploadResult.Error != "" {
|
2019-04-06 14:35:30 +08:00
|
|
|
return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
|
2018-05-31 13:28:14 +08:00
|
|
|
}
|
|
|
|
fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
|
|
|
|
|
|
|
|
chunks = append(chunks, &filer_pb.FileChunk{
|
2020-02-25 14:28:45 +08:00
|
|
|
FileId: assignResult.FileId,
|
2018-05-31 13:28:14 +08:00
|
|
|
Offset: 0,
|
|
|
|
Size: uint64(uploadResult.Size),
|
|
|
|
Mtime: time.Now().UnixNano(),
|
2018-09-23 13:12:21 +08:00
|
|
|
ETag: uploadResult.ETag,
|
2018-05-31 13:28:14 +08:00
|
|
|
})
|
|
|
|
|
2019-04-06 14:35:30 +08:00
|
|
|
fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName)
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
if err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
2018-05-31 13:28:14 +08:00
|
|
|
request := &filer_pb.CreateEntryRequest{
|
2019-04-06 14:35:30 +08:00
|
|
|
Directory: task.destinationUrlPath,
|
2018-05-31 13:28:14 +08:00
|
|
|
Entry: &filer_pb.Entry{
|
|
|
|
Name: fileName,
|
|
|
|
Attributes: &filer_pb.FuseAttributes{
|
2018-06-11 07:57:32 +08:00
|
|
|
Crtime: time.Now().Unix(),
|
|
|
|
Mtime: time.Now().Unix(),
|
2019-04-08 02:31:50 +08:00
|
|
|
Gid: task.gid,
|
|
|
|
Uid: task.uid,
|
2019-04-06 14:35:30 +08:00
|
|
|
FileSize: uint64(task.fileSize),
|
|
|
|
FileMode: uint32(task.fileMode),
|
2018-06-11 07:57:32 +08:00
|
|
|
Mime: mimeType,
|
2019-04-06 14:35:30 +08:00
|
|
|
Replication: *worker.options.replication,
|
|
|
|
Collection: *worker.options.collection,
|
|
|
|
TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
|
2018-05-31 13:28:14 +08:00
|
|
|
},
|
|
|
|
Chunks: chunks,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
if err := filer_pb.CreateEntry(client, request); err != nil {
|
2018-05-31 13:28:14 +08:00
|
|
|
return fmt.Errorf("update fh: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
2019-04-06 14:35:30 +08:00
|
|
|
return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err)
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
|
|
|
|
2019-04-06 14:35:30 +08:00
|
|
|
return nil
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
2018-05-30 14:46:45 +08:00
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
|
2018-05-30 15:54:56 +08:00
|
|
|
|
2018-05-31 11:48:13 +08:00
|
|
|
fileName := filepath.Base(f.Name())
|
2018-05-31 11:24:57 +08:00
|
|
|
mimeType := detectMimeType(f)
|
|
|
|
|
2019-10-24 22:26:23 +08:00
|
|
|
chunksChan := make(chan *filer_pb.FileChunk, chunkCount)
|
|
|
|
|
|
|
|
concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks)
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
var uploadError error
|
2020-02-25 14:28:45 +08:00
|
|
|
var collection, replication string
|
2019-10-24 22:26:23 +08:00
|
|
|
|
|
|
|
fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount)
|
|
|
|
for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ {
|
|
|
|
wg.Add(1)
|
|
|
|
concurrentChunks <- struct{}{}
|
|
|
|
go func(i int64) {
|
|
|
|
defer func() {
|
|
|
|
wg.Done()
|
|
|
|
<-concurrentChunks
|
|
|
|
}()
|
|
|
|
// assign a volume
|
2020-02-25 14:28:45 +08:00
|
|
|
var assignResult *filer_pb.AssignVolumeResponse
|
|
|
|
var assignError error
|
2020-02-26 13:50:12 +08:00
|
|
|
err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
2020-02-25 14:28:45 +08:00
|
|
|
request := &filer_pb.AssignVolumeRequest{
|
|
|
|
Count: 1,
|
|
|
|
Replication: *worker.options.replication,
|
|
|
|
Collection: *worker.options.collection,
|
|
|
|
TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
|
|
|
|
ParentPath: task.destinationUrlPath,
|
|
|
|
}
|
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
assignResult, assignError = client.AssignVolume(context.Background(), request)
|
2020-02-25 14:28:45 +08:00
|
|
|
if assignError != nil {
|
|
|
|
return fmt.Errorf("assign volume failure %v: %v", request, assignError)
|
|
|
|
}
|
2020-02-26 09:15:09 +08:00
|
|
|
if assignResult.Error != "" {
|
|
|
|
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
|
|
|
|
}
|
2020-02-25 14:28:45 +08:00
|
|
|
return nil
|
2019-10-24 22:26:23 +08:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
|
|
|
|
}
|
2020-02-25 14:28:45 +08:00
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
|
|
|
|
}
|
2018-05-30 15:54:56 +08:00
|
|
|
|
2020-02-25 14:28:45 +08:00
|
|
|
targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
|
|
|
|
if collection == "" {
|
|
|
|
collection = assignResult.Collection
|
|
|
|
}
|
|
|
|
if replication == "" {
|
|
|
|
replication = assignResult.Replication
|
|
|
|
}
|
2018-05-30 16:05:26 +08:00
|
|
|
|
2019-10-24 22:26:23 +08:00
|
|
|
uploadResult, err := operation.Upload(targetUrl,
|
|
|
|
fileName+"-"+strconv.FormatInt(i+1, 10),
|
|
|
|
io.NewSectionReader(f, i*chunkSize, chunkSize),
|
2020-02-25 14:28:45 +08:00
|
|
|
false, "", nil, security.EncodedJwt(assignResult.Auth))
|
2019-10-24 22:26:23 +08:00
|
|
|
if err != nil {
|
|
|
|
uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if uploadResult.Error != "" {
|
|
|
|
uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
chunksChan <- &filer_pb.FileChunk{
|
2020-02-25 14:28:45 +08:00
|
|
|
FileId: assignResult.FileId,
|
2019-10-24 22:26:23 +08:00
|
|
|
Offset: i * chunkSize,
|
|
|
|
Size: uint64(uploadResult.Size),
|
|
|
|
Mtime: time.Now().UnixNano(),
|
|
|
|
ETag: uploadResult.ETag,
|
|
|
|
}
|
|
|
|
fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
|
|
|
|
}(i)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
close(chunksChan)
|
2018-05-30 15:54:56 +08:00
|
|
|
|
2019-10-24 22:26:23 +08:00
|
|
|
var chunks []*filer_pb.FileChunk
|
|
|
|
for chunk := range chunksChan {
|
|
|
|
chunks = append(chunks, chunk)
|
2018-05-30 15:54:56 +08:00
|
|
|
}
|
|
|
|
|
2019-10-25 01:16:01 +08:00
|
|
|
if uploadError != nil {
|
|
|
|
var fileIds []string
|
|
|
|
for _, chunk := range chunks {
|
|
|
|
fileIds = append(fileIds, chunk.FileId)
|
|
|
|
}
|
2020-02-25 14:28:45 +08:00
|
|
|
operation.DeleteFiles(copy.masters[0], worker.options.grpcDialOption, fileIds)
|
2019-10-25 01:16:01 +08:00
|
|
|
return uploadError
|
|
|
|
}
|
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
if err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
2018-05-30 15:54:56 +08:00
|
|
|
request := &filer_pb.CreateEntryRequest{
|
2019-04-06 14:35:30 +08:00
|
|
|
Directory: task.destinationUrlPath,
|
2018-05-30 15:54:56 +08:00
|
|
|
Entry: &filer_pb.Entry{
|
2018-05-31 11:48:13 +08:00
|
|
|
Name: fileName,
|
2018-05-30 15:54:56 +08:00
|
|
|
Attributes: &filer_pb.FuseAttributes{
|
2018-06-11 07:57:32 +08:00
|
|
|
Crtime: time.Now().Unix(),
|
|
|
|
Mtime: time.Now().Unix(),
|
2019-04-08 02:31:50 +08:00
|
|
|
Gid: task.gid,
|
|
|
|
Uid: task.uid,
|
2019-04-06 14:35:30 +08:00
|
|
|
FileSize: uint64(task.fileSize),
|
|
|
|
FileMode: uint32(task.fileMode),
|
2018-06-11 07:57:32 +08:00
|
|
|
Mime: mimeType,
|
2020-02-25 14:28:45 +08:00
|
|
|
Replication: replication,
|
|
|
|
Collection: collection,
|
2019-04-06 14:35:30 +08:00
|
|
|
TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
|
2018-05-30 15:54:56 +08:00
|
|
|
},
|
|
|
|
Chunks: chunks,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
if err := filer_pb.CreateEntry(client, request); err != nil {
|
2018-05-30 15:54:56 +08:00
|
|
|
return fmt.Errorf("update fh: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
2019-04-06 14:35:30 +08:00
|
|
|
return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err)
|
2018-05-30 15:54:56 +08:00
|
|
|
}
|
|
|
|
|
2019-04-06 14:35:30 +08:00
|
|
|
fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName)
|
2018-05-30 16:05:26 +08:00
|
|
|
|
2019-04-06 14:35:30 +08:00
|
|
|
return nil
|
2018-05-30 14:46:45 +08:00
|
|
|
}
|
2018-05-30 14:52:27 +08:00
|
|
|
|
|
|
|
func detectMimeType(f *os.File) string {
|
|
|
|
head := make([]byte, 512)
|
2019-01-17 09:17:19 +08:00
|
|
|
f.Seek(0, io.SeekStart)
|
2018-05-30 14:52:27 +08:00
|
|
|
n, err := f.Read(head)
|
2018-05-31 13:28:14 +08:00
|
|
|
if err == io.EOF {
|
|
|
|
return ""
|
|
|
|
}
|
2018-05-30 14:52:27 +08:00
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("read head of %v: %v\n", f.Name(), err)
|
|
|
|
return "application/octet-stream"
|
|
|
|
}
|
2019-01-17 09:17:19 +08:00
|
|
|
f.Seek(0, io.SeekStart)
|
2018-05-30 14:52:27 +08:00
|
|
|
mimeType := http.DetectContentType(head[:n])
|
|
|
|
return mimeType
|
|
|
|
}
|
2018-05-30 15:54:56 +08:00
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
func withFilerClient(filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
|
2018-05-30 15:54:56 +08:00
|
|
|
|
2020-02-26 13:50:12 +08:00
|
|
|
return util.WithCachedGrpcClient(func(clientConn *grpc.ClientConn) error {
|
2019-04-05 16:09:06 +08:00
|
|
|
client := filer_pb.NewSeaweedFilerClient(clientConn)
|
|
|
|
return fn(client)
|
|
|
|
}, filerAddress, grpcDialOption)
|
2018-05-30 15:54:56 +08:00
|
|
|
|
|
|
|
}
|