2016-07-21 14:45:55 +08:00
|
|
|
package command
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"io/ioutil"
|
|
|
|
"net/url"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"strings"
|
|
|
|
|
2018-07-22 08:39:10 +08:00
|
|
|
"context"
|
2016-07-21 14:45:55 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
2018-07-22 08:39:10 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
2016-07-21 14:45:55 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/security"
|
2018-07-22 08:39:10 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
|
|
|
"io"
|
2018-05-30 14:46:45 +08:00
|
|
|
"net/http"
|
2018-07-22 08:39:10 +08:00
|
|
|
"path"
|
2018-05-30 15:54:56 +08:00
|
|
|
"strconv"
|
|
|
|
"time"
|
2016-07-21 14:45:55 +08:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// copy holds the flag-backed options shared by the filer.copy command.
	// NOTE(review): this package-level name shadows the builtin copy()
	// within this package; renaming would touch every use site in the
	// file, so it is documented here rather than changed.
	copy CopyOptions
)
|
|
|
|
|
|
|
|
// CopyOptions carries the command-line flag values for the filer.copy
// command, plus the secret value derived from secretKey at startup.
type CopyOptions struct {
	filerGrpcPort *int    // filer gRPC listen port; 0 means "filer port + 10000"
	master        *string // SeaweedFS master location, host:port
	include       *string // file name pattern filter applied during directory copies
	replication   *string // replication type forwarded to volume assignment
	collection    *string // optional collection name forwarded to volume assignment
	ttl           *string // time-to-live string, e.g. "1m", "1h", "1d", "1M", "1y"
	maxMB         *int    // files larger than this many MB are split into chunks
	secretKey     *string // raw secret used to encrypt JSON Web Tokens (JWT)

	secret security.Secret // typed form of secretKey, set in runCopy
}
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
cmdCopy.Run = runCopy // break init cycle
|
|
|
|
cmdCopy.IsDebug = cmdCopy.Flag.Bool("debug", false, "verbose debug information")
|
|
|
|
copy.master = cmdCopy.Flag.String("master", "localhost:9333", "SeaweedFS master location")
|
|
|
|
copy.include = cmdCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
|
|
|
|
copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
|
|
|
|
copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
|
|
|
|
copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
|
|
|
|
copy.maxMB = cmdCopy.Flag.Int("maxMB", 0, "split files larger than the limit")
|
2018-06-06 14:37:41 +08:00
|
|
|
copy.filerGrpcPort = cmdCopy.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to filer port + 10000")
|
2016-07-21 14:45:55 +08:00
|
|
|
copy.secretKey = cmdCopy.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
|
|
|
|
}
|
|
|
|
|
|
|
|
var cmdCopy = &Command{
|
2016-07-22 06:00:07 +08:00
|
|
|
UsageLine: "filer.copy file_or_dir1 [file_or_dir2 file_or_dir3] http://localhost:8888/path/to/a/folder/",
|
2016-07-21 14:45:55 +08:00
|
|
|
Short: "copy one or a list of files to a filer folder",
|
|
|
|
Long: `copy one or a list of files, or batch copy one whole folder recursively, to a filer folder
|
|
|
|
|
|
|
|
It can copy one or a list of files or folders.
|
|
|
|
|
|
|
|
If copying a whole folder recursively:
|
|
|
|
All files under the folder and subfolders will be copyed.
|
|
|
|
Optional parameter "-include" allows you to specify the file name patterns.
|
|
|
|
|
|
|
|
If any file has a ".gz" extension, the content are considered gzipped already, and will be stored as is.
|
|
|
|
This can save volume server's gzipped processing and allow customizable gzip compression level.
|
|
|
|
The file name will strip out ".gz" and stored. For example, "jquery.js.gz" will be stored as "jquery.js".
|
|
|
|
|
|
|
|
If "maxMB" is set to a positive number, files larger than it would be split into chunks and copyed separatedly.
|
|
|
|
The list of file ids of those chunks would be stored in an additional chunk, and this additional chunk's file id would be returned.
|
|
|
|
|
|
|
|
`,
|
|
|
|
}
|
|
|
|
|
|
|
|
func runCopy(cmd *Command, args []string) bool {
|
|
|
|
copy.secret = security.Secret(*copy.secretKey)
|
|
|
|
if len(args) <= 1 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
filerDestination := args[len(args)-1]
|
2018-07-22 08:39:10 +08:00
|
|
|
fileOrDirs := args[0 : len(args)-1]
|
2016-07-21 14:45:55 +08:00
|
|
|
|
|
|
|
filerUrl, err := url.Parse(filerDestination)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("The last argument should be a URL on filer: %v\n", err)
|
|
|
|
return false
|
|
|
|
}
|
2018-05-30 14:46:45 +08:00
|
|
|
urlPath := filerUrl.Path
|
|
|
|
if !strings.HasSuffix(urlPath, "/") {
|
|
|
|
urlPath = urlPath + "/"
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
|
|
|
|
2018-06-06 14:37:41 +08:00
|
|
|
if filerUrl.Port() == "" {
|
|
|
|
fmt.Printf("The filer port should be specified.\n")
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
filerPort, parseErr := strconv.ParseUint(filerUrl.Port(), 10, 64)
|
|
|
|
if parseErr != nil {
|
|
|
|
fmt.Printf("The filer port parse error: %v\n", parseErr)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
filerGrpcPort := filerPort + 10000
|
|
|
|
if *copy.filerGrpcPort != 0 {
|
|
|
|
filerGrpcPort = uint64(*copy.filerGrpcPort)
|
|
|
|
}
|
|
|
|
|
|
|
|
filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
|
|
|
|
|
2016-07-21 14:45:55 +08:00
|
|
|
for _, fileOrDir := range fileOrDirs {
|
2018-06-06 14:37:41 +08:00
|
|
|
if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, urlPath) {
|
2016-07-21 14:45:55 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2018-06-06 14:37:41 +08:00
|
|
|
func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path string) bool {
|
2016-07-21 14:45:55 +08:00
|
|
|
f, err := os.Open(fileOrDir)
|
|
|
|
if err != nil {
|
2018-05-14 14:56:16 +08:00
|
|
|
fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err)
|
2016-07-21 14:45:55 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
defer f.Close()
|
|
|
|
|
|
|
|
fi, err := f.Stat()
|
|
|
|
if err != nil {
|
2018-05-14 14:56:16 +08:00
|
|
|
fmt.Printf("Failed to get stat for file %s: %v\n", fileOrDir, err)
|
2016-07-21 14:45:55 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
mode := fi.Mode()
|
|
|
|
if mode.IsDir() {
|
|
|
|
files, _ := ioutil.ReadDir(fileOrDir)
|
|
|
|
for _, subFileOrDir := range files {
|
2018-06-06 14:37:41 +08:00
|
|
|
if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, path+fi.Name()+"/") {
|
2016-07-21 14:45:55 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// this is a regular file
|
|
|
|
if *copy.include != "" {
|
|
|
|
if ok, _ := filepath.Match(*copy.include, filepath.Base(fileOrDir)); !ok {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-30 14:46:45 +08:00
|
|
|
// find the chunk count
|
|
|
|
chunkSize := int64(*copy.maxMB * 1024 * 1024)
|
|
|
|
chunkCount := 1
|
|
|
|
if chunkSize > 0 && fi.Size() > chunkSize {
|
|
|
|
chunkCount = int(fi.Size()/chunkSize) + 1
|
|
|
|
}
|
|
|
|
|
2018-05-30 16:05:26 +08:00
|
|
|
if chunkCount == 1 {
|
2018-06-06 14:37:41 +08:00
|
|
|
return uploadFileAsOne(filerAddress, filerGrpcAddress, path, f, fi)
|
2018-05-30 16:05:26 +08:00
|
|
|
}
|
|
|
|
|
2018-06-06 14:37:41 +08:00
|
|
|
return uploadFileInChunks(filerAddress, filerGrpcAddress, path, f, fi, chunkCount, chunkSize)
|
2018-05-30 16:05:26 +08:00
|
|
|
}
|
|
|
|
|
2018-06-06 14:37:41 +08:00
|
|
|
func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo) bool {
|
2018-05-30 16:05:26 +08:00
|
|
|
|
2018-05-30 14:46:45 +08:00
|
|
|
// upload the file content
|
2018-05-31 11:48:13 +08:00
|
|
|
fileName := filepath.Base(f.Name())
|
2018-05-30 14:52:27 +08:00
|
|
|
mimeType := detectMimeType(f)
|
2018-05-31 11:48:13 +08:00
|
|
|
isGzipped := isGzipped(fileName)
|
2018-05-30 14:46:45 +08:00
|
|
|
|
2018-05-31 13:28:14 +08:00
|
|
|
var chunks []*filer_pb.FileChunk
|
2016-07-21 14:45:55 +08:00
|
|
|
|
2018-05-31 13:28:14 +08:00
|
|
|
if fi.Size() > 0 {
|
|
|
|
|
|
|
|
// assign a volume
|
|
|
|
assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{
|
|
|
|
Count: 1,
|
|
|
|
Replication: *copy.replication,
|
|
|
|
Collection: *copy.collection,
|
|
|
|
Ttl: *copy.ttl,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
|
|
|
|
|
|
|
|
uploadResult, err := operation.Upload(targetUrl, fileName, f, isGzipped, mimeType, nil, "")
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if uploadResult.Error != "" {
|
|
|
|
fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
|
|
|
|
|
|
|
|
chunks = append(chunks, &filer_pb.FileChunk{
|
|
|
|
FileId: assignResult.Fid,
|
|
|
|
Offset: 0,
|
|
|
|
Size: uint64(uploadResult.Size),
|
|
|
|
Mtime: time.Now().UnixNano(),
|
|
|
|
})
|
|
|
|
|
2018-06-06 14:37:41 +08:00
|
|
|
fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName)
|
2016-07-21 14:45:55 +08:00
|
|
|
}
|
|
|
|
|
2018-06-06 14:37:41 +08:00
|
|
|
if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error {
|
2018-05-31 13:28:14 +08:00
|
|
|
request := &filer_pb.CreateEntryRequest{
|
|
|
|
Directory: urlFolder,
|
|
|
|
Entry: &filer_pb.Entry{
|
|
|
|
Name: fileName,
|
|
|
|
Attributes: &filer_pb.FuseAttributes{
|
2018-06-11 07:57:32 +08:00
|
|
|
Crtime: time.Now().Unix(),
|
|
|
|
Mtime: time.Now().Unix(),
|
|
|
|
Gid: uint32(os.Getgid()),
|
|
|
|
Uid: uint32(os.Getuid()),
|
|
|
|
FileSize: uint64(fi.Size()),
|
|
|
|
FileMode: uint32(fi.Mode()),
|
|
|
|
Mime: mimeType,
|
|
|
|
Replication: *copy.replication,
|
|
|
|
Collection: *copy.collection,
|
|
|
|
TtlSec: int32(util.ParseInt(*copy.ttl, 0)),
|
2018-05-31 13:28:14 +08:00
|
|
|
},
|
|
|
|
Chunks: chunks,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err := client.CreateEntry(context.Background(), request); err != nil {
|
|
|
|
return fmt.Errorf("update fh: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
2018-06-06 14:37:41 +08:00
|
|
|
fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err)
|
2016-07-21 14:45:55 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
2018-05-30 14:46:45 +08:00
|
|
|
|
2018-06-06 14:37:41 +08:00
|
|
|
func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool {
|
2018-05-30 15:54:56 +08:00
|
|
|
|
2018-05-31 11:48:13 +08:00
|
|
|
fileName := filepath.Base(f.Name())
|
2018-05-31 11:24:57 +08:00
|
|
|
mimeType := detectMimeType(f)
|
|
|
|
|
2018-05-30 15:54:56 +08:00
|
|
|
var chunks []*filer_pb.FileChunk
|
|
|
|
|
|
|
|
for i := int64(0); i < int64(chunkCount); i++ {
|
2018-05-30 16:05:26 +08:00
|
|
|
|
|
|
|
// assign a volume
|
|
|
|
assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{
|
|
|
|
Count: 1,
|
|
|
|
Replication: *copy.replication,
|
|
|
|
Collection: *copy.collection,
|
|
|
|
Ttl: *copy.ttl,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err)
|
2018-05-30 15:54:56 +08:00
|
|
|
}
|
|
|
|
|
2018-05-30 16:05:26 +08:00
|
|
|
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
|
2018-05-30 15:54:56 +08:00
|
|
|
|
|
|
|
uploadResult, err := operation.Upload(targetUrl,
|
2018-05-31 11:48:13 +08:00
|
|
|
fileName+"-"+strconv.FormatInt(i+1, 10),
|
2018-05-30 15:54:56 +08:00
|
|
|
io.LimitReader(f, chunkSize),
|
|
|
|
false, "application/octet-stream", nil, "")
|
|
|
|
if err != nil {
|
2018-05-31 11:48:13 +08:00
|
|
|
fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err)
|
2018-05-30 15:54:56 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
if uploadResult.Error != "" {
|
2018-05-31 11:48:13 +08:00
|
|
|
fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
|
2018-05-30 15:54:56 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
chunks = append(chunks, &filer_pb.FileChunk{
|
2018-05-30 16:05:26 +08:00
|
|
|
FileId: assignResult.Fid,
|
2018-05-30 15:54:56 +08:00
|
|
|
Offset: i * chunkSize,
|
|
|
|
Size: uint64(uploadResult.Size),
|
|
|
|
Mtime: time.Now().UnixNano(),
|
|
|
|
})
|
2018-05-31 11:48:13 +08:00
|
|
|
fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
|
2018-05-30 15:54:56 +08:00
|
|
|
}
|
|
|
|
|
2018-06-06 14:37:41 +08:00
|
|
|
if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error {
|
2018-05-30 15:54:56 +08:00
|
|
|
request := &filer_pb.CreateEntryRequest{
|
|
|
|
Directory: urlFolder,
|
|
|
|
Entry: &filer_pb.Entry{
|
2018-05-31 11:48:13 +08:00
|
|
|
Name: fileName,
|
2018-05-30 15:54:56 +08:00
|
|
|
Attributes: &filer_pb.FuseAttributes{
|
2018-06-11 07:57:32 +08:00
|
|
|
Crtime: time.Now().Unix(),
|
|
|
|
Mtime: time.Now().Unix(),
|
|
|
|
Gid: uint32(os.Getgid()),
|
|
|
|
Uid: uint32(os.Getuid()),
|
|
|
|
FileSize: uint64(fi.Size()),
|
|
|
|
FileMode: uint32(fi.Mode()),
|
|
|
|
Mime: mimeType,
|
|
|
|
Replication: *copy.replication,
|
|
|
|
Collection: *copy.collection,
|
|
|
|
TtlSec: int32(util.ParseInt(*copy.ttl, 0)),
|
2018-05-30 15:54:56 +08:00
|
|
|
},
|
|
|
|
Chunks: chunks,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err := client.CreateEntry(context.Background(), request); err != nil {
|
|
|
|
return fmt.Errorf("update fh: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
2018-06-06 14:37:41 +08:00
|
|
|
fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err)
|
2018-05-30 15:54:56 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2018-06-06 14:37:41 +08:00
|
|
|
fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName)
|
2018-05-30 16:05:26 +08:00
|
|
|
|
2018-05-30 15:54:56 +08:00
|
|
|
return true
|
2018-05-30 14:46:45 +08:00
|
|
|
}
|
2018-05-30 14:52:27 +08:00
|
|
|
|
|
|
|
// isGzipped reports whether the file name carries a ".gz" extension
// (case-insensitive), meaning its content is already gzip-compressed.
func isGzipped(filename string) bool {
	ext := path.Ext(filename)
	return strings.ToLower(ext) == ".gz"
}
|
|
|
|
|
|
|
|
// detectMimeType sniffs f's content type from its first 512 bytes using
// http.DetectContentType, then rewinds the file to the start. It returns
// "" for an empty file and "application/octet-stream" when the head
// cannot be read.
func detectMimeType(f *os.File) string {
	buffer := make([]byte, 512)
	f.Seek(0, 0)
	n, err := f.Read(buffer)
	switch {
	case err == io.EOF:
		// zero-length file: nothing to sniff
		return ""
	case err != nil:
		fmt.Printf("read head of %v: %v\n", f.Name(), err)
		return "application/octet-stream"
	}
	// rewind so the caller can re-read the content from the beginning
	f.Seek(0, 0)
	return http.DetectContentType(buffer[:n])
}
|
2018-05-30 15:54:56 +08:00
|
|
|
|
|
|
|
func withFilerClient(filerAddress string, fn func(filer_pb.SeaweedFilerClient) error) error {
|
|
|
|
|
2018-07-04 10:07:55 +08:00
|
|
|
grpcConnection, err := util.GrpcDial(filerAddress)
|
2018-05-30 15:54:56 +08:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("fail to dial %s: %v", filerAddress, err)
|
|
|
|
}
|
|
|
|
defer grpcConnection.Close()
|
|
|
|
|
|
|
|
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
|
|
|
|
|
|
|
|
return fn(client)
|
|
|
|
}
|