2016-06-03 09:09:14 +08:00
|
|
|
package command
|
2012-08-07 16:29:22 +08:00
|
|
|
|
|
|
|
import (
|
2012-09-20 17:11:08 +08:00
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
|
|
|
"os"
|
2013-07-14 05:14:16 +08:00
|
|
|
"path/filepath"
|
2014-10-27 02:34:55 +08:00
|
|
|
|
2016-06-03 09:09:14 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/security"
|
2012-08-07 16:29:22 +08:00
|
|
|
)
|
|
|
|
|
2013-07-14 05:14:16 +08:00
|
|
|
var (
	// upload holds the parsed command line flag values for the upload
	// command; its fields are wired up in init().
	upload UploadOptions
)
|
2012-09-20 17:11:08 +08:00
|
|
|
|
2015-06-02 10:25:01 +08:00
|
|
|
// UploadOptions bundles the command line flags of the upload command.
// All fields are pointers registered with the flag package in init().
type UploadOptions struct {
	master      *string // SeaweedFS master location, e.g. "localhost:9333"
	dir         *string // if non-empty, upload this folder recursively
	include     *string // file name pattern filter, used together with dir
	replication *string // replication type for the uploaded files
	collection  *string // optional collection name
	ttl         *string // time to live, e.g. "1m", "1h", "1d", "1M", "1y"
	maxMB       *int    // split files larger than this limit into chunks
	secretKey   *string // secret used for JSON Web Token (JWT) signing
}
|
|
|
|
|
2012-08-07 16:29:22 +08:00
|
|
|
func init() {
|
2012-09-20 17:11:08 +08:00
|
|
|
cmdUpload.Run = runUpload // break init cycle
|
2013-01-20 11:49:57 +08:00
|
|
|
cmdUpload.IsDebug = cmdUpload.Flag.Bool("debug", false, "verbose debug information")
|
2016-07-21 14:45:55 +08:00
|
|
|
upload.master = cmdUpload.Flag.String("master", "localhost:9333", "SeaweedFS master location")
|
2015-06-02 10:25:01 +08:00
|
|
|
upload.dir = cmdUpload.Flag.String("dir", "", "Upload the whole folder recursively if specified.")
|
|
|
|
upload.include = cmdUpload.Flag.String("include", "", "pattens of files to upload, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
|
|
|
|
upload.replication = cmdUpload.Flag.String("replication", "", "replication type")
|
|
|
|
upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name")
|
|
|
|
upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
|
|
|
|
upload.maxMB = cmdUpload.Flag.Int("maxMB", 0, "split files larger than the limit")
|
|
|
|
upload.secretKey = cmdUpload.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
|
2012-08-07 16:29:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
var cmdUpload = &Command{
|
2016-07-21 14:45:55 +08:00
|
|
|
UsageLine: "upload -master=localhost:9333 file1 [file2 file3]\n weed upload -server=localhost:9333 -dir=one_directory -include=*.pdf",
|
2012-09-20 17:11:08 +08:00
|
|
|
Short: "upload one or a list of files",
|
2013-07-14 05:14:16 +08:00
|
|
|
Long: `upload one or a list of files, or batch upload one whole folder recursively.
|
2015-02-08 07:35:28 +08:00
|
|
|
|
2013-11-19 15:04:33 +08:00
|
|
|
If uploading a list of files:
|
2012-09-20 17:11:08 +08:00
|
|
|
It uses consecutive file keys for the list of files.
|
2012-08-07 16:29:22 +08:00
|
|
|
e.g. If the file1 uses key k, file2 can be read via k_1
|
|
|
|
|
2013-07-21 06:14:12 +08:00
|
|
|
If uploading a whole folder recursively:
|
|
|
|
All files under the folder and subfolders will be uploaded, each with its own file key.
|
|
|
|
Optional parameter "-include" allows you to specify the file name patterns.
|
2015-02-08 07:35:28 +08:00
|
|
|
|
2013-07-21 06:14:12 +08:00
|
|
|
If any file has a ".gz" extension, the content are considered gzipped already, and will be stored as is.
|
|
|
|
This can save volume server's gzipped processing and allow customizable gzip compression level.
|
|
|
|
The file name will strip out ".gz" and stored. For example, "jquery.js.gz" will be stored as "jquery.js".
|
2015-02-08 07:35:28 +08:00
|
|
|
|
|
|
|
If "maxMB" is set to a positive number, files larger than it would be split into chunks and uploaded separatedly.
|
|
|
|
The list of file ids of those chunks would be stored in an additional chunk, and this additional chunk's file id would be returned.
|
2013-07-21 06:14:12 +08:00
|
|
|
|
2012-08-07 16:29:22 +08:00
|
|
|
`,
|
|
|
|
}
|
|
|
|
|
|
|
|
func runUpload(cmd *Command, args []string) bool {
|
2015-06-02 10:25:01 +08:00
|
|
|
secret := security.Secret(*upload.secretKey)
|
2017-01-04 11:23:40 +08:00
|
|
|
if len(args) == 0 {
|
2015-06-02 10:25:01 +08:00
|
|
|
if *upload.dir == "" {
|
2013-07-14 05:14:16 +08:00
|
|
|
return false
|
|
|
|
}
|
2015-06-02 10:25:01 +08:00
|
|
|
filepath.Walk(*upload.dir, func(path string, info os.FileInfo, err error) error {
|
2013-07-14 13:01:48 +08:00
|
|
|
if err == nil {
|
|
|
|
if !info.IsDir() {
|
2015-06-02 10:25:01 +08:00
|
|
|
if *upload.include != "" {
|
|
|
|
if ok, _ := filepath.Match(*upload.include, filepath.Base(path)); !ok {
|
2013-07-14 14:17:41 +08:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
2013-08-15 01:07:42 +08:00
|
|
|
parts, e := operation.NewFileParts([]string{path})
|
|
|
|
if e != nil {
|
|
|
|
return e
|
|
|
|
}
|
2016-07-21 14:45:55 +08:00
|
|
|
results, e := operation.SubmitFiles(*upload.master, parts,
|
2015-06-02 10:25:01 +08:00
|
|
|
*upload.replication, *upload.collection,
|
|
|
|
*upload.ttl, *upload.maxMB, secret)
|
2013-07-14 13:01:48 +08:00
|
|
|
bytes, _ := json.Marshal(results)
|
|
|
|
fmt.Println(string(bytes))
|
|
|
|
if e != nil {
|
|
|
|
return e
|
|
|
|
}
|
2013-07-14 05:14:16 +08:00
|
|
|
}
|
2013-07-14 13:01:48 +08:00
|
|
|
} else {
|
|
|
|
fmt.Println(err)
|
2013-07-14 05:14:16 +08:00
|
|
|
}
|
|
|
|
return err
|
|
|
|
})
|
|
|
|
} else {
|
2013-08-15 01:07:42 +08:00
|
|
|
parts, e := operation.NewFileParts(args)
|
|
|
|
if e != nil {
|
|
|
|
fmt.Println(e.Error())
|
|
|
|
}
|
2016-07-21 14:45:55 +08:00
|
|
|
results, _ := operation.SubmitFiles(*upload.master, parts,
|
2015-06-02 10:25:01 +08:00
|
|
|
*upload.replication, *upload.collection,
|
|
|
|
*upload.ttl, *upload.maxMB, secret)
|
2013-07-14 05:35:29 +08:00
|
|
|
bytes, _ := json.Marshal(results)
|
|
|
|
fmt.Println(string(bytes))
|
2012-09-20 17:11:08 +08:00
|
|
|
}
|
|
|
|
return true
|
2012-08-07 16:29:22 +08:00
|
|
|
}
|