2013-11-19 15:41:00 +08:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
"path"
|
|
|
|
"strings"
|
2014-10-27 02:34:55 +08:00
|
|
|
|
2015-04-17 03:18:06 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/go/operation"
|
|
|
|
"github.com/chrislusf/seaweedfs/go/util"
|
2013-11-19 15:41:00 +08:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// d holds the parsed command-line flag values for the download
	// command; the flags are registered in init() below.
	d DownloadOptions
)
|
|
|
|
|
2015-06-02 10:25:01 +08:00
|
|
|
// DownloadOptions groups the flag values accepted by the download command.
// Both fields are pointers returned by flag registration in init().
type DownloadOptions struct {
	server *string // SeaweedFS master location, e.g. "localhost:9333"
	dir    *string // local directory downloaded files are written into
}
|
|
|
|
|
2013-11-19 15:41:00 +08:00
|
|
|
func init() {
|
|
|
|
cmdDownload.Run = runDownload // break init cycle
|
2015-06-02 10:25:01 +08:00
|
|
|
d.server = cmdDownload.Flag.String("server", "localhost:9333", "SeaweedFS master location")
|
|
|
|
d.dir = cmdDownload.Flag.String("dir", ".", "Download the whole folder recursively if specified.")
|
2013-11-19 15:41:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// cmdDownload describes the "weed download" command: its usage line, short
// summary, and long help text. Its Run function is assigned in init().
var cmdDownload = &Command{
	UsageLine: "download -server=localhost:9333 -dir=one_directory fid1 [fid2 fid3 ...]",
	Short:     "download files by file id",
	Long: `download files by file id.

  Usually you just need to use curl to lookup the file's volume server, and then download them directly.
  This download tool combine the two steps into one.

  What's more, if you use "weed upload -maxMB=..." option to upload a big file divided into chunks, you can
  use this tool to download the chunks and merge them automatically.

  `,
}
|
|
|
|
|
|
|
|
func runDownload(cmd *Command, args []string) bool {
|
|
|
|
for _, fid := range args {
|
2015-06-02 10:25:01 +08:00
|
|
|
filename, content, e := fetchFileId(*d.server, fid)
|
2013-11-19 15:41:00 +08:00
|
|
|
if e != nil {
|
|
|
|
fmt.Println("Fetch Error:", e)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if filename == "" {
|
|
|
|
filename = fid
|
|
|
|
}
|
|
|
|
if strings.HasSuffix(filename, "-list") {
|
2014-03-03 14:16:54 +08:00
|
|
|
filename = filename[0 : len(filename)-len("-list")]
|
2013-11-19 15:41:00 +08:00
|
|
|
fids := strings.Split(string(content), "\n")
|
2015-06-02 10:25:01 +08:00
|
|
|
f, err := os.OpenFile(path.Join(*d.dir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
|
2013-11-19 15:41:00 +08:00
|
|
|
if err != nil {
|
|
|
|
fmt.Println("File Creation Error:", e)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
defer f.Close()
|
|
|
|
for _, partId := range fids {
|
|
|
|
var n int
|
2015-06-02 10:25:01 +08:00
|
|
|
_, part, err := fetchFileId(*d.server, partId)
|
2013-11-19 15:41:00 +08:00
|
|
|
if err == nil {
|
|
|
|
n, err = f.Write(part)
|
|
|
|
}
|
|
|
|
if err == nil && n < len(part) {
|
|
|
|
err = io.ErrShortWrite
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
fmt.Println("File Write Error:", err)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2015-06-02 10:25:01 +08:00
|
|
|
ioutil.WriteFile(path.Join(*d.dir, filename), content, os.ModePerm)
|
2013-11-19 15:41:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2014-03-31 02:28:04 +08:00
|
|
|
func fetchFileId(server string, fileId string) (filename string, content []byte, e error) {
|
|
|
|
fileUrl, lookupError := operation.LookupFileId(server, fileId)
|
2013-11-19 15:41:00 +08:00
|
|
|
if lookupError != nil {
|
|
|
|
return "", nil, lookupError
|
|
|
|
}
|
2014-03-31 02:28:04 +08:00
|
|
|
filename, content, e = util.DownloadUrl(fileUrl)
|
2013-11-19 15:41:00 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteFile writes data to the named file, creating it with permission perm
// and truncating it if it already exists. A partial write is reported as
// io.ErrShortWrite. Unlike the original version, an error from Close is no
// longer discarded: on some filesystems Close is where a buffered write
// failure first surfaces, so it is returned when the write itself succeeded.
func WriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	// Keep the first error if the write already failed; otherwise surface
	// any error reported by Close.
	if cerr := f.Close(); err == nil {
		err = cerr
	}
	return err
}
|