package shell

import (
	"flag"
	"fmt"
	"io"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func init() {
	Commands = append(Commands, &commandFsMetaSave{})
}

type commandFsMetaSave struct {
}

func (c *commandFsMetaSave) Name() string {
	return "fs.meta.save"
}

func (c *commandFsMetaSave) Help() string {
	return `save all directory and file meta data to a local file for metadata backup.

	fs.meta.save /                # save from the root
	fs.meta.save -v -o t.meta /   # save from the root, output to t.meta file.
	fs.meta.save /path/to/save    # save from the directory /path/to/save
	fs.meta.save .                # save from current directory
	fs.meta.save                  # save from current directory

	The meta data will be saved into a local <filer_host>-<port>-<time>.meta file.
	The saved meta data can later be loaded back with the fs.meta.load command.

`
}
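
// The output file is a stream of length-prefixed records: a 4-byte size
// written with util.Uint32toBytes, followed by a proto-marshaled
// filer_pb.FullEntry (see the save callback in Do below). A minimal sketch of
// decoding one record from an io.Reader f, assuming util.BytesToUint32 as the
// counterpart helper (the actual fs.meta.load implementation may differ):
//
//	sizeBuf := make([]byte, 4)
//	if _, err := io.ReadFull(f, sizeBuf); err != nil {
//		return err
//	}
//	data := make([]byte, util.BytesToUint32(sizeBuf))
//	if _, err := io.ReadFull(f, data); err != nil {
//		return err
//	}
//	fullEntry := &filer_pb.FullEntry{}
//	if err := proto.Unmarshal(data, fullEntry); err != nil {
//		return err
//	}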

func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	fsMetaSaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	verbose := fsMetaSaveCommand.Bool("v", false, "print out each processed file")
	outputFileName := fsMetaSaveCommand.String("o", "", "output the meta data to this file")
	// chunksFileName := fsMetaSaveCommand.String("chunks", "", "output all the chunks to this file")
	if err = fsMetaSaveCommand.Parse(args); err != nil {
		return nil
	}

	path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args()))
	if parseErr != nil {
		return parseErr
	}

	fileName := *outputFileName
	if fileName == "" {
		t := time.Now()
		fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta",
			commandEnv.option.FilerHost, commandEnv.option.FilerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
	}

	dst, openErr := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if openErr != nil {
		return fmt.Errorf("failed to create file %s: %v", fileName, openErr)
	}
	defer dst.Close()

	// Serialize every entry as a length-prefixed protobuf record: a 4-byte
	// record size followed by the marshaled filer_pb.FullEntry.
	err = doTraverseBfsAndSaving(commandEnv, writer, path, *verbose, func(outputChan chan []byte) {
		sizeBuf := make([]byte, 4)
		for b := range outputChan {
			util.Uint32toBytes(sizeBuf, uint32(len(b)))
			dst.Write(sizeBuf)
			dst.Write(b)
		}
	}, func(entry *filer_pb.FullEntry, outputChan chan []byte) (err error) {
		bytes, err := proto.Marshal(entry)
		if err != nil {
			fmt.Fprintf(writer, "marshal error: %v\n", err)
			return err
		}

		outputChan <- bytes
		return nil
	})
	if err != nil {
		return err
	}
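
	// The chunks export below is currently disabled: the -chunks flag is
	// commented out above, so chunksFileName stays empty and the block is
	// never entered. It is presumably kept for a future chunk-listing output.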
	chunksFileName := ""
	if chunksFileName != "" {

		dst, openErr := os.OpenFile(chunksFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		if openErr != nil {
			return fmt.Errorf("failed to create file %s: %v", chunksFileName, openErr)
		}
		defer dst.Close()

		// Write one tab-separated line per chunk: file key, file id, and full path.
		return doTraverseBfsAndSaving(commandEnv, writer, path, *verbose, func(outputChan chan []byte) {
			for b := range outputChan {
				dst.Write(b)
			}
		}, func(entry *filer_pb.FullEntry, outputChan chan []byte) (err error) {
			for _, chunk := range entry.Entry.Chunks {
				dir := entry.Dir
				if dir == "/" {
					dir = ""
				}
				outputLine := fmt.Sprintf("%d\t%s\t%s/%s\n", chunk.Fid.FileKey, chunk.FileId, dir, entry.Entry.Name)
				outputChan <- []byte(outputLine)
			}
			return nil
		})
	}

	return err
}
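
// doTraverseBfsAndSaving walks the filer tree under path breadth-first,
// converts each entry to bytes with genFn, and streams those bytes through a
// buffered channel to a single consumer goroutine running saveFn, so that
// slow writes do not block the traversal workers.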
func doTraverseBfsAndSaving(commandEnv *CommandEnv, writer io.Writer, path string, verbose bool, saveFn func(outputChan chan []byte), genFn func(entry *filer_pb.FullEntry, outputChan chan []byte) error) error {

	var wg sync.WaitGroup
	wg.Add(1)
	outputChan := make(chan []byte, 1024)
	go func() {
		// single consumer: drain the channel until it is closed after the traversal
		saveFn(outputChan)
		wg.Done()
	}()

	var dirCount, fileCount uint64

	err := doTraverseBfs(writer, commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) {

		protoMessage := &filer_pb.FullEntry{
			Dir:   string(parentPath),
			Entry: entry,
		}

		if err := genFn(protoMessage, outputChan); err != nil {
			fmt.Fprintf(writer, "marshal error: %v\n", err)
			return
		}

		if entry.IsDirectory {
			atomic.AddUint64(&dirCount, 1)
		} else {
			atomic.AddUint64(&fileCount, 1)
		}

		if verbose {
			fmt.Fprintln(writer, parentPath.Child(entry.Name))
		}

	})

	close(outputChan)

	wg.Wait()

	if err == nil {
		fmt.Fprintf(writer, "total %d directories, %d files\n", dirCount, fileCount)
	}
	return err
}
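
// doTraverseBfs walks the directory tree rooted at parentPath breadth-first,
// using K worker goroutines that pull directories from a shared queue and call
// fn for every entry found. If listing a directory fails, one of the observed
// errors is returned.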
func doTraverseBfs(writer io.Writer, filerClient filer_pb.FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *filer_pb.Entry)) (err error) {

	K := 5 // number of concurrent directory-listing workers

	var jobQueueWg sync.WaitGroup
	queue := util.NewQueue()
	jobQueueWg.Add(1)
	queue.Enqueue(parentPath)
	var isTerminating bool
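
	// Workers poll the queue: an empty queue may just mean other workers are
	// still expanding directories, so they back off briefly and retry until
	// isTerminating is set after all queued jobs are done.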
	for i := 0; i < K; i++ {
		go func() {
			for {
				if isTerminating {
					break
				}
				t := queue.Dequeue()
				if t == nil {
					time.Sleep(329 * time.Millisecond)
					continue
				}
				dir := t.(util.FullPath)
				processErr := processOneDirectory(writer, filerClient, dir, queue, &jobQueueWg, fn)
				if processErr != nil {
					err = processErr
				}
				jobQueueWg.Done()
			}
		}()
	}
	jobQueueWg.Wait()
	isTerminating = true
	return
}
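
// processOneDirectory lists a single directory, passes each entry to fn, and
// enqueues sub-directories for the traversal workers, adding to jobQueueWg so
// doTraverseBfs waits until they have been processed too.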
func processOneDirectory(writer io.Writer, filerClient filer_pb.FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *filer_pb.Entry)) (err error) {

	return filer_pb.ReadDirAllEntries(filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) {

		fn(parentPath, entry)

		if entry.IsDirectory {
			subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name)
			if parentPath == "/" {
				subDir = "/" + entry.Name
			}
			jobQueueWg.Add(1)
			queue.Enqueue(util.FullPath(subDir))
		}
	})
}