seaweedfs/weed/command/backup.go


package command

import (
	"context"
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/security"
	"github.com/seaweedfs/seaweedfs/weed/storage"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

var (
	s BackupOptions
)
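
// BackupOptions holds the command-line flag values for the backup command.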
type BackupOptions struct {
	master      *string
	collection  *string
	dir         *string
	volumeId    *int
	ttl         *string
	replication *string
}

func init() {
	cmdBackup.Run = runBackup // break init cycle
	s.master = cmdBackup.Flag.String("server", "localhost:9333", "SeaweedFS master location")
	s.collection = cmdBackup.Flag.String("collection", "", "collection name")
	s.dir = cmdBackup.Flag.String("dir", ".", "directory to store volume data files")
	s.volumeId = cmdBackup.Flag.Int("volumeId", -1, "a volume id. The volume .dat and .idx files should already exist in the dir.")
	s.ttl = cmdBackup.Flag.String("ttl", "", `backup volume's time to live, format:
		3m: 3 minutes
		4h: 4 hours
		5d: 5 days
		6w: 6 weeks
		7M: 7 months
		8y: 8 years
		default is the same as the original volume`)
	s.replication = cmdBackup.Flag.String("replication", "", "backup volume's replication, default is the same as the original volume")
}
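
// cmdBackup defines the backup subcommand; its Run field is assigned in
// init above to break the initialization cycle.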
var cmdBackup = &Command{
	UsageLine: "backup -dir=. -volumeId=234 -server=localhost:9333",
	Short:     "incrementally back up a volume to a local folder",
	Long: `Incrementally backup volume data.

	It is expected that you use this inside a script, to loop through
	all possible volume ids that need to be backed up to a local folder.

	The volume id does not need to exist locally or even remotely.
	This will help to back up future new volumes.

	Usually backing up is just copying the .dat (and .idx) files.
	But it is tricky to copy only the incremental differences.

	The complexity comes from interleaved additions, deletions and compactions.
	This tool handles them correctly and efficiently, avoiding unnecessary data transfers.
	`,
}
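
// As the help text above suggests, a typical backup is a script that loops
// over volume ids. A minimal sketch (the id range 1..100 and the backup
// directory are assumptions for illustration):
//
//	for vid in $(seq 1 100); do
//		weed backup -dir=/backup -volumeId=$vid -server=localhost:9333
//	done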

func runBackup(cmd *Command, args []string) bool {
	util.LoadSecurityConfiguration()
	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

	if *s.volumeId == -1 {
		return false
	}

	vid := needle.VolumeId(*s.volumeId)
	// find volume location, replication, ttl info
	lookup, err := operation.LookupVolumeId(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*s.master) }, grpcDialOption, vid.String())
	if err != nil {
		fmt.Printf("Error looking up volume %d: %v\n", vid, err)
		return true
	}
	volumeServer := lookup.Locations[0].ServerAddress()
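
	// query the volume server for its current sync status: tail offset,
	// compaction revision, ttl and replication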
	stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid))
	if err != nil {
		fmt.Printf("Error getting volume %d status: %v\n", vid, err)
		return true
	}
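
	// resolve the ttl: a -ttl flag value takes precedence, otherwise inherit
	// the ttl reported by the remote volume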
	var ttl *needle.TTL
	if *s.ttl != "" {
		ttl, err = needle.ReadTTL(*s.ttl)
		if err != nil {
			fmt.Printf("Error generating volume %d ttl %s: %v\n", vid, *s.ttl, err)
			return true
		}
	} else {
		ttl, err = needle.ReadTTL(stats.Ttl)
		if err != nil {
			fmt.Printf("Error reading volume %d ttl %s: %v\n", vid, stats.Ttl, err)
			return true
		}
	}
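
	// resolve replication the same way: the -replication flag takes
	// precedence, otherwise inherit the remote volume's setting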
	var replication *super_block.ReplicaPlacement
	if *s.replication != "" {
		replication, err = super_block.NewReplicaPlacementFromString(*s.replication)
		if err != nil {
			fmt.Printf("Error generating volume %d replication %s: %v\n", vid, *s.replication, err)
			return true
		}
	} else {
		replication, err = super_block.NewReplicaPlacementFromString(stats.Replication)
		if err != nil {
			fmt.Printf("Error reading volume %d replication %s: %v\n", vid, stats.Replication, err)
			return true
		}
	}
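
	// open (or create) the local copy of the volume in the backup directory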
	v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
	if err != nil {
		fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
		return true
	}
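
	// if the remote volume has been compacted since the last backup, compact
	// the local copy as well so both sides share the same compaction revision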
	if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
		if err = v.Compact2(0, 0, nil); err != nil {
			fmt.Printf("Error compacting volume %d before synchronizing: %v\n", vid, err)
			return true
		}
		if err = v.CommitCompact(); err != nil {
			fmt.Printf("Error committing compaction of volume %d before synchronizing: %v\n", vid, err)
			return true
		}
		v.SuperBlock.CompactionRevision = uint16(stats.CompactRevision)
		v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0)
	}
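
	// if the local .dat file is longer than the remote tail offset, the local
	// copy has diverged from the remote volume; discard it and start over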
	datSize, _, _ := v.FileStat()
	if datSize > stats.TailOffset {
		// remove the old data
		if err := v.Destroy(false); err != nil {
			fmt.Printf("Error destroying volume %d: %v\n", vid, err)
		}
		// recreate an empty volume
		v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
		if err != nil {
			fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
			return true
		}
	}
	defer v.Close()
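
	// pull only the incremental changes from the volume server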
	if err := v.IncrementalBackup(volumeServer, grpcDialOption); err != nil {
		fmt.Printf("Error synchronizing volume %d: %v\n", vid, err)
		return true
	}

	return true
}