seaweedfs/weed/storage/volume_backup.go

261 lines
6.8 KiB
Go
Raw Normal View History

2019-03-26 00:16:12 +08:00
package storage
import (
"context"
"fmt"
2019-04-19 12:43:36 +08:00
"io"
"os"
2019-03-26 00:16:12 +08:00
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
2019-04-19 12:43:36 +08:00
"github.com/chrislusf/seaweedfs/weed/storage/needle"
2019-03-26 00:16:12 +08:00
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"google.golang.org/grpc"
)
2019-03-26 14:18:40 +08:00
func (v *Volume) GetVolumeSyncStatus() *volume_server_pb.VolumeSyncStatusResponse {
v.dataFileAccessLock.RLock()
defer v.dataFileAccessLock.RUnlock()
2019-03-26 14:18:40 +08:00
var syncStatus = &volume_server_pb.VolumeSyncStatusResponse{}
if datSize, _, err := v.DataBackend.GetStat(); err == nil {
syncStatus.TailOffset = uint64(datSize)
2019-03-26 14:18:40 +08:00
}
syncStatus.Collection = v.Collection
syncStatus.IdxFileSize = v.nm.IndexFileSize()
syncStatus.CompactRevision = uint32(v.SuperBlock.CompactionRevision)
2019-03-26 14:18:40 +08:00
syncStatus.Ttl = v.SuperBlock.Ttl.String()
syncStatus.Replication = v.SuperBlock.ReplicaPlacement.String()
return syncStatus
}
2019-03-26 00:16:12 +08:00
// The volume sync with a master volume via 2 steps:
// 1. The slave checks master side to find subscription checkpoint
// to setup the replication.
// 2. The slave receives the updates from master
/*
Assume the slave volume needs to follow the master volume.
The master volume could be compacted, and could be many files ahead of
slave volume.
Step 0: // implemented in command/backup.go, to avoid dat file size overflow.
0.1 If slave compact version is less than the master, do a local compaction, and set
local compact version the same as the master.
0.2 If the slave size is still bigger than the master, discard local copy and do a full copy.
Step 1:
The slave volume asks the master by the last modification time t.
The master does a binary search in the volume (using the .idx file as an array, and checking the appendAtNs in the .dat file),
to find the first entry with appendAtNs > t.
Step 2:
The master sends content bytes to the slave. The bytes are not chunked by needle.
Step 3:
The slave generates the needle map for the new bytes. (This may be optimized to incrementally
update the needle map when receiving new .dat bytes. But that seems unnecessary for now.)
*/
2019-04-18 13:04:49 +08:00
func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.DialOption) error {
2019-03-26 00:16:12 +08:00
ctx := context.Background()
2019-04-19 15:39:34 +08:00
startFromOffset, _, _ := v.FileStat()
2019-03-26 00:16:12 +08:00
appendAtNs, err := v.findLastAppendAtNs()
if err != nil {
return err
}
writeOffset := int64(startFromOffset)
2019-03-26 00:16:12 +08:00
err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
2019-04-18 13:04:49 +08:00
stream, err := client.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{
2019-03-26 00:16:12 +08:00
VolumeId: uint32(v.Id),
SinceNs: appendAtNs,
2019-03-26 00:16:12 +08:00
})
if err != nil {
return err
}
for {
resp, recvErr := stream.Recv()
if recvErr != nil {
if recvErr == io.EOF {
break
} else {
return recvErr
}
}
n, writeErr := v.DataBackend.WriteAt(resp.FileContent, writeOffset)
2019-03-26 00:16:12 +08:00
if writeErr != nil {
return writeErr
}
writeOffset += int64(n)
2019-03-26 00:16:12 +08:00
}
return nil
})
if err != nil {
return err
}
// add to needle map
return ScanVolumeFileFrom(v.version, v.DataBackend, int64(startFromOffset), &VolumeFileScanner4GenIdx{v: v})
2019-03-26 00:16:12 +08:00
}
func (v *Volume) findLastAppendAtNs() (uint64, error) {
offset, err := v.locateLastAppendEntry()
if err != nil {
return 0, err
}
2019-04-09 10:40:56 +08:00
if offset.IsZero() {
2019-03-26 00:16:12 +08:00
return 0, nil
}
return v.readAppendAtNs(offset)
}
func (v *Volume) locateLastAppendEntry() (Offset, error) {
indexFile, e := os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644)
if e != nil {
2019-04-09 10:40:56 +08:00
return Offset{}, fmt.Errorf("cannot read %s.idx: %v", v.FileName(), e)
2019-03-26 00:16:12 +08:00
}
defer indexFile.Close()
fi, err := indexFile.Stat()
if err != nil {
2019-04-09 10:40:56 +08:00
return Offset{}, fmt.Errorf("file %s stat error: %v", indexFile.Name(), err)
2019-03-26 00:16:12 +08:00
}
fileSize := fi.Size()
2019-04-19 15:39:34 +08:00
if fileSize%NeedleMapEntrySize != 0 {
2019-04-09 10:40:56 +08:00
return Offset{}, fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize)
2019-03-26 00:16:12 +08:00
}
if fileSize == 0 {
2019-04-09 10:40:56 +08:00
return Offset{}, nil
2019-03-26 00:16:12 +08:00
}
2019-04-19 15:39:34 +08:00
bytes := make([]byte, NeedleMapEntrySize)
n, e := indexFile.ReadAt(bytes, fileSize-NeedleMapEntrySize)
if n != NeedleMapEntrySize {
2019-04-09 10:40:56 +08:00
return Offset{}, fmt.Errorf("file %s read error: %v", indexFile.Name(), e)
2019-03-26 00:16:12 +08:00
}
_, offset, _ := idx.IdxFileEntry(bytes)
2019-03-26 00:16:12 +08:00
return offset, nil
}
func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) {
n, _, bodyLength, err := needle.ReadNeedleHeader(v.DataBackend, v.SuperBlock.version, offset.ToAcutalOffset())
2019-03-26 00:16:12 +08:00
if err != nil {
return 0, fmt.Errorf("ReadNeedleHeader: %v", err)
}
_, err = n.ReadNeedleBody(v.DataBackend, v.SuperBlock.version, offset.ToAcutalOffset()+int64(NeedleHeaderSize), bodyLength)
2019-03-26 00:16:12 +08:00
if err != nil {
2019-04-09 10:40:56 +08:00
return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToAcutalOffset(), bodyLength, err)
2019-03-26 00:16:12 +08:00
}
return n.AppendAtNs, nil
}
// on server side
func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast bool, err error) {
indexFile, openErr := os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644)
if openErr != nil {
err = fmt.Errorf("cannot read %s.idx: %v", v.FileName(), openErr)
return
}
defer indexFile.Close()
fi, statErr := indexFile.Stat()
if statErr != nil {
err = fmt.Errorf("file %s stat error: %v", indexFile.Name(), statErr)
return
}
fileSize := fi.Size()
2019-04-19 15:39:34 +08:00
if fileSize%NeedleMapEntrySize != 0 {
2019-03-26 00:16:12 +08:00
err = fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize)
return
}
2019-04-19 15:39:34 +08:00
bytes := make([]byte, NeedleMapEntrySize)
entryCount := fileSize / NeedleMapEntrySize
2019-03-26 00:16:12 +08:00
l := int64(0)
h := entryCount
for l < h {
m := (l + h) / 2
if m == entryCount {
2019-04-09 10:40:56 +08:00
return Offset{}, true, nil
2019-03-26 00:16:12 +08:00
}
// read the appendAtNs for entry m
offset, err = v.readAppendAtNsForIndexEntry(indexFile, bytes, m)
if err != nil {
return
}
mNs, nsReadErr := v.readAppendAtNs(offset)
if nsReadErr != nil {
err = nsReadErr
return
}
// move the boundary
if mNs <= sinceNs {
l = m + 1
} else {
h = m
}
}
if l == entryCount {
2019-04-09 10:40:56 +08:00
return Offset{}, true, nil
2019-03-26 00:16:12 +08:00
}
offset, err = v.readAppendAtNsForIndexEntry(indexFile, bytes, l)
return offset, false, err
}
2019-04-19 15:39:34 +08:00
// bytes is of size NeedleMapEntrySize
2019-03-26 00:16:12 +08:00
func (v *Volume) readAppendAtNsForIndexEntry(indexFile *os.File, bytes []byte, m int64) (Offset, error) {
2019-04-19 15:39:34 +08:00
if _, readErr := indexFile.ReadAt(bytes, m*NeedleMapEntrySize); readErr != nil && readErr != io.EOF {
2019-04-09 10:40:56 +08:00
return Offset{}, readErr
2019-03-26 00:16:12 +08:00
}
_, offset, _ := idx.IdxFileEntry(bytes)
2019-03-26 00:16:12 +08:00
return offset, nil
}
// generate the volume idx
type VolumeFileScanner4GenIdx struct {
v *Volume
}
func (scanner *VolumeFileScanner4GenIdx) VisitSuperBlock(superBlock SuperBlock) error {
return nil
}
func (scanner *VolumeFileScanner4GenIdx) ReadNeedleBody() bool {
return false
}
2019-10-22 15:50:30 +08:00
func (scanner *VolumeFileScanner4GenIdx) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
if n.Size > 0 && n.Size != TombstoneFileSize {
2019-04-09 10:40:56 +08:00
return scanner.v.nm.Put(n.Id, ToOffset(offset), n.Size)
}
2019-04-09 10:40:56 +08:00
return scanner.v.nm.Delete(n.Id, ToOffset(offset))
}