seaweedfs/weed/server/volume_grpc_copy.go

267 lines
8.4 KiB
Go
Raw Normal View History

package weed_server
import (
"context"
"fmt"
2019-04-19 15:39:34 +08:00
"io"
"math"
2019-04-19 15:39:34 +08:00
"os"
"path"
2019-04-19 15:39:34 +08:00
"time"
2019-04-19 12:43:36 +08:00
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
2019-04-19 12:43:36 +08:00
"github.com/chrislusf/seaweedfs/weed/storage/needle"
2019-05-07 04:58:42 +08:00
"github.com/chrislusf/seaweedfs/weed/util"
)
2019-05-28 02:59:03 +08:00
// BufferSizeLimit is the chunk size (2 MiB) used when streaming file content
// over the CopyFile gRPC stream.
const BufferSizeLimit = 1024 * 1024 * 2
// VolumeCopy copy the .idx .dat .vif files, and mount the volume
2019-04-18 13:04:49 +08:00
func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.VolumeCopyRequest) (*volume_server_pb.VolumeCopyResponse, error) {
2019-04-19 12:43:36 +08:00
v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
if v != nil {
2019-04-21 02:35:20 +08:00
return nil, fmt.Errorf("volume %d already exists", req.VolumeId)
}
location := vs.store.FindFreeLocation()
if location == nil {
return nil, fmt.Errorf("no space left")
}
// the master will not start compaction for read-only volumes, so it is safe to just copy files directly
// copy .dat and .idx files
// read .idx .dat file size and timestamp
// send .idx file
// send .dat file
// confirm size and timestamp
var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse
2019-04-21 02:35:20 +08:00
var volumeFileName, idxFileName, datFileName string
err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
var err error
volFileInfoResp, err = client.ReadVolumeFileStatus(context.Background(),
&volume_server_pb.ReadVolumeFileStatusRequest{
VolumeId: req.VolumeId,
})
if nil != err {
return fmt.Errorf("read volume file status failed, %v", err)
}
2019-06-03 15:13:31 +08:00
volumeFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId))
2019-04-21 02:35:20 +08:00
// println("source:", volFileInfoResp.String())
// copy ecx file
2020-02-26 14:23:59 +08:00
if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil {
return err
}
2020-02-26 14:23:59 +08:00
if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil {
return err
}
2020-02-26 14:23:59 +08:00
if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil {
return err
}
return nil
})
2019-06-03 15:13:31 +08:00
idxFileName = volumeFileName + ".idx"
datFileName = volumeFileName + ".dat"
2019-04-21 02:35:20 +08:00
if err != nil && volumeFileName != "" {
os.Remove(idxFileName)
os.Remove(datFileName)
os.Remove(volumeFileName + ".vif")
return nil, err
}
2019-04-18 15:19:18 +08:00
if err = checkCopyFiles(volFileInfoResp, idxFileName, datFileName); err != nil { // added by panyc16
return nil, err
}
// mount the volume
2019-04-19 12:43:36 +08:00
err = vs.store.MountVolume(needle.VolumeId(req.VolumeId))
if err != nil {
return nil, fmt.Errorf("failed to mount volume %d: %v", req.VolumeId, err)
}
2019-04-19 15:39:34 +08:00
return &volume_server_pb.VolumeCopyResponse{
LastAppendAtNs: volFileInfoResp.DatFileTimestampSeconds * uint64(time.Second),
2019-04-19 15:39:34 +08:00
}, err
}
2020-02-26 14:23:59 +08:00
func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid, compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend, ignoreSourceFileNotFound bool) error {
2020-02-26 14:23:59 +08:00
copyFileClient, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
2019-12-24 04:48:20 +08:00
VolumeId: vid,
Ext: ext,
CompactionRevision: compactRevision,
StopOffset: stopOffset,
Collection: collection,
IsEcVolume: isEcVolume,
IgnoreSourceFileNotFound: ignoreSourceFileNotFound,
})
if err != nil {
return fmt.Errorf("failed to start copying volume %d %s file: %v", vid, ext, err)
}
2019-06-20 13:57:14 +08:00
err = writeToFile(copyFileClient, baseFileName+ext, util.NewWriteThrottler(vs.compactionBytePerSecond), isAppend)
if err != nil {
2019-06-03 17:26:31 +08:00
return fmt.Errorf("failed to copy %s file: %v", baseFileName+ext, err)
}
return nil
}
/**
2019-04-18 15:19:18 +08:00
only check the the differ of the file size
todo: maybe should check the received count and deleted count of the volume
*/
func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse, idxFileName, datFileName string) error {
stat, err := os.Stat(idxFileName)
if err != nil {
2019-06-03 15:13:31 +08:00
return fmt.Errorf("stat idx file %s failed, %v", idxFileName, err)
}
if originFileInf.IdxFileSize != uint64(stat.Size()) {
2019-06-03 15:13:31 +08:00
return fmt.Errorf("idx file %s size [%v] is not same as origin file size [%v]",
idxFileName, stat.Size(), originFileInf.IdxFileSize)
}
stat, err = os.Stat(datFileName)
if err != nil {
return fmt.Errorf("get dat file info failed, %v", err)
}
if originFileInf.DatFileSize != uint64(stat.Size()) {
return fmt.Errorf("the dat file size [%v] is not same as origin file size [%v]",
stat.Size(), originFileInf.DatFileSize)
}
return nil
}
2019-06-20 13:57:14 +08:00
func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string, wt *util.WriteThrottler, isAppend bool) error {
2019-04-11 09:53:31 +08:00
glog.V(4).Infof("writing to %s", fileName)
2019-06-20 15:55:52 +08:00
flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
2019-06-20 13:57:14 +08:00
if isAppend {
2019-06-20 15:55:52 +08:00
flags = os.O_WRONLY | os.O_CREATE
2019-06-20 13:57:14 +08:00
}
dst, err := os.OpenFile(fileName, flags, 0644)
if err != nil {
return nil
}
defer dst.Close()
for {
resp, receiveErr := client.Recv()
if receiveErr == io.EOF {
break
}
if receiveErr != nil {
return fmt.Errorf("receiving %s: %v", fileName, receiveErr)
}
dst.Write(resp.FileContent)
2019-05-07 04:58:42 +08:00
wt.MaybeSlowdown(int64(len(resp.FileContent)))
}
return nil
}
func (vs *VolumeServer) ReadVolumeFileStatus(ctx context.Context, req *volume_server_pb.ReadVolumeFileStatusRequest) (*volume_server_pb.ReadVolumeFileStatusResponse, error) {
resp := &volume_server_pb.ReadVolumeFileStatusResponse{}
2019-04-19 12:43:36 +08:00
v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
if v == nil {
return nil, fmt.Errorf("not found volume id %d", req.VolumeId)
}
resp.VolumeId = req.VolumeId
2019-04-19 15:39:34 +08:00
datSize, idxSize, modTime := v.FileStat()
resp.DatFileSize = datSize
resp.IdxFileSize = idxSize
resp.DatFileTimestampSeconds = uint64(modTime.Unix())
resp.IdxFileTimestampSeconds = uint64(modTime.Unix())
2019-04-11 14:39:53 +08:00
resp.FileCount = v.FileCount()
resp.CompactionRevision = uint32(v.CompactionRevision)
2019-04-21 02:35:20 +08:00
resp.Collection = v.Collection
return resp, nil
}
// CopyFile client pulls the volume related file from the source server.
// if req.CompactionRevision != math.MaxUint32, it ensures the compact revision is as expected
// The copying still stop at req.StopOffset, but you can set it to math.MaxUint64 in order to read all data.
2019-03-26 14:18:40 +08:00
func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream volume_server_pb.VolumeServer_CopyFileServer) error {
2019-06-03 17:26:31 +08:00
var fileName string
if !req.IsEcVolume {
v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
if v == nil {
return fmt.Errorf("not found volume id %d", req.VolumeId)
}
2019-06-03 17:26:31 +08:00
if uint32(v.CompactionRevision) != req.CompactionRevision && req.CompactionRevision != math.MaxUint32 {
return fmt.Errorf("volume %d is compacted", req.VolumeId)
}
fileName = v.FileName() + req.Ext
} else {
baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + req.Ext
for _, location := range vs.store.Locations {
tName := path.Join(location.Directory, baseFileName)
2019-06-05 13:04:10 +08:00
if util.FileExists(tName) {
fileName = tName
}
}
if fileName == "" {
2019-12-29 04:59:31 +08:00
if req.IgnoreSourceFileNotFound {
return nil
}
return fmt.Errorf("CopyFile not found ec volume id %d", req.VolumeId)
2019-06-03 17:26:31 +08:00
}
}
bytesToRead := int64(req.StopOffset)
file, err := os.Open(fileName)
if err != nil {
2019-12-24 04:48:20 +08:00
if req.IgnoreSourceFileNotFound && err == os.ErrNotExist {
return nil
}
return err
}
defer file.Close()
2019-05-28 02:59:03 +08:00
buffer := make([]byte, BufferSizeLimit)
for bytesToRead > 0 {
bytesread, err := file.Read(buffer)
// println(fileName, "read", bytesread, "bytes, with target", bytesToRead)
if err != nil {
if err != io.EOF {
return err
}
// println(fileName, "read", bytesread, "bytes, with target", bytesToRead, "err", err.Error())
break
}
if int64(bytesread) > bytesToRead {
bytesread = int(bytesToRead)
}
err = stream.Send(&volume_server_pb.CopyFileResponse{
FileContent: buffer[:bytesread],
})
if err != nil {
// println("sending", bytesread, "bytes err", err.Error())
return err
}
bytesToRead -= int64(bytesread)
}
return nil
}