seaweedfs/weed/server/volume_grpc_tier_upload.go

package weed_server

import (
	"fmt"
	"os"
	"time"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
// VolumeTierMoveDatToRemote copies a volume's .dat file to a remote tier
func (vs *VolumeServer) VolumeTierMoveDatToRemote(req *volume_server_pb.VolumeTierMoveDatToRemoteRequest, stream volume_server_pb.VolumeServer_VolumeTierMoveDatToRemoteServer) error {

	// find existing volume
	v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
	if v == nil {
		return fmt.Errorf("volume %d not found", req.VolumeId)
	}

	// verify the collection
	if v.Collection != req.Collection {
		return fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
	}

	// locate the disk file
	diskFile, ok := v.DataBackend.(*backend.DiskFile)
	if !ok {
		return fmt.Errorf("volume %d is not on local disk", req.VolumeId)
	}

	// check for a valid storage backend type
	backendStorage, found := backend.BackendStorages[req.DestinationBackendName]
	if !found {
		var keys []string
		for key := range backend.BackendStorages {
			keys = append(keys, key)
		}
		return fmt.Errorf("destination %s not found, supported: %v", req.DestinationBackendName, keys)
	}

	// check whether the volume already has a copy on the requested backend;
	// if so, do not copy it again
	backendType, backendId := backend.BackendNameToTypeId(req.DestinationBackendName)
	for _, remoteFile := range v.GetVolumeInfo().GetFiles() {
		if remoteFile.BackendType == backendType && remoteFile.BackendId == backendId {
			return fmt.Errorf("destination %s already exists", req.DestinationBackendName)
		}
	}
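
	// report copy progress to the client on the gRPC stream, throttled to at most one update per second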
	startTime := time.Now()
	fn := func(progressed int64, percentage float32) error {
		now := time.Now()
		if now.Sub(startTime) < time.Second {
			return nil
		}
		startTime = now
		return stream.Send(&volume_server_pb.VolumeTierMoveDatToRemoteResponse{
			Processed:           progressed,
			ProcessedPercentage: percentage,
		})
	}

	// remember the file's original source
	attributes := make(map[string]string)
	attributes["volumeId"] = v.Id.String()
	attributes["collection"] = v.Collection
	attributes["ext"] = ".dat"

	// copy the data file
	key, size, err := backendStorage.CopyFile(diskFile.File, attributes, fn)
	if err != nil {
		return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err)
	}

	// save the remote file to volume tier info
	v.GetVolumeInfo().Files = append(v.GetVolumeInfo().GetFiles(), &volume_server_pb.RemoteFile{
		BackendType:  backendType,
		BackendId:    backendId,
		Key:          key,
		Offset:       0,
		FileSize:     uint64(size),
		ModifiedTime: uint64(time.Now().Unix()),
		Extension:    ".dat",
	})
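
	// persist the updated volume info, then reload it so the volume serves the .dat content from the remote copy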
	if err := v.SaveVolumeInfo(); err != nil {
		return fmt.Errorf("volume %d failed to save remote file info: %v", v.Id, err)
	}
	if err := v.LoadRemoteFile(); err != nil {
		return fmt.Errorf("volume %d failed to load remote file: %v", v.Id, err)
	}
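
	// unless the caller asked to keep it, the local .dat file is no longer needed once the remote copy is recorded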
	if !req.KeepLocalDatFile {
		os.Remove(v.FileName(".dat"))
	}

	return nil
}