2019-05-20 15:53:17 +08:00
|
|
|
package weed_server
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
2019-05-28 02:59:03 +08:00
|
|
|
"io"
|
2019-06-03 17:26:31 +08:00
|
|
|
"io/ioutil"
|
2019-05-20 15:53:17 +08:00
|
|
|
"math"
|
2019-05-26 05:02:06 +08:00
|
|
|
"os"
|
2019-06-03 17:26:31 +08:00
|
|
|
"path"
|
|
|
|
"strings"
|
2019-05-20 15:53:17 +08:00
|
|
|
|
2019-05-26 14:23:19 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
2019-05-20 15:53:17 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
2019-06-20 15:17:11 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
2019-06-03 17:26:31 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
2019-05-20 15:53:17 +08:00
|
|
|
)
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
|
|
|
Steps to apply erasure coding to .dat .idx files
|
|
|
|
0. ensure the volume is readonly
|
2019-05-26 05:02:06 +08:00
|
|
|
1. client call VolumeEcShardsGenerate to generate the .ecx and .ec01~.ec14 files
|
2019-05-20 15:53:17 +08:00
|
|
|
2. client ask master for possible servers to hold the ec files, at least 4 servers
|
2019-05-26 05:02:06 +08:00
|
|
|
3. client call VolumeEcShardsCopy on above target servers to copy ec files from the source server
|
2019-05-20 15:53:17 +08:00
|
|
|
4. target servers report the new ec files to the master
|
|
|
|
5. master stores vid -> [14]*DataNode
|
|
|
|
6. client checks master. If all 14 slices are ready, delete the original .dat, .idx files
|
|
|
|
|
2019-05-26 05:02:06 +08:00
|
|
|
*/
|
2019-05-20 15:53:17 +08:00
|
|
|
|
2019-05-26 05:02:06 +08:00
|
|
|
// VolumeEcShardsGenerate generates the .ecx and .ec01 ~ .ec14 files
|
|
|
|
func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) {
|
2019-05-20 15:53:17 +08:00
|
|
|
|
|
|
|
v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
|
|
|
|
if v == nil {
|
|
|
|
return nil, fmt.Errorf("volume %d not found", req.VolumeId)
|
|
|
|
}
|
|
|
|
baseFileName := v.FileName()
|
|
|
|
|
2019-05-26 05:02:06 +08:00
|
|
|
if v.Collection != req.Collection {
|
|
|
|
return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
|
|
|
|
}
|
|
|
|
|
2019-05-20 15:53:17 +08:00
|
|
|
// write .ecx file
|
|
|
|
if err := erasure_coding.WriteSortedEcxFile(baseFileName); err != nil {
|
|
|
|
return nil, fmt.Errorf("WriteSortedEcxFile %s: %v", baseFileName, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// write .ec01 ~ .ec14 files
|
|
|
|
if err := erasure_coding.WriteEcFiles(baseFileName); err != nil {
|
|
|
|
return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err)
|
|
|
|
}
|
|
|
|
|
2019-05-26 05:02:06 +08:00
|
|
|
return &volume_server_pb.VolumeEcShardsGenerateResponse{}, nil
|
2019-05-20 15:53:17 +08:00
|
|
|
}
|
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
// VolumeEcShardsRebuild generates the any of the missing .ec01 ~ .ec14 files
|
|
|
|
func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_server_pb.VolumeEcShardsRebuildRequest) (*volume_server_pb.VolumeEcShardsRebuildResponse, error) {
|
|
|
|
|
|
|
|
baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))
|
|
|
|
|
|
|
|
var rebuiltShardIds []uint32
|
|
|
|
|
|
|
|
for _, location := range vs.store.Locations {
|
|
|
|
if util.FileExists(path.Join(location.Directory, baseFileName+".ecx")) {
|
|
|
|
// write .ec01 ~ .ec14 files
|
2019-06-04 02:50:54 +08:00
|
|
|
baseFileName = path.Join(location.Directory, baseFileName)
|
2019-06-03 17:26:31 +08:00
|
|
|
if generatedShardIds, err := erasure_coding.RebuildEcFiles(baseFileName); err != nil {
|
|
|
|
return nil, fmt.Errorf("RebuildEcFiles %s: %v", baseFileName, err)
|
|
|
|
} else {
|
|
|
|
rebuiltShardIds = generatedShardIds
|
|
|
|
}
|
2019-06-20 13:57:14 +08:00
|
|
|
|
|
|
|
if err := erasure_coding.RebuildEcxFile(baseFileName); err != nil {
|
|
|
|
return nil, fmt.Errorf("RebuildEcxFile %s: %v", baseFileName, err)
|
|
|
|
}
|
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &volume_server_pb.VolumeEcShardsRebuildResponse{
|
|
|
|
RebuiltShardIds: rebuiltShardIds,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2019-05-26 05:02:06 +08:00
|
|
|
// VolumeEcShardsCopy copy the .ecx and some ec data slices
|
|
|
|
func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_server_pb.VolumeEcShardsCopyRequest) (*volume_server_pb.VolumeEcShardsCopyResponse, error) {
|
2019-05-20 15:53:17 +08:00
|
|
|
|
|
|
|
location := vs.store.FindFreeLocation()
|
|
|
|
if location == nil {
|
|
|
|
return nil, fmt.Errorf("no space left")
|
|
|
|
}
|
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId))
|
2019-05-20 15:53:17 +08:00
|
|
|
|
|
|
|
err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
|
|
|
|
|
|
|
|
// copy ec data slices
|
2019-05-28 02:59:03 +08:00
|
|
|
for _, shardId := range req.ShardIds {
|
2019-06-20 13:57:14 +08:00
|
|
|
if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false); err != nil {
|
2019-05-20 15:53:17 +08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
if !req.CopyEcxFile {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// copy ecx file
|
2019-06-20 13:57:14 +08:00
|
|
|
if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// copy ecj file
|
|
|
|
if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true); err != nil {
|
2019-06-03 17:26:31 +08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-05-20 15:53:17 +08:00
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
2019-05-26 05:02:06 +08:00
|
|
|
return nil, fmt.Errorf("VolumeEcShardsCopy volume %d: %v", req.VolumeId, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return &volume_server_pb.VolumeEcShardsCopyResponse{}, nil
|
|
|
|
}
|
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
// VolumeEcShardsDelete local delete the .ecx and some ec data slices if not needed
|
|
|
|
// the shard should not be mounted before calling this.
|
2019-05-26 05:02:06 +08:00
|
|
|
func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_server_pb.VolumeEcShardsDeleteRequest) (*volume_server_pb.VolumeEcShardsDeleteResponse, error) {
|
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))
|
2019-06-01 16:41:22 +08:00
|
|
|
|
2019-06-04 11:25:02 +08:00
|
|
|
found := false
|
|
|
|
for _, location := range vs.store.Locations {
|
|
|
|
if util.FileExists(path.Join(location.Directory, baseFilename+".ecx")) {
|
|
|
|
found = true
|
|
|
|
baseFilename = path.Join(location.Directory, baseFilename)
|
|
|
|
for _, shardId := range req.ShardIds {
|
|
|
|
os.Remove(baseFilename + erasure_coding.ToExt(int(shardId)))
|
|
|
|
}
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if !found {
|
|
|
|
return nil, nil
|
2019-05-26 05:02:06 +08:00
|
|
|
}
|
2019-06-01 16:41:22 +08:00
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
// check whether to delete the ecx file also
|
|
|
|
hasEcxFile := false
|
|
|
|
existingShardCount := 0
|
2019-05-26 05:02:06 +08:00
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
for _, location := range vs.store.Locations {
|
|
|
|
fileInfos, err := ioutil.ReadDir(location.Directory)
|
|
|
|
if err != nil {
|
|
|
|
continue
|
2019-05-26 05:02:06 +08:00
|
|
|
}
|
2019-06-03 17:26:31 +08:00
|
|
|
for _, fileInfo := range fileInfos {
|
|
|
|
if fileInfo.Name() == baseFilename+".ecx" {
|
|
|
|
hasEcxFile = true
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if strings.HasPrefix(fileInfo.Name(), baseFilename+".ec") {
|
|
|
|
existingShardCount++
|
|
|
|
}
|
2019-05-26 05:02:06 +08:00
|
|
|
}
|
2019-05-20 15:53:17 +08:00
|
|
|
}
|
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
if hasEcxFile && existingShardCount == 0 {
|
|
|
|
if err := os.Remove(baseFilename + ".ecx"); err != nil {
|
|
|
|
return nil, err
|
2019-06-01 16:41:22 +08:00
|
|
|
}
|
2019-06-20 13:57:14 +08:00
|
|
|
if err := os.Remove(baseFilename + ".ecj"); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-06-01 16:41:22 +08:00
|
|
|
}
|
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil
|
2019-05-20 15:53:17 +08:00
|
|
|
}
|
2019-05-26 14:23:19 +08:00
|
|
|
|
|
|
|
func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_server_pb.VolumeEcShardsMountRequest) (*volume_server_pb.VolumeEcShardsMountResponse, error) {
|
|
|
|
|
2019-05-28 02:59:03 +08:00
|
|
|
for _, shardId := range req.ShardIds {
|
2019-05-26 14:23:19 +08:00
|
|
|
err := vs.store.MountEcShards(req.Collection, needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
glog.Errorf("ec shard mount %v: %v", req, err)
|
|
|
|
} else {
|
|
|
|
glog.V(2).Infof("ec shard mount %v", req)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("mount %d.%d: %v", req.VolumeId, shardId, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &volume_server_pb.VolumeEcShardsMountResponse{}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (vs *VolumeServer) VolumeEcShardsUnmount(ctx context.Context, req *volume_server_pb.VolumeEcShardsUnmountRequest) (*volume_server_pb.VolumeEcShardsUnmountResponse, error) {
|
|
|
|
|
2019-05-28 02:59:03 +08:00
|
|
|
for _, shardId := range req.ShardIds {
|
2019-05-26 14:23:19 +08:00
|
|
|
err := vs.store.UnmountEcShards(needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
glog.Errorf("ec shard unmount %v: %v", req, err)
|
|
|
|
} else {
|
|
|
|
glog.V(2).Infof("ec shard unmount %v", req)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unmount %d.%d: %v", req.VolumeId, shardId, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &volume_server_pb.VolumeEcShardsUnmountResponse{}, nil
|
|
|
|
}
|
2019-05-28 02:59:03 +08:00
|
|
|
|
|
|
|
func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardReadRequest, stream volume_server_pb.VolumeServer_VolumeEcShardReadServer) error {
|
|
|
|
|
2019-05-28 12:40:51 +08:00
|
|
|
ecVolume, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId))
|
2019-05-28 02:59:03 +08:00
|
|
|
if !found {
|
2019-06-04 01:38:21 +08:00
|
|
|
return fmt.Errorf("VolumeEcShardRead not found ec volume id %d", req.VolumeId)
|
2019-05-28 02:59:03 +08:00
|
|
|
}
|
2019-05-28 12:40:51 +08:00
|
|
|
ecShard, found := ecVolume.FindEcVolumeShard(erasure_coding.ShardId(req.ShardId))
|
2019-05-28 02:59:03 +08:00
|
|
|
if !found {
|
|
|
|
return fmt.Errorf("not found ec shard %d.%d", req.VolumeId, req.ShardId)
|
|
|
|
}
|
|
|
|
|
2019-06-21 16:14:10 +08:00
|
|
|
if req.FileKey != 0 {
|
|
|
|
_, size, _ := ecVolume.FindNeedleFromEcx(types.Uint64ToNeedleId(req.FileKey))
|
|
|
|
if size == types.TombstoneFileSize {
|
|
|
|
return stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
|
|
|
|
IsDeleted: true,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-29 12:29:07 +08:00
|
|
|
bufSize := req.Size
|
|
|
|
if bufSize > BufferSizeLimit {
|
|
|
|
bufSize = BufferSizeLimit
|
|
|
|
}
|
|
|
|
buffer := make([]byte, bufSize)
|
|
|
|
|
2019-05-28 02:59:03 +08:00
|
|
|
startOffset, bytesToRead := req.Offset, req.Size
|
|
|
|
|
|
|
|
for bytesToRead > 0 {
|
|
|
|
bytesread, err := ecShard.ReadAt(buffer, startOffset)
|
|
|
|
|
|
|
|
// println(fileName, "read", bytesread, "bytes, with target", bytesToRead)
|
2019-05-29 12:29:07 +08:00
|
|
|
if bytesread > 0 {
|
2019-05-28 02:59:03 +08:00
|
|
|
|
2019-05-29 12:29:07 +08:00
|
|
|
if int64(bytesread) > bytesToRead {
|
|
|
|
bytesread = int(bytesToRead)
|
|
|
|
}
|
|
|
|
err = stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
|
|
|
|
Data: buffer[:bytesread],
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
// println("sending", bytesread, "bytes err", err.Error())
|
2019-05-28 02:59:03 +08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-05-29 12:29:07 +08:00
|
|
|
bytesToRead -= int64(bytesread)
|
|
|
|
|
2019-05-28 02:59:03 +08:00
|
|
|
}
|
2019-05-29 12:29:07 +08:00
|
|
|
|
2019-05-28 02:59:03 +08:00
|
|
|
if err != nil {
|
2019-05-29 12:29:07 +08:00
|
|
|
if err != io.EOF {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
2019-05-28 02:59:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
|
|
|
|
}
|
2019-06-20 15:17:11 +08:00
|
|
|
|
|
|
|
func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_server_pb.VolumeEcBlobDeleteRequest) (*volume_server_pb.VolumeEcBlobDeleteResponse, error) {
|
|
|
|
|
|
|
|
resp := &volume_server_pb.VolumeEcBlobDeleteResponse{}
|
|
|
|
|
|
|
|
for _, location := range vs.store.Locations {
|
|
|
|
if localEcVolume, found := location.FindEcVolume(needle.VolumeId(req.VolumeId)); found {
|
|
|
|
|
|
|
|
_, size, _, err := localEcVolume.LocateEcShardNeedle(types.NeedleId(req.FileKey), needle.Version(req.Version))
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("locate in local ec volume: %v", err)
|
|
|
|
}
|
|
|
|
if size == types.TombstoneFileSize {
|
|
|
|
return resp, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
err = localEcVolume.DeleteNeedleFromEcx(types.NeedleId(req.FileKey))
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return resp, nil
|
|
|
|
}
|