2018-09-23 15:40:36 +08:00
|
|
|
package filersink
|
2018-09-21 16:54:29 +08:00
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
2019-04-06 11:31:58 +08:00
|
|
|
"google.golang.org/grpc"
|
2018-09-21 16:56:43 +08:00
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
2018-09-21 16:54:29 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
2019-02-16 01:59:22 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/security"
|
2018-09-21 16:54:29 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
|
|
|
)
|
|
|
|
|
2019-03-16 08:20:24 +08:00
|
|
|
func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) {
|
2018-09-21 16:54:29 +08:00
|
|
|
if len(sourceChunks) == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for _, sourceChunk := range sourceChunks {
|
|
|
|
wg.Add(1)
|
|
|
|
go func(chunk *filer_pb.FileChunk) {
|
|
|
|
defer wg.Done()
|
2019-03-16 08:20:24 +08:00
|
|
|
replicatedChunk, e := fs.replicateOneChunk(ctx, chunk)
|
2018-09-21 16:54:29 +08:00
|
|
|
if e != nil {
|
|
|
|
err = e
|
|
|
|
}
|
|
|
|
replicatedChunks = append(replicatedChunks, replicatedChunk)
|
|
|
|
}(sourceChunk)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-03-16 08:20:24 +08:00
|
|
|
func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) {
|
2018-09-21 16:54:29 +08:00
|
|
|
|
2019-03-16 08:20:24 +08:00
|
|
|
fileId, err := fs.fetchAndWrite(ctx, sourceChunk)
|
2018-09-21 16:54:29 +08:00
|
|
|
if err != nil {
|
2019-06-23 11:04:56 +08:00
|
|
|
return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
|
2018-09-21 16:54:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return &filer_pb.FileChunk{
|
|
|
|
FileId: fileId,
|
|
|
|
Offset: sourceChunk.Offset,
|
|
|
|
Size: sourceChunk.Size,
|
|
|
|
Mtime: sourceChunk.Mtime,
|
|
|
|
ETag: sourceChunk.ETag,
|
2019-06-23 11:04:56 +08:00
|
|
|
SourceFileId: sourceChunk.GetFileIdString(),
|
2018-09-21 16:54:29 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2019-03-16 08:20:24 +08:00
|
|
|
func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) {
|
2018-09-21 16:54:29 +08:00
|
|
|
|
2019-06-23 11:04:56 +08:00
|
|
|
filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString())
|
2018-09-21 16:54:29 +08:00
|
|
|
if err != nil {
|
2019-06-23 11:04:56 +08:00
|
|
|
return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
|
2018-09-21 16:54:29 +08:00
|
|
|
}
|
|
|
|
defer readCloser.Close()
|
|
|
|
|
|
|
|
var host string
|
2019-02-16 01:59:22 +08:00
|
|
|
var auth security.EncodedJwt
|
2018-09-21 16:54:29 +08:00
|
|
|
|
2020-01-27 06:42:11 +08:00
|
|
|
if err := fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
|
2018-09-21 16:54:29 +08:00
|
|
|
|
|
|
|
request := &filer_pb.AssignVolumeRequest{
|
|
|
|
Count: 1,
|
|
|
|
Replication: fs.replication,
|
|
|
|
Collection: fs.collection,
|
|
|
|
TtlSec: fs.ttlSec,
|
|
|
|
DataCenter: fs.dataCenter,
|
|
|
|
}
|
|
|
|
|
2019-03-16 08:20:24 +08:00
|
|
|
resp, err := client.AssignVolume(ctx, request)
|
2018-09-21 16:54:29 +08:00
|
|
|
if err != nil {
|
|
|
|
glog.V(0).Infof("assign volume failure %v: %v", request, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-02-16 01:59:22 +08:00
|
|
|
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
|
2018-09-21 16:54:29 +08:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return "", fmt.Errorf("filerGrpcAddress assign volume: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
|
|
|
|
|
2018-09-23 13:11:49 +08:00
|
|
|
glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
|
2018-09-21 16:54:29 +08:00
|
|
|
|
|
|
|
uploadResult, err := operation.Upload(fileUrl, filename, readCloser,
|
2019-02-16 01:59:22 +08:00
|
|
|
"gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
|
2018-09-21 16:54:29 +08:00
|
|
|
if err != nil {
|
|
|
|
glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
|
|
|
|
return "", fmt.Errorf("upload data: %v", err)
|
|
|
|
}
|
|
|
|
if uploadResult.Error != "" {
|
|
|
|
glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err)
|
|
|
|
return "", fmt.Errorf("upload result: %v", uploadResult.Error)
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-01-27 06:42:11 +08:00
|
|
|
func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
|
2018-09-21 16:54:29 +08:00
|
|
|
|
2020-01-27 06:42:11 +08:00
|
|
|
return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error {
|
2019-04-06 11:31:58 +08:00
|
|
|
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
|
2020-01-27 06:42:11 +08:00
|
|
|
return fn(ctx, client)
|
2019-04-06 11:31:58 +08:00
|
|
|
}, fs.grpcAddress, fs.grpcDialOption)
|
2018-09-21 16:54:29 +08:00
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// volumeId returns the volume id portion of a file id such as "3,01637037d6"
// (everything before the last comma). A file id without a comma — or with a
// comma only at position 0 — is returned unchanged.
func volumeId(fileId string) string {
	if idx := strings.LastIndex(fileId, ","); idx > 0 {
		return fileId[:idx]
	}
	return fileId
}
|