seaweedfs/weed/replication/sink/filersink/fetch_write.go

133 lines
3.7 KiB
Go
Raw Normal View History

2018-09-23 15:40:36 +08:00
package filersink
2018-09-21 16:54:29 +08:00
import (
"context"
"fmt"
2018-09-21 16:56:43 +08:00
"strings"
"sync"
"google.golang.org/grpc"
2018-09-21 16:56:43 +08:00
"github.com/chrislusf/seaweedfs/weed/glog"
2018-09-21 16:54:29 +08:00
"github.com/chrislusf/seaweedfs/weed/operation"
2020-03-04 16:39:47 +08:00
"github.com/chrislusf/seaweedfs/weed/pb"
2018-09-21 16:54:29 +08:00
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
2019-02-16 01:59:22 +08:00
"github.com/chrislusf/seaweedfs/weed/security"
2018-09-21 16:54:29 +08:00
)
func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) {
2018-09-21 16:54:29 +08:00
if len(sourceChunks) == 0 {
return
}
2020-03-11 14:37:14 +08:00
2020-03-18 01:01:55 +08:00
replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks))
2020-03-11 14:37:14 +08:00
2018-09-21 16:54:29 +08:00
var wg sync.WaitGroup
2020-03-11 14:37:14 +08:00
for chunkIndex, sourceChunk := range sourceChunks {
2018-09-21 16:54:29 +08:00
wg.Add(1)
2020-03-11 14:37:14 +08:00
go func(chunk *filer_pb.FileChunk, index int) {
2018-09-21 16:54:29 +08:00
defer wg.Done()
replicatedChunk, e := fs.replicateOneChunk(chunk, dir)
2018-09-21 16:54:29 +08:00
if e != nil {
err = e
}
2020-03-11 14:37:14 +08:00
replicatedChunks[index] = replicatedChunk
}(sourceChunk, chunkIndex)
2018-09-21 16:54:29 +08:00
}
wg.Wait()
return
}
func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) {
2018-09-21 16:54:29 +08:00
fileId, err := fs.fetchAndWrite(sourceChunk, dir)
2018-09-21 16:54:29 +08:00
if err != nil {
2019-06-23 11:04:56 +08:00
return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
2018-09-21 16:54:29 +08:00
}
return &filer_pb.FileChunk{
FileId: fileId,
Offset: sourceChunk.Offset,
Size: sourceChunk.Size,
Mtime: sourceChunk.Mtime,
ETag: sourceChunk.ETag,
2019-06-23 11:04:56 +08:00
SourceFileId: sourceChunk.GetFileIdString(),
CipherKey: sourceChunk.CipherKey,
IsGzipped: sourceChunk.IsGzipped,
2018-09-21 16:54:29 +08:00
}, nil
}
func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) {
2018-09-21 16:54:29 +08:00
filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString())
2018-09-21 16:54:29 +08:00
if err != nil {
2019-06-23 11:04:56 +08:00
return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
2018-09-21 16:54:29 +08:00
}
defer readCloser.Close()
var host string
2019-02-16 01:59:22 +08:00
var auth security.EncodedJwt
2018-09-21 16:54:29 +08:00
if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
2018-09-21 16:54:29 +08:00
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: fs.replication,
Collection: fs.collection,
TtlSec: fs.ttlSec,
DataCenter: fs.dataCenter,
ParentPath: dir,
2018-09-21 16:54:29 +08:00
}
resp, err := client.AssignVolume(context.Background(), request)
2018-09-21 16:54:29 +08:00
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
}
2018-09-21 16:54:29 +08:00
2019-02-16 01:59:22 +08:00
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
2018-09-21 16:54:29 +08:00
return nil
}); err != nil {
return "", fmt.Errorf("filerGrpcAddress assign volume: %v", err)
}
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
2018-09-23 13:11:49 +08:00
glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
2018-09-21 16:54:29 +08:00
// fetch data as is, regardless whether it is encrypted or not
uploadResult, err := operation.Upload(fileUrl, filename, false, readCloser, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
2018-09-21 16:54:29 +08:00
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
return "", fmt.Errorf("upload data: %v", err)
}
if uploadResult.Error != "" {
glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err)
return "", fmt.Errorf("upload result: %v", uploadResult.Error)
}
return
}
func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
2018-09-21 16:54:29 +08:00
2020-03-04 16:39:47 +08:00
return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
2019-04-06 11:31:58 +08:00
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client)
2019-04-06 11:31:58 +08:00
}, fs.grpcAddress, fs.grpcDialOption)
2018-09-21 16:54:29 +08:00
}
// volumeId returns the portion of a file id before its last comma
// (e.g. "3,01637037d6" -> "3"). If there is no comma past position 0,
// the input is returned unchanged.
func volumeId(fileId string) string {
	if idx := strings.LastIndex(fileId, ","); idx > 0 {
		return fileId[:idx]
	}
	return fileId
}