seaweedfs/weed/server/filer_grpc_server.go

package weed_server

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/util"
)
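
// LookupDirectoryEntry looks up a single entry by joining req.Directory and req.Name,
// returning its attributes, chunks, and hard link info. A missing entry is
// returned as filer_pb.ErrNotFound together with an empty response.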
func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) {

	glog.V(4).Infof("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name))

	entry, err := fs.filer.FindEntry(ctx, util.JoinPath(req.Directory, req.Name))
	if err == filer_pb.ErrNotFound {
		return &filer_pb.LookupDirectoryEntryResponse{}, err
	}
	if err != nil {
		glog.V(3).Infof("LookupDirectoryEntry %s: %+v", filepath.Join(req.Directory, req.Name), err)
		return nil, err
	}

	return &filer_pb.LookupDirectoryEntryResponse{
		Entry: &filer_pb.Entry{
			Name:            req.Name,
			IsDirectory:     entry.IsDirectory(),
			Attributes:      filer.EntryAttributeToPb(entry),
			Chunks:          entry.Chunks,
			Extended:        entry.Extended,
			HardLinkId:      entry.HardLinkId,
			HardLinkCounter: entry.HardLinkCounter,
			Content:         entry.Content,
		},
	}, nil
}
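
// ListEntries streams the entries under req.Directory to the client, reading
// from the store in batches of at most filer.PaginationSize until req.Limit
// entries have been sent or the directory is exhausted.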
func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) (err error) {

	glog.V(4).Infof("ListEntries %v", req)

	limit := int(req.Limit)
	if limit == 0 {
		limit = fs.option.DirListingLimit
	}

	paginationLimit := filer.PaginationSize
	if limit < paginationLimit {
		paginationLimit = limit
	}

	lastFileName := req.StartFromFileName
	includeLastFile := req.InclusiveStartFrom
	var listErr error
	for limit > 0 {
		var hasEntries bool
		lastFileName, listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", "", func(entry *filer.Entry) bool {
			hasEntries = true
			if err = stream.Send(&filer_pb.ListEntriesResponse{
				Entry: &filer_pb.Entry{
					Name:            entry.Name(),
					IsDirectory:     entry.IsDirectory(),
					Chunks:          entry.Chunks,
					Attributes:      filer.EntryAttributeToPb(entry),
					Extended:        entry.Extended,
					HardLinkId:      entry.HardLinkId,
					HardLinkCounter: entry.HardLinkCounter,
					Content:         entry.Content,
				},
			}); err != nil {
				return false
			}

			limit--
			if limit == 0 {
				return false
			}
			return true
		})

		if listErr != nil {
			return listErr
		}
		if err != nil {
			return err
		}
		if !hasEntries {
			return nil
		}

		includeLastFile = false
	}

	return nil
}
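
// LookupVolume resolves each requested volume id to its current locations via
// the cached master client. Ids that parse but are not known to the master are
// silently omitted from the response map.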
func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVolumeRequest) (*filer_pb.LookupVolumeResponse, error) {

	resp := &filer_pb.LookupVolumeResponse{
		LocationsMap: make(map[string]*filer_pb.Locations),
	}

	for _, vidString := range req.VolumeIds {
		vid, err := strconv.Atoi(vidString)
		if err != nil {
			glog.V(1).Infof("Unknown volume id %s", vidString)
			return nil, err
		}
		var locs []*filer_pb.Location
		locations, found := fs.filer.MasterClient.GetLocations(uint32(vid))
		if !found {
			continue
		}
		for _, loc := range locations {
			locs = append(locs, &filer_pb.Location{
				Url:       loc.Url,
				PublicUrl: loc.PublicUrl,
			})
		}
		resp.LocationsMap[vidString] = &filer_pb.Locations{
			Locations: locs,
		}
	}

	return resp, nil
}
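
// lookupFileId returns one HTTP URL per known location of the given file id.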
func (fs *FilerServer) lookupFileId(fileId string) (targetUrls []string, err error) {
	fid, err := needle.ParseFileIdFromString(fileId)
	if err != nil {
		return nil, err
	}
	locations, found := fs.filer.MasterClient.GetLocations(uint32(fid.VolumeId))
	if !found || len(locations) == 0 {
		return nil, fmt.Errorf("volume %d not found for file id %s", fid.VolumeId, fileId)
	}
	for _, loc := range locations {
		targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId))
	}
	return
}
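
// CreateEntry creates a new entry under req.Directory. Chunk cleanup failures
// are returned as a gRPC error; creation failures are reported via resp.Error.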
func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {

	glog.V(4).Infof("CreateEntry %v/%v", req.Directory, req.Entry.Name)

	resp = &filer_pb.CreateEntryResponse{}

	chunks, garbage, err2 := fs.cleanupChunks(util.Join(req.Directory, req.Entry.Name), nil, req.Entry)
	if err2 != nil {
		return &filer_pb.CreateEntryResponse{}, fmt.Errorf("CreateEntry cleanupChunks %s %s: %v", req.Directory, req.Entry.Name, err2)
	}

	createErr := fs.filer.CreateEntry(ctx, &filer.Entry{
		FullPath:        util.JoinPath(req.Directory, req.Entry.Name),
		Attr:            filer.PbToEntryAttribute(req.Entry.Attributes),
		Chunks:          chunks,
		Extended:        req.Entry.Extended,
		HardLinkId:      filer.HardLinkId(req.Entry.HardLinkId),
		HardLinkCounter: req.Entry.HardLinkCounter,
		Content:         req.Entry.Content,
	}, req.OExcl, req.IsFromOtherCluster, req.Signatures)

	if createErr == nil {
		fs.filer.DeleteChunks(garbage)
	} else {
		glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr)
		resp.Error = createErr.Error()
	}

	return
}
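
// UpdateEntry replaces an existing entry's chunks and selected attributes,
// deletes chunks that are no longer referenced, and notifies metadata-event
// subscribers on success.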
func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) {

	glog.V(4).Infof("UpdateEntry %v", req)

	fullpath := util.Join(req.Directory, req.Entry.Name)
	entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath))
	if err != nil {
		return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err)
	}

	chunks, garbage, err2 := fs.cleanupChunks(fullpath, entry, req.Entry)
	if err2 != nil {
		return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("UpdateEntry cleanupChunks %s: %v", fullpath, err2)
	}

	newEntry := &filer.Entry{
		FullPath:        util.JoinPath(req.Directory, req.Entry.Name),
		Attr:            entry.Attr,
		Extended:        req.Entry.Extended,
		Chunks:          chunks,
		HardLinkId:      filer.HardLinkId(req.Entry.HardLinkId),
		HardLinkCounter: req.Entry.HardLinkCounter,
		Content:         req.Entry.Content,
	}

	glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v, extended: %v => %v",
		fullpath, entry.Attr, len(entry.Chunks), entry.Chunks,
		req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks,
		entry.Extended, req.Entry.Extended)

	if req.Entry.Attributes != nil {
		if req.Entry.Attributes.Mtime != 0 {
			newEntry.Attr.Mtime = time.Unix(req.Entry.Attributes.Mtime, 0)
		}
		if req.Entry.Attributes.FileMode != 0 {
			newEntry.Attr.Mode = os.FileMode(req.Entry.Attributes.FileMode)
		}
		newEntry.Attr.Uid = req.Entry.Attributes.Uid
		newEntry.Attr.Gid = req.Entry.Attributes.Gid
		newEntry.Attr.Mime = req.Entry.Attributes.Mime
		newEntry.Attr.UserName = req.Entry.Attributes.UserName
		newEntry.Attr.GroupNames = req.Entry.Attributes.GroupName
	}

	if filer.EqualEntry(entry, newEntry) {
		return &filer_pb.UpdateEntryResponse{}, err
	}

	if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil {
		fs.filer.DeleteChunks(garbage)
		fs.filer.NotifyUpdateEvent(ctx, entry, newEntry, true, req.IsFromOtherCluster, req.Signatures)
	} else {
		glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err)
	}

	return &filer_pb.UpdateEntryResponse{}, err
}
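
// cleanupChunks diffs the existing and new chunk lists, compacts the
// non-manifest chunks, and returns the chunks to keep plus the garbage chunks
// that are safe to delete.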
func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) {

	// remove old chunks if not included in the new ones
	if existingEntry != nil {
		garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.Chunks, newEntry.Chunks)
		if err != nil {
			return newEntry.Chunks, nil, fmt.Errorf("MinusChunks: %v", err)
		}
	}

	// files with manifest chunks are usually large and append only, skip calculating covered chunks
	manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.Chunks)

	chunks, coveredChunks := filer.CompactFileChunks(fs.lookupFileId, nonManifestChunks)
	garbage = append(garbage, coveredChunks...)

	if newEntry.Attributes != nil {
		so, _ := fs.detectStorageOption(fullpath,
			newEntry.Attributes.Collection,
			newEntry.Attributes.Replication,
			newEntry.Attributes.TtlSec,
			newEntry.Attributes.DiskType,
			"",
			"",
		) // ignore a read-only error here: the storage option is only needed to manifestize
		chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), chunks)
		if err != nil {
			// not good, but should be ok
			glog.V(0).Infof("MaybeManifestize: %v", err)
		}
	}

	chunks = append(chunks, manifestChunks...)

	return
}
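
// AppendToEntry appends req.Chunks at the current end of the named entry,
// creating the entry with default attributes if it does not exist yet.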
func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendToEntryRequest) (*filer_pb.AppendToEntryResponse, error) {

	glog.V(4).Infof("AppendToEntry %v", req)

	fullpath := util.NewFullPath(req.Directory, req.EntryName)
	var offset int64 = 0
	entry, err := fs.filer.FindEntry(ctx, fullpath)
	if err == filer_pb.ErrNotFound {
		entry = &filer.Entry{
			FullPath: fullpath,
			Attr: filer.Attr{
				Crtime: time.Now(),
				Mtime:  time.Now(),
				Mode:   os.FileMode(0644),
				Uid:    OS_UID,
				Gid:    OS_GID,
			},
		}
	} else {
		offset = int64(filer.TotalSize(entry.Chunks))
	}

	for _, chunk := range req.Chunks {
		chunk.Offset = offset
		offset += int64(chunk.Size)
	}

	entry.Chunks = append(entry.Chunks, req.Chunks...)

	so, err := fs.detectStorageOption(string(fullpath), entry.Collection, entry.Replication, entry.TtlSec, entry.DiskType, "", "")
	if err != nil {
		glog.Warningf("detectStorageOption: %v", err)
		return &filer_pb.AppendToEntryResponse{}, err
	}
	entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.Chunks)
	if err != nil {
		// not good, but should be ok
		glog.V(0).Infof("MaybeManifestize: %v", err)
	}

	err = fs.filer.CreateEntry(context.Background(), entry, false, false, nil)

	return &filer_pb.AppendToEntryResponse{}, err
}
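
// DeleteEntry deletes an entry's metadata and, optionally, its data chunks.
// Errors other than filer_pb.ErrNotFound are reported through resp.Error.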
func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {

	glog.V(4).Infof("DeleteEntry %v", req)

	err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures)
	resp = &filer_pb.DeleteEntryResponse{}
	if err != nil && err != filer_pb.ErrNotFound {
		resp.Error = err.Error()
	}
	return resp, nil
}
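
// AssignVolume asks the master for a file id and an upload location matching
// the detected storage option; failures are reported through resp.Error rather
// than as a gRPC error.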
func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) {

	so, err := fs.detectStorageOption(req.Path, req.Collection, req.Replication, req.TtlSec, req.DiskType, req.DataCenter, req.Rack)
	if err != nil {
		glog.V(3).Infof("AssignVolume: %v", err)
		return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil
	}

	assignRequest, altRequest := so.ToAssignRequests(int(req.Count))

	assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest)
	if err != nil {
		glog.V(3).Infof("AssignVolume: %v", err)
		return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil
	}
	if assignResult.Error != "" {
		glog.V(3).Infof("AssignVolume error: %v", assignResult.Error)
		return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil
	}

	return &filer_pb.AssignVolumeResponse{
		FileId:      assignResult.Fid,
		Count:       int32(assignResult.Count),
		Url:         assignResult.Url,
		PublicUrl:   assignResult.PublicUrl,
		Auth:        string(assignResult.Auth),
		Collection:  so.Collection,
		Replication: so.Replication,
	}, nil
}
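
// CollectionList proxies the collection listing request to the master.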
func (fs *FilerServer) CollectionList(ctx context.Context, req *filer_pb.CollectionListRequest) (resp *filer_pb.CollectionListResponse, err error) {

	glog.V(4).Infof("CollectionList %v", req)
	resp = &filer_pb.CollectionListResponse{}

	err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
		masterResp, err := client.CollectionList(context.Background(), &master_pb.CollectionListRequest{
			IncludeNormalVolumes: req.IncludeNormalVolumes,
			IncludeEcVolumes:     req.IncludeEcVolumes,
		})
		if err != nil {
			return err
		}
		for _, c := range masterResp.Collections {
			resp.Collections = append(resp.Collections, &filer_pb.Collection{Name: c.Name})
		}
		return nil
	})

	return
}
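
// DeleteCollection proxies the collection deletion request to the master.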
func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) {

	glog.V(4).Infof("DeleteCollection %v", req)

	err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
		_, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
			Name: req.GetCollection(),
		})
		return err
	})

	return &filer_pb.DeleteCollectionResponse{}, err
}
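
// Statistics forwards a statistics request to the master and relays the totals.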
func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) {

	var output *master_pb.StatisticsResponse

	err = fs.filer.MasterClient.WithClient(func(masterClient master_pb.SeaweedClient) error {
		grpcResponse, grpcErr := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{
			Replication: req.Replication,
			Collection:  req.Collection,
			Ttl:         req.Ttl,
			DiskType:    req.DiskType,
		})
		if grpcErr != nil {
			return grpcErr
		}

		output = grpcResponse
		return nil
	})

	if err != nil {
		return nil, err
	}

	return &filer_pb.StatisticsResponse{
		TotalSize: output.TotalSize,
		UsedSize:  output.UsedSize,
		FileCount: output.FileCount,
	}, nil
}
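
// GetFilerConfiguration exposes this filer's effective options to clients.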
func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) {

	t := &filer_pb.GetFilerConfigurationResponse{
		Masters:            fs.option.Masters,
		Collection:         fs.option.Collection,
		Replication:        fs.option.DefaultReplication,
		MaxMb:              uint32(fs.option.MaxMB),
		DirBuckets:         fs.filer.DirBucketsPath,
		Cipher:             fs.filer.Cipher,
		Signature:          fs.filer.Signature,
		MetricsAddress:     fs.metricsAddress,
		MetricsIntervalSec: int32(fs.metricsIntervalSec),
	}

	glog.V(4).Infof("GetFilerConfiguration: %v", t)

	return t, nil
}
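
// KeepConnected registers a broker and the resources it serves for the
// lifetime of the stream, then exchanges keep-alive messages until the stream
// ends, at which point the broker is unregistered.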
func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error {

	req, err := stream.Recv()
	if err != nil {
		return err
	}

	clientName := fmt.Sprintf("%s:%d", req.Name, req.GrpcPort)
	m := make(map[string]bool)
	for _, tp := range req.Resources {
		m[tp] = true
	}
	fs.brokersLock.Lock()
	fs.brokers[clientName] = m
	glog.V(0).Infof("+ broker %v", clientName)
	fs.brokersLock.Unlock()

	defer func() {
		fs.brokersLock.Lock()
		delete(fs.brokers, clientName)
		glog.V(0).Infof("- broker %v: %v", clientName, err)
		fs.brokersLock.Unlock()
	}()

	for {
		if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil {
			glog.V(0).Infof("send broker %v: %+v", clientName, err)
			return err
		}
		// println("replied")

		if _, err := stream.Recv(); err != nil {
			glog.V(0).Infof("recv broker %v: %v", clientName, err)
			return err
		}
		// println("received")
	}
}
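
// LocateBroker returns the broker currently holding req.Resource if one is
// registered, otherwise the list of all known local brokers.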
func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) {

	resp = &filer_pb.LocateBrokerResponse{}

	fs.brokersLock.Lock()
	defer fs.brokersLock.Unlock()

	var localBrokers []*filer_pb.LocateBrokerResponse_Resource

	for b, m := range fs.brokers {
		if _, found := m[req.Resource]; found {
			resp.Found = true
			resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{
				{
					GrpcAddresses: b,
					ResourceCount: int32(len(m)),
				},
			}
			return
		}
		localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{
			GrpcAddresses: b,
			ResourceCount: int32(len(m)),
		})
	}

	resp.Resources = localBrokers

	return resp, nil
}