package weed_server

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)
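
// LookupDirectoryEntry looks up a single entry by directory and name.
// A missing entry comes back as an empty response with a nil error, so
// callers can tell "not found" apart from a real lookup failure.
//
// A minimal client-side sketch, assuming `conn` is an established
// *grpc.ClientConn to this filer (the client constructor is the standard
// protoc-generated one):
//
//	client := filer_pb.NewSeaweedFilerClient(conn)
//	resp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
//		Directory: "/some/dir",
//		Name:      "file.txt",
//	})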
func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) {

	entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))))
	if err == filer2.ErrNotFound {
		return &filer_pb.LookupDirectoryEntryResponse{}, nil
	}
	if err != nil {
		glog.V(3).Infof("LookupDirectoryEntry %s: %+v", filepath.Join(req.Directory, req.Name), err)
		return nil, err
	}

	return &filer_pb.LookupDirectoryEntryResponse{
		Entry: &filer_pb.Entry{
			Name:        req.Name,
			IsDirectory: entry.IsDirectory(),
			Attributes:  filer2.EntryAttributeToPb(entry),
			Chunks:      entry.Chunks,
			Extended:    entry.Extended,
		},
	}, nil
}
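
// ListEntries streams the entries under req.Directory back to the client,
// paging through the store in batches of up to filer2.PaginationSize until
// req.Limit entries have been sent or the directory is exhausted. An
// optional req.Prefix filters entries by name after pagination.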
func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error {

	limit := int(req.Limit)
	if limit == 0 {
		limit = fs.option.DirListingLimit
	}

	paginationLimit := filer2.PaginationSize
	if limit < paginationLimit {
		paginationLimit = limit
	}

	lastFileName := req.StartFromFileName
	includeLastFile := req.InclusiveStartFrom
	for limit > 0 {
		entries, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit)

		if err != nil {
			return err
		}
		if len(entries) == 0 {
			return nil
		}

		includeLastFile = false

		for _, entry := range entries {

			lastFileName = entry.Name()

			if req.Prefix != "" {
				if !strings.HasPrefix(entry.Name(), req.Prefix) {
					continue
				}
			}

			if err := stream.Send(&filer_pb.ListEntriesResponse{
				Entry: &filer_pb.Entry{
					Name:        entry.Name(),
					IsDirectory: entry.IsDirectory(),
					Chunks:      entry.Chunks,
					Attributes:  filer2.EntryAttributeToPb(entry),
					Extended:    entry.Extended,
				},
			}); err != nil {
				return err
			}

			limit--
			if limit == 0 {
				return nil
			}
		}

		if len(entries) < paginationLimit {
			break
		}

	}

	return nil
}
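
// LookupVolume resolves volume ids to volume server locations using the
// master client's cached view. An id that is valid but unknown to the
// cache is skipped; an id that fails to parse aborts the whole request.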
func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVolumeRequest) (*filer_pb.LookupVolumeResponse, error) {

	resp := &filer_pb.LookupVolumeResponse{
		LocationsMap: make(map[string]*filer_pb.Locations),
	}

	for _, vidString := range req.VolumeIds {
		vid, err := strconv.Atoi(vidString)
		if err != nil {
			// log the raw string: vid carries no useful value when parsing fails
			glog.V(1).Infof("Unknown volume id %s", vidString)
			return nil, err
		}
		var locs []*filer_pb.Location
		locations, found := fs.filer.MasterClient.GetLocations(uint32(vid))
		if !found {
			continue
		}
		for _, loc := range locations {
			locs = append(locs, &filer_pb.Location{
				Url:       loc.Url,
				PublicUrl: loc.PublicUrl,
			})
		}
		resp.LocationsMap[vidString] = &filer_pb.Locations{
			Locations: locs,
		}
	}

	return resp, nil
}
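
// CreateEntry creates a file or directory entry. Duplicate chunks in the
// request are compacted first, and the chunks they displace are deleted
// once the entry is persisted. Failures are reported through resp.Error
// rather than as a gRPC error.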
func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {

	resp = &filer_pb.CreateEntryResponse{}

	fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name)))
	chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks)

	if req.Entry.Attributes == nil {
		glog.V(3).Infof("CreateEntry %s: nil attributes", filepath.Join(req.Directory, req.Entry.Name))
		resp.Error = "cannot create entry with empty attributes"
		return
	}

	createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{
		FullPath: fullpath,
		Attr:     filer2.PbToEntryAttribute(req.Entry.Attributes),
		Chunks:   chunks,
	}, req.OExcl)

	if createErr == nil {
		fs.filer.DeleteChunks(garbages)
	} else {
		glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr)
		resp.Error = createErr.Error()
	}

	return
}
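
// UpdateEntry replaces an existing entry's attributes and chunk list.
// Chunks dropped by the update, and duplicates removed by compaction, are
// deleted only after the store accepts the new entry. An unchanged entry
// returns early without notification; otherwise subscribers are notified
// of the update event whether or not the store update succeeded.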
func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) {

	fullpath := filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))
	entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(fullpath))
	if err != nil {
		return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err)
	}

	// remove old chunks if not included in the new ones
	unusedChunks := filer2.MinusChunks(entry.Chunks, req.Entry.Chunks)

	chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks)

	newEntry := &filer2.Entry{
		FullPath: filer2.FullPath(fullpath),
		Attr:     entry.Attr,
		Extended: req.Entry.Extended,
		Chunks:   chunks,
	}

	glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v, extended: %v => %v",
		fullpath, entry.Attr, len(entry.Chunks), entry.Chunks,
		req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks,
		entry.Extended, req.Entry.Extended)

	if req.Entry.Attributes != nil {
		if req.Entry.Attributes.Mtime != 0 {
			newEntry.Attr.Mtime = time.Unix(req.Entry.Attributes.Mtime, 0)
		}
		if req.Entry.Attributes.FileMode != 0 {
			newEntry.Attr.Mode = os.FileMode(req.Entry.Attributes.FileMode)
		}
		newEntry.Attr.Uid = req.Entry.Attributes.Uid
		newEntry.Attr.Gid = req.Entry.Attributes.Gid
		newEntry.Attr.Mime = req.Entry.Attributes.Mime
		newEntry.Attr.UserName = req.Entry.Attributes.UserName
		newEntry.Attr.GroupNames = req.Entry.Attributes.GroupName
	}

	if filer2.EqualEntry(entry, newEntry) {
		// nothing changed; err is known to be nil at this point
		return &filer_pb.UpdateEntryResponse{}, nil
	}

	if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil {
		fs.filer.DeleteChunks(unusedChunks)
		fs.filer.DeleteChunks(garbages)
	} else {
		glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err)
	}

	fs.filer.NotifyUpdateEvent(entry, newEntry, true)

	return &filer_pb.UpdateEntryResponse{}, err
}
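
// DeleteEntry removes an entry's metadata and, optionally, its data chunks.
// Deletion failures are reported in resp.Error; the gRPC call itself
// always succeeds.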
func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {
	err = fs.filer.DeleteEntryMetaAndData(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData)
	resp = &filer_pb.DeleteEntryResponse{}
	if err != nil {
		resp.Error = err.Error()
	}
	return resp, nil
}
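
// StreamDeleteEntries serves a bidirectional stream of delete requests,
// answering each request with a DeleteEntryResponse whose Error field
// carries any per-entry failure. The stream ends when Recv or Send fails.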
func (fs *FilerServer) StreamDeleteEntries(stream filer_pb.SeaweedFiler_StreamDeleteEntriesServer) error {
	for {
		req, err := stream.Recv()
		if err != nil {
			return fmt.Errorf("receive delete entry request: %v", err)
		}
		fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))
		err = fs.filer.DeleteEntryMetaAndData(context.Background(), fullpath, req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData)
		resp := &filer_pb.DeleteEntryResponse{}
		if err != nil {
			resp.Error = err.Error()
		}
		if err := stream.Send(resp); err != nil {
			return err
		}
	}
}
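
// AssignVolume asks the master for a file id and an upload location. The
// collection and replication settings are derived from the request and the
// parent path; when a data center is requested, a second request without
// the data-center constraint is prepared as a fallback. A hedged sketch of
// the client side, using only fields this handler reads:
//
//	resp, err := client.AssignVolume(ctx, &filer_pb.AssignVolumeRequest{
//		Count:      1,
//		ParentPath: "/some/dir",
//	})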
func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) {

	ttlStr := ""
	if req.TtlSec > 0 {
		ttlStr = strconv.Itoa(int(req.TtlSec))
	}
	collection, replication := fs.detectCollection(req.ParentPath, req.Collection, req.Replication)

	var altRequest *operation.VolumeAssignRequest

	dataCenter := req.DataCenter
	if dataCenter == "" {
		dataCenter = fs.option.DataCenter
	}

	assignRequest := &operation.VolumeAssignRequest{
		Count:       uint64(req.Count),
		Replication: replication,
		Collection:  collection,
		Ttl:         ttlStr,
		DataCenter:  dataCenter,
	}
	if dataCenter != "" {
		altRequest = &operation.VolumeAssignRequest{
			Count:       uint64(req.Count),
			Replication: replication,
			Collection:  collection,
			Ttl:         ttlStr,
			DataCenter:  "",
		}
	}
	assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest)
	if err != nil {
		glog.V(3).Infof("AssignVolume: %v", err)
		return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil
	}
	if assignResult.Error != "" {
		glog.V(3).Infof("AssignVolume error: %v", assignResult.Error)
		return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil
	}

	return &filer_pb.AssignVolumeResponse{
		FileId:      assignResult.Fid,
		Count:       int32(assignResult.Count),
		Url:         assignResult.Url,
		PublicUrl:   assignResult.PublicUrl,
		Auth:        string(assignResult.Auth),
		Collection:  collection,
		Replication: replication,
	}, nil
}
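
// DeleteCollection forwards a collection deletion to the master.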
func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) {

	err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
		_, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
			Name: req.GetCollection(),
		})
		return err
	})

	return &filer_pb.DeleteCollectionResponse{}, err
}
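
// Statistics proxies a usage query for a (replication, collection, ttl)
// combination to the master and returns the aggregated totals.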
func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) {

	input := &master_pb.StatisticsRequest{
		Replication: req.Replication,
		Collection:  req.Collection,
		Ttl:         req.Ttl,
	}

	output, err := operation.Statistics(fs.filer.GetMaster(), fs.grpcDialOption, input)
	if err != nil {
		return nil, err
	}

	return &filer_pb.StatisticsResponse{
		TotalSize: output.TotalSize,
		UsedSize:  output.UsedSize,
		FileCount: output.FileCount,
	}, nil
}
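
// GetFilerConfiguration reports this filer's static configuration so that
// clients can discover the masters, defaults, and special directories.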
func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) {

	return &filer_pb.GetFilerConfigurationResponse{
		Masters:     fs.option.Masters,
		Collection:  fs.option.Collection,
		Replication: fs.option.DefaultReplication,
		MaxMb:       uint32(fs.option.MaxMB),
		DirBuckets:  fs.filer.DirBucketsPath,
		DirQueues:   fs.filer.DirQueuesPath,
	}, nil
}