2020-09-01 15:21:19 +08:00
|
|
|
package filer
|
2018-05-11 17:20:15 +08:00
|
|
|
|
|
|
|
import (
|
2018-07-29 12:02:56 +08:00
|
|
|
"context"
|
2018-05-11 17:20:15 +08:00
|
|
|
"fmt"
|
2021-11-08 16:09:11 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/cluster"
|
2021-09-13 13:47:52 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb"
|
2021-11-07 05:23:35 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
2018-05-28 02:52:26 +08:00
|
|
|
"os"
|
|
|
|
"strings"
|
2018-05-13 04:45:29 +08:00
|
|
|
"time"
|
2018-07-29 12:02:56 +08:00
|
|
|
|
2019-12-13 01:11:10 +08:00
|
|
|
"google.golang.org/grpc"
|
|
|
|
|
2018-07-29 12:02:56 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
2020-03-08 09:01:39 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
2020-02-21 07:44:17 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
2020-04-12 03:43:17 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
|
2018-07-28 17:10:32 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/wdclient"
|
2018-05-11 17:20:15 +08:00
|
|
|
)
|
|
|
|
|
2020-08-30 08:37:19 +08:00
|
|
|
const (
	// LogFlushInterval is the flush interval passed to the local metadata
	// log buffer (see log_buffer.NewLogBuffer call in NewFiler).
	LogFlushInterval = time.Minute
	// PaginationSize is a page size for listing operations.
	// NOTE(review): usage is outside this chunk — confirm against callers.
	PaginationSize = 1024
	// FilerStoreId is the KV key under which the store's random signature
	// is persisted (see setOrLoadFilerStoreSignature).
	FilerStoreId = "filer.store.id"
)
|
2019-12-13 01:11:10 +08:00
|
|
|
|
2018-12-03 11:42:50 +08:00
|
|
|
var (
	// OS_UID and OS_GID capture the process uid/gid at startup; they are
	// used as default ownership, e.g. for the Root entry below.
	OS_UID = uint32(os.Getuid())
	OS_GID = uint32(os.Getgid())
)
|
|
|
|
|
2018-05-11 17:20:15 +08:00
|
|
|
// Filer manages file system metadata: it wraps a persistent FilerStore,
// keeps a client connection to the master cluster, buffers local metadata
// change events, and aggregates metadata from peer filers.
type Filer struct {
	Store               VirtualFilerStore    // backing metadata store, wrapped by SetStore
	MasterClient        *wdclient.MasterClient
	fileIdDeletionQueue *util.UnboundedQueue // file ids pending deletion, drained by loopProcessingDeletion
	GrpcDialOption      grpc.DialOption
	DirBucketsPath      string               // presumably the directory holding buckets — confirm against callers
	FsyncBuckets        []string
	buckets             *FilerBuckets
	Cipher              bool
	LocalMetaLogBuffer  *log_buffer.LogBuffer // buffers local metadata events; flushed via logFlushFunc
	metaLogCollection   string
	metaLogReplication  string
	MetaAggregator      *MetaAggregator // set up by AggregateFromPeers
	Signature           int32           // per-store random id, persisted under FilerStoreId
	FilerConf           *FilerConf
	RemoteStorage       *FilerRemoteStorage
	UniqueFileId        uint32 // random id generated once per process in NewFiler
}
|
|
|
|
|
2022-05-02 12:59:16 +08:00
|
|
|
func NewFiler(masters map[string]pb.ServerAddress, grpcDialOption grpc.DialOption, filerHost pb.ServerAddress,
|
|
|
|
filerGroup string, collection string, replication string, dataCenter string, notifyFn func()) *Filer {
|
2018-11-21 12:56:28 +08:00
|
|
|
f := &Filer{
|
2022-05-02 12:59:16 +08:00
|
|
|
MasterClient: wdclient.NewMasterClient(grpcDialOption, filerGroup, cluster.FilerType, filerHost, dataCenter, masters),
|
2020-02-21 07:44:17 +08:00
|
|
|
fileIdDeletionQueue: util.NewUnboundedQueue(),
|
|
|
|
GrpcDialOption: grpcDialOption,
|
2020-11-16 06:06:03 +08:00
|
|
|
FilerConf: NewFilerConf(),
|
2021-07-29 13:43:12 +08:00
|
|
|
RemoteStorage: NewFilerRemoteStorage(),
|
2021-09-25 16:18:44 +08:00
|
|
|
UniqueFileId: uint32(util.RandomInt32()),
|
2018-05-11 17:20:15 +08:00
|
|
|
}
|
2021-06-27 20:51:28 +08:00
|
|
|
f.LocalMetaLogBuffer = log_buffer.NewLogBuffer("local", LogFlushInterval, f.logFlushFunc, notifyFn)
|
2020-04-13 05:03:07 +08:00
|
|
|
f.metaLogCollection = collection
|
|
|
|
f.metaLogReplication = replication
|
2020-11-12 07:10:06 +08:00
|
|
|
|
2018-11-21 12:56:28 +08:00
|
|
|
go f.loopProcessingDeletion()
|
|
|
|
|
|
|
|
return f
|
2018-05-11 17:20:15 +08:00
|
|
|
}
|
|
|
|
|
2021-11-07 05:23:35 +08:00
|
|
|
func (f *Filer) AggregateFromPeers(self pb.ServerAddress) {
|
2020-07-13 08:31:24 +08:00
|
|
|
|
2021-11-07 05:23:35 +08:00
|
|
|
f.MetaAggregator = NewMetaAggregator(f, self, f.GrpcDialOption)
|
|
|
|
f.MasterClient.OnPeerUpdate = f.MetaAggregator.OnPeerUpdate
|
|
|
|
|
|
|
|
for _, peerUpdate := range f.ListExistingPeerUpdates() {
|
|
|
|
f.MetaAggregator.OnPeerUpdate(peerUpdate)
|
2020-07-13 08:31:24 +08:00
|
|
|
}
|
2020-09-06 15:11:46 +08:00
|
|
|
|
2021-11-07 05:23:35 +08:00
|
|
|
}
|
2020-07-13 08:31:24 +08:00
|
|
|
|
2021-12-26 16:15:03 +08:00
|
|
|
func (f *Filer) ListExistingPeerUpdates() (existingNodes []*master_pb.ClusterNodeUpdate) {
|
2021-11-07 05:23:35 +08:00
|
|
|
|
2021-12-26 16:15:03 +08:00
|
|
|
if grpcErr := pb.WithMasterClient(false, f.MasterClient.GetMaster(), f.GrpcDialOption, func(client master_pb.SeaweedClient) error {
|
2021-11-07 05:23:35 +08:00
|
|
|
resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
|
2021-11-08 16:09:11 +08:00
|
|
|
ClientType: cluster.FilerType,
|
2022-05-02 12:59:16 +08:00
|
|
|
FilerGroup: f.MasterClient.FilerGroup,
|
2021-11-07 05:23:35 +08:00
|
|
|
})
|
|
|
|
|
|
|
|
glog.V(0).Infof("the cluster has %d filers\n", len(resp.ClusterNodes))
|
|
|
|
for _, node := range resp.ClusterNodes {
|
|
|
|
existingNodes = append(existingNodes, &master_pb.ClusterNodeUpdate{
|
2021-11-08 16:09:11 +08:00
|
|
|
NodeType: cluster.FilerType,
|
2021-11-07 05:23:35 +08:00
|
|
|
Address: node.Address,
|
|
|
|
IsLeader: node.IsLeader,
|
|
|
|
IsAdd: true,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}); grpcErr != nil {
|
|
|
|
glog.V(0).Infof("connect to %s: %v", f.MasterClient.GetMaster(), grpcErr)
|
|
|
|
}
|
|
|
|
return
|
2020-07-13 08:31:24 +08:00
|
|
|
}
|
|
|
|
|
2018-05-28 02:52:26 +08:00
|
|
|
func (f *Filer) SetStore(store FilerStore) {
|
2020-07-13 23:19:48 +08:00
|
|
|
f.Store = NewFilerStoreWrapper(store)
|
2020-09-06 05:08:59 +08:00
|
|
|
|
|
|
|
f.setOrLoadFilerStoreSignature(store)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// setOrLoadFilerStoreSignature loads the persisted 4-byte store signature
// from the store's KV, or generates and persists a new random one on first
// use, assigning it to f.Signature either way. Any unexpected KV error or
// malformed stored value is fatal.
func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) {
	storeIdBytes, err := store.KvGet(context.Background(), []byte(FilerStoreId))
	// First run: key absent, or present but empty — create a new signature.
	if err == ErrKvNotFound || err == nil && len(storeIdBytes) == 0 {
		f.Signature = util.RandomInt32()
		storeIdBytes = make([]byte, 4)
		util.Uint32toBytes(storeIdBytes, uint32(f.Signature))
		if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil {
			glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
		}
		glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature)
	} else if err == nil && len(storeIdBytes) == 4 {
		// Normal path: reuse the existing signature.
		f.Signature = int32(util.BytesToUint32(storeIdBytes))
		glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature)
	} else {
		// Unexpected error or a value of the wrong length.
		glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
	}
}
|
|
|
|
|
2020-07-07 00:24:54 +08:00
|
|
|
func (f *Filer) GetStore() (store FilerStore) {
|
2020-07-13 23:19:48 +08:00
|
|
|
return f.Store
|
2020-07-07 00:24:54 +08:00
|
|
|
}
|
|
|
|
|
2021-09-13 13:47:52 +08:00
|
|
|
func (fs *Filer) GetMaster() pb.ServerAddress {
|
2018-07-29 05:22:46 +08:00
|
|
|
return fs.MasterClient.GetMaster()
|
2018-07-28 17:10:32 +08:00
|
|
|
}
|
|
|
|
|
2021-11-07 05:23:35 +08:00
|
|
|
func (fs *Filer) KeepMasterClientConnected() {
|
2021-11-06 08:52:15 +08:00
|
|
|
fs.MasterClient.KeepConnectedToMaster()
|
2018-07-28 17:10:32 +08:00
|
|
|
}
|
|
|
|
|
2019-03-31 14:08:29 +08:00
|
|
|
func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) {
|
2020-07-13 23:19:48 +08:00
|
|
|
return f.Store.BeginTransaction(ctx)
|
2019-03-31 14:08:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (f *Filer) CommitTransaction(ctx context.Context) error {
|
2020-07-13 23:19:48 +08:00
|
|
|
return f.Store.CommitTransaction(ctx)
|
2019-03-31 14:08:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (f *Filer) RollbackTransaction(ctx context.Context) error {
|
2020-07-13 23:19:48 +08:00
|
|
|
return f.Store.RollbackTransaction(ctx)
|
2019-03-31 14:08:29 +08:00
|
|
|
}
|
|
|
|
|
2022-03-17 14:55:31 +08:00
|
|
|
// CreateEntry creates entry, or updates it if one already exists at the same
// path. o_excl makes an existing entry an error (O_EXCL semantics). Unless
// skipCreateParentDir is set, missing ancestor directories are created first.
// After a successful insert/update it registers buckets, emits a metadata
// change event, and deletes chunks that the new entry no longer references.
func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool, signatures []int32, skipCreateParentDir bool) error {

	// The root entry always exists; creating it is a no-op.
	if string(entry.FullPath) == "/" {
		return nil
	}

	// Lookup failure is treated the same as "not found": oldEntry stays nil.
	oldEntry, _ := f.FindEntry(ctx, entry.FullPath)

	/*
		if !hasWritePermission(lastDirectoryEntry, entry) {
			glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
				lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
			return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
		}
	*/

	if oldEntry == nil {
		// New entry: make sure all ancestor directories exist first.
		if !skipCreateParentDir {
			dirParts := strings.Split(string(entry.FullPath), "/")
			if err := f.ensureParentDirecotryEntry(ctx, entry, dirParts, len(dirParts)-1, isFromOtherCluster); err != nil {
				return err
			}
		}

		glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
		if err := f.Store.InsertEntry(ctx, entry); err != nil {
			glog.Errorf("insert entry %s: %v", entry.FullPath, err)
			return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
		}
	} else {
		// Existing entry: honor exclusive-create, otherwise update in place.
		if o_excl {
			glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
			return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
		}
		glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
		if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
			glog.Errorf("update entry %s: %v", entry.FullPath, err)
			return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
		}
	}

	f.maybeAddBucket(entry)
	// Broadcast the metadata change (oldEntry may be nil for a fresh create).
	f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures)

	// Reclaim chunks referenced by oldEntry but not by the new entry.
	f.deleteChunksIfNotNew(oldEntry, entry)

	glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)

	return nil
}
|
|
|
|
|
2020-12-28 12:46:15 +08:00
|
|
|
// ensureParentDirecotryEntry (sic — name typo kept; callers in this file use
// it) recursively ensures the directory "/"+dirParts[:level] exists, creating
// missing ancestors top-down. Created directories inherit ownership,
// collection, replication and mode bits (plus execute bits) from entry.
// Returns an error only when a directory genuinely could not be created, or
// when a path component exists but is a file.
func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, dirParts []string, level int, isFromOtherCluster bool) (err error) {

	// Reached "/": the root always exists.
	if level == 0 {
		return nil
	}

	dirPath := "/" + util.Join(dirParts[:level]...)
	// fmt.Printf("%d directory: %+v\n", i, dirPath)

	// check the store directly
	glog.V(4).Infof("find uncached directory: %s", dirPath)
	dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))

	// no such existing directory
	if dirEntry == nil {

		// ensure parent directory
		if err = f.ensureParentDirecotryEntry(ctx, entry, dirParts, level-1, isFromOtherCluster); err != nil {
			return err
		}

		// create the directory
		now := time.Now()

		dirEntry = &Entry{
			FullPath: util.FullPath(dirPath),
			Attr: Attr{
				Mtime:  now,
				Crtime: now,
				// inherit entry's mode, force directory + execute bits
				Mode:        os.ModeDir | entry.Mode | 0111,
				Uid:         entry.Uid,
				Gid:         entry.Gid,
				Collection:  entry.Collection,
				Replication: entry.Replication,
				UserName:    entry.UserName,
				GroupNames:  entry.GroupNames,
			},
		}

		glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
		mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
		if mkdirErr != nil {
			// Tolerate a concurrent create: only fail if the directory still
			// does not exist after the insert error.
			if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
				glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
				return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
			}
		} else {
			f.maybeAddBucket(dirEntry)
			f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil)
		}

	} else if !dirEntry.IsDirectory() {
		// A file occupies this path component — cannot create children under it.
		glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
		return fmt.Errorf("%s is a file", dirPath)
	}

	return nil
}
|
|
|
|
|
2019-03-16 06:55:34 +08:00
|
|
|
func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
|
2018-11-29 16:07:54 +08:00
|
|
|
if oldEntry != nil {
|
2020-11-30 19:11:52 +08:00
|
|
|
entry.Attr.Crtime = oldEntry.Attr.Crtime
|
2018-11-29 16:07:54 +08:00
|
|
|
if oldEntry.IsDirectory() && !entry.IsDirectory() {
|
2021-03-31 11:36:06 +08:00
|
|
|
glog.Errorf("existing %s is a directory", oldEntry.FullPath)
|
|
|
|
return fmt.Errorf("existing %s is a directory", oldEntry.FullPath)
|
2018-11-29 16:07:54 +08:00
|
|
|
}
|
|
|
|
if !oldEntry.IsDirectory() && entry.IsDirectory() {
|
2021-03-31 11:36:06 +08:00
|
|
|
glog.Errorf("existing %s is a file", oldEntry.FullPath)
|
|
|
|
return fmt.Errorf("existing %s is a file", oldEntry.FullPath)
|
2018-11-29 16:07:54 +08:00
|
|
|
}
|
|
|
|
}
|
2020-07-13 23:19:48 +08:00
|
|
|
return f.Store.UpdateEntry(ctx, entry)
|
2018-05-11 17:20:15 +08:00
|
|
|
}
|
|
|
|
|
2020-12-11 14:23:22 +08:00
|
|
|
var (
	// Root is the singleton entry returned for "/" lookups (see FindEntry).
	// It is owned by the process uid/gid with mode 0755; its timestamps are
	// fixed at package initialization time.
	Root = &Entry{
		FullPath: "/",
		Attr: Attr{
			Mtime:  time.Now(),
			Crtime: time.Now(),
			Mode:   os.ModeDir | 0755,
			Uid:    OS_UID,
			Gid:    OS_GID,
		},
	}
)
|
2018-12-03 11:42:50 +08:00
|
|
|
|
2020-12-11 14:23:22 +08:00
|
|
|
// FindEntry looks up the entry at path p. The root "/" always resolves to the
// shared Root entry. An entry whose TTL has elapsed is lazily deleted from
// the store and reported as filer_pb.ErrNotFound.
func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) {

	if string(p) == "/" {
		return Root, nil
	}
	entry, err = f.Store.FindEntry(ctx, p)
	if entry != nil && entry.TtlSec > 0 {
		if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
			// Expired: best-effort lazy deletion; the error is intentionally
			// ignored since the caller only needs "not found".
			f.Store.DeleteOneEntry(ctx, entry)
			return nil, filer_pb.ErrNotFound
		}
	}
	return

}
|
|
|
|
|
2021-01-16 15:56:24 +08:00
|
|
|
func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (expiredCount int64, lastFileName string, err error) {
|
|
|
|
lastFileName, err = f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
|
2021-12-28 14:53:00 +08:00
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return false
|
|
|
|
default:
|
|
|
|
if entry.TtlSec > 0 {
|
|
|
|
if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
|
|
|
|
f.Store.DeleteOneEntry(ctx, entry)
|
|
|
|
expiredCount++
|
|
|
|
return true
|
|
|
|
}
|
2020-03-09 16:02:01 +08:00
|
|
|
}
|
2021-12-28 14:53:00 +08:00
|
|
|
return eachEntryFunc(entry)
|
2020-03-09 16:02:01 +08:00
|
|
|
}
|
2021-01-16 15:56:24 +08:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return expiredCount, lastFileName, err
|
2020-03-09 16:02:01 +08:00
|
|
|
}
|
|
|
|
return
|
2018-05-11 17:20:15 +08:00
|
|
|
}
|
|
|
|
|
2020-03-15 11:30:26 +08:00
|
|
|
// Shutdown stops the local metadata log buffer (presumably flushing pending
// entries — confirm in log_buffer), then shuts down the backing store.
// The order matters: the log buffer may still write to the store.
func (f *Filer) Shutdown() {
	f.LocalMetaLogBuffer.Shutdown()
	f.Store.Shutdown()
}
|