seaweedfs/weed/filer/filer_on_meta_event.go

package filer

import (
	"bytes"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// onMetadataChangeEvent is triggered after the filer has processed change events from local or remote filers.
func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse) {
	f.maybeReloadFilerConfiguration(event)
	f.maybeReloadRemoteStorageConfigurationAndMapping(event)
	f.onBucketEvents(event)
}

// onBucketEvents notifies the filer store when a bucket directory under DirBucketsPath is created or deleted.
func (f *Filer) onBucketEvents(event *filer_pb.SubscribeMetadataResponse) {
	message := event.EventNotification

	// skip events that originated from this filer itself
	for _, sig := range message.Signatures {
		if sig == f.Signature {
			return
		}
	}

	if f.DirBucketsPath == event.Directory {
		if filer_pb.IsCreate(event) {
			if message.NewEntry.IsDirectory {
				f.Store.OnBucketCreation(message.NewEntry.Name)
			}
		}
		if filer_pb.IsDelete(event) {
			if message.OldEntry.IsDirectory {
				f.Store.OnBucketDeletion(message.OldEntry.Name)
			}
		}
	}
}

// maybeReloadFilerConfiguration reloads the filer configuration when the entry named FilerConfName changes under DirectoryEtcSeaweedFS.
func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataResponse) {
	if DirectoryEtcSeaweedFS != event.Directory {
		if DirectoryEtcSeaweedFS != event.EventNotification.NewParentPath {
			return
		}
	}

	entry := event.EventNotification.NewEntry
	if entry == nil {
		return
	}

	glog.V(0).Infof("processing %v", event)
	if entry.Name == FilerConfName {
		f.reloadFilerConfiguration(entry)
	}
}

// readEntry streams the content referenced by the given chunks into memory and returns it as a byte slice.
func (f *Filer) readEntry(chunks []*filer_pb.FileChunk, size uint64) ([]byte, error) {
	var buf bytes.Buffer
	err := StreamContent(f.MasterClient, &buf, chunks, 0, int64(size))
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
	fc := NewFilerConf()
	err := fc.loadFromChunks(f, entry.Content, entry.Chunks, FileSize(entry))
	if err != nil {
		glog.Errorf("read filer conf chunks: %v", err)
		return
	}
	f.FilerConf = fc
}

// LoadFilerConf loads the filer configuration, retrying on failure.
func (f *Filer) LoadFilerConf() {
	fc := NewFilerConf()
	err := util.Retry("loadFilerConf", func() error {
		return fc.loadFromFiler(f)
	})
	if err != nil {
		glog.Errorf("read filer conf: %v", err)
		return
	}
	f.FilerConf = fc
}

////////////////////////////////////
// load and maintain remote storages
////////////////////////////////////

// LoadRemoteStorageConfAndMapping loads the remote storage configurations and the directory-to-remote mappings.
func (f *Filer) LoadRemoteStorageConfAndMapping() {
	if err := f.RemoteStorage.LoadRemoteStorageConfigurationsAndMapping(f); err != nil {
		glog.Errorf("read remote conf and mapping: %v", err)
		return
	}
}

func (f *Filer) maybeReloadRemoteStorageConfigurationAndMapping(event *filer_pb.SubscribeMetadataResponse) {
	// FIXME add reloading
}
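
// A minimal sketch (not part of this file's actual implementation) of how the
// reloading above could look, following the same pattern as
// maybeReloadFilerConfiguration. It assumes a DirectoryEtcRemote constant for
// the remote-storage configuration directory and reuses
// LoadRemoteStorageConfigurationsAndMapping; both are assumptions, so the
// sketch is kept commented out.
//
// func (f *Filer) maybeReloadRemoteStorageConfigurationAndMapping(event *filer_pb.SubscribeMetadataResponse) {
// 	if DirectoryEtcRemote != event.Directory && DirectoryEtcRemote != event.EventNotification.NewParentPath {
// 		return
// 	}
// 	if err := f.RemoteStorage.LoadRemoteStorageConfigurationsAndMapping(f); err != nil {
// 		glog.Errorf("reload remote conf and mapping: %v", err)
// 	}
// }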