seaweedfs/weed/server/filer_grpc_server_sub_meta.go

package weed_server

import (
	"fmt"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"strings"
	"time"

	"google.golang.org/protobuf/proto"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
)

const (
	// MaxUnsyncedEvents: send an empty notification carrying the latest timestamp once this many events have been filtered out
	MaxUnsyncedEvents = 1e3
)
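
// SubscribeMetadata streams metadata change events aggregated from all filer peers to the
// subscriber. It first catches up from the persisted log on disk, then tails the in-memory
// MetaLogBuffer, falling back to disk whenever the buffer reports ResumeFromDiskError.
//
// A caller typically fills in a request along these lines (a minimal sketch; the values are
// illustrative only, not taken from this file):
//
//	req := &filer_pb.SubscribeMetadataRequest{
//		ClientName:  "example-subscriber", // hypothetical name
//		PathPrefix:  "/buckets/",
//		SinceNs:     time.Now().Add(-time.Hour).UnixNano(),
//		ClientId:    1,
//		ClientEpoch: 1,
//	}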
func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer) error {

	peerAddress := findClientAddress(stream.Context(), 0)

	isReplacing, alreadyKnown, clientName := fs.addClient("", req.ClientName, peerAddress, req.ClientId, req.ClientEpoch)
	if isReplacing {
		fs.filer.MetaAggregator.ListenersCond.Broadcast() // nudges the subscribers that are waiting
	} else if alreadyKnown {
		return fmt.Errorf("duplicated subscription detected for client %s id %d", clientName, req.ClientId)
	}
	defer fs.deleteClient("", clientName, req.ClientId, req.ClientEpoch)

	lastReadTime := log_buffer.NewMessagePosition(req.SinceNs, -2)
	glog.V(0).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

	eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName)

	eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)

	var processedTsNs int64
	var readPersistedLogErr error
	var readInMemoryLogErr error
	var isDone bool

	for {

		glog.V(4).Infof("read on disk %v aggregated subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

		processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn)
		if readPersistedLogErr != nil {
			return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr)
		}
		if isDone {
			return nil
		}

		if processedTsNs != 0 {
			lastReadTime = log_buffer.NewMessagePosition(processedTsNs, -2)
		}

		glog.V(4).Infof("read in memory %v aggregated subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

		lastReadTime, isDone, readInMemoryLogErr = fs.filer.MetaAggregator.MetaLogBuffer.LoopProcessLogData("aggMeta:"+clientName, lastReadTime, req.UntilNs, func() bool {
			fs.filer.MetaAggregator.ListenersLock.Lock()
			fs.filer.MetaAggregator.ListenersCond.Wait()
			fs.filer.MetaAggregator.ListenersLock.Unlock()

			if !fs.hasClient(req.ClientId, req.ClientEpoch) {
				return false
			}

			return true
		}, eachLogEntryFn)
		if readInMemoryLogErr != nil {
			if readInMemoryLogErr == log_buffer.ResumeFromDiskError {
				continue
			}
			glog.Errorf("processed to %v: %v", lastReadTime, readInMemoryLogErr)
			if readInMemoryLogErr != log_buffer.ResumeError {
				break
			}
		}
		if isDone {
			return nil
		}
		if !fs.hasClient(req.ClientId, req.ClientEpoch) {
			glog.V(0).Infof("client %v is closed", clientName)
			return nil
		}

		time.Sleep(1127 * time.Millisecond)
	}

	return readInMemoryLogErr
}
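
// SubscribeLocalMetadata is like SubscribeMetadata but streams only this filer's local metadata
// log (LocalMetaLogBuffer) rather than the aggregated log from all filer peers. It negates the
// client id so its addClient/deleteClient bookkeeping does not collide with SubscribeMetadata.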
func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeLocalMetadataServer) error {

	peerAddress := findClientAddress(stream.Context(), 0)

	// use negative client id to differentiate from addClient()/deleteClient() used in SubscribeMetadata()
	req.ClientId = -req.ClientId

	isReplacing, alreadyKnown, clientName := fs.addClient("local", req.ClientName, peerAddress, req.ClientId, req.ClientEpoch)
	if isReplacing {
		fs.listenersCond.Broadcast() // nudges the subscribers that are waiting
	} else if alreadyKnown {
		return fmt.Errorf("duplicated local subscription detected for client %s clientId:%d", clientName, req.ClientId)
	}
	defer func() {
		glog.V(0).Infof("disconnect %v local subscriber %s clientId:%d", clientName, req.PathPrefix, req.ClientId)
		fs.deleteClient("local", clientName, req.ClientId, req.ClientEpoch)
	}()

	lastReadTime := log_buffer.NewMessagePosition(req.SinceNs, -2)
	glog.V(0).Infof(" + %v local subscribe %s from %+v clientId:%d", clientName, req.PathPrefix, lastReadTime, req.ClientId)

	eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName)

	eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)

	var processedTsNs int64
	var readPersistedLogErr error
	var readInMemoryLogErr error
	var isDone bool

	for {
		// println("reading from persisted logs ...")
		glog.V(0).Infof("read on disk %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

		processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn)
		if readPersistedLogErr != nil {
			glog.V(0).Infof("read on disk %v local subscribe %s from %+v: %v", clientName, req.PathPrefix, lastReadTime, readPersistedLogErr)
			return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr)
		}
		if isDone {
			return nil
		}

		if processedTsNs != 0 {
			lastReadTime = log_buffer.NewMessagePosition(processedTsNs, -2)
		} else {
			if readInMemoryLogErr == log_buffer.ResumeFromDiskError {
				time.Sleep(1127 * time.Millisecond)
				continue
			}
		}

		glog.V(0).Infof("read in memory %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

		lastReadTime, isDone, readInMemoryLogErr = fs.filer.LocalMetaLogBuffer.LoopProcessLogData("localMeta:"+clientName, lastReadTime, req.UntilNs, func() bool {
			fs.listenersLock.Lock()
			fs.listenersCond.Wait()
			fs.listenersLock.Unlock()

			if !fs.hasClient(req.ClientId, req.ClientEpoch) {
				return false
			}

			return true
		}, eachLogEntryFn)
		if readInMemoryLogErr != nil {
			if readInMemoryLogErr == log_buffer.ResumeFromDiskError {
				continue
			}
			glog.Errorf("processed to %v: %v", lastReadTime, readInMemoryLogErr)
			if readInMemoryLogErr != log_buffer.ResumeError {
				break
			}
		}
		if isDone {
			return nil
		}
		if !fs.hasClient(req.ClientId, req.ClientEpoch) {
			return nil
		}
	}

	return readInMemoryLogErr
}
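
// eachLogEntryFn wraps an event handler so it can be fed raw log entries: each LogEntry's data is
// unmarshaled into a filer_pb.SubscribeMetadataResponse and passed on to eachEventNotificationFn.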
func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error) func(logEntry *filer_pb.LogEntry) error {
	return func(logEntry *filer_pb.LogEntry) error {
		event := &filer_pb.SubscribeMetadataResponse{}
		if err := proto.Unmarshal(logEntry.Data, event); err != nil {
			glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
			return fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
		}

		if err := eachEventNotificationFn(event.Directory, event.EventNotification, event.TsNs); err != nil {
			return err
		}

		return nil
	}
}
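
// eachEventNotificationFn builds the per-event handler for one subscriber. It drops events that
// carry the subscriber's own signature (req.Signature), stamps the filer's signature on events it
// forwards, filters by req.PathPrefix/PathPrefixes/Directories while always skipping the filer's
// internal meta logs under filer.SystemLogDir, and, after MaxUnsyncedEvents filtered events in a
// row, sends an empty notification so the client's resume timestamp keeps advancing.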
func (fs *FilerServer) eachEventNotificationFn(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer, clientName string) func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error {
	filtered := 0

	return func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error {
		defer func() {
			if filtered > MaxUnsyncedEvents {
				if err := stream.Send(&filer_pb.SubscribeMetadataResponse{
					EventNotification: &filer_pb.EventNotification{},
					TsNs:              tsNs,
				}); err == nil {
					filtered = 0
				}
			}
		}()

		filtered++

		foundSelf := false
		for _, sig := range eventNotification.Signatures {
			if sig == req.Signature && req.Signature != 0 {
				return nil
			}
			if sig == fs.filer.Signature {
				foundSelf = true
			}
		}
		if !foundSelf {
			eventNotification.Signatures = append(eventNotification.Signatures, fs.filer.Signature)
		}

		// get the complete path to the file or directory
		var entryName string
		if eventNotification.OldEntry != nil {
			entryName = eventNotification.OldEntry.Name
		} else if eventNotification.NewEntry != nil {
			entryName = eventNotification.NewEntry.Name
		}

		fullpath := util.Join(dirPath, entryName)

		// skip filer internal meta logs
		if strings.HasPrefix(fullpath, filer.SystemLogDir) {
			return nil
		}

		if hasPrefixIn(fullpath, req.PathPrefixes) {
			// matched by path prefixes
		} else if matchByDirectory(dirPath, req.Directories) {
			// matched by directory
		} else {
			if !strings.HasPrefix(fullpath, req.PathPrefix) {
				if eventNotification.NewParentPath != "" {
					newFullPath := util.Join(eventNotification.NewParentPath, entryName)
					if !strings.HasPrefix(newFullPath, req.PathPrefix) {
						return nil
					}
				} else {
					return nil
				}
			}
		}

		// record the timestamp of the last event sent for this path prefix
		stats.FilerServerLastSendTsOfSubscribeGauge.WithLabelValues(fs.option.Host.String(), req.ClientName, req.PathPrefix).Set(float64(tsNs))

		message := &filer_pb.SubscribeMetadataResponse{
			Directory:         dirPath,
			EventNotification: eventNotification,
			TsNs:              tsNs,
		}

		// println("sending", dirPath, entryName)

		if err := stream.Send(message); err != nil {
			glog.V(0).Infof("=> client %v: %+v", clientName, err)
			return err
		}

		filtered = 0

		return nil
	}
}

func hasPrefixIn(text string, prefixes []string) bool {
	for _, p := range prefixes {
		if strings.HasPrefix(text, p) {
			return true
		}
	}
	return false
}

func matchByDirectory(dirPath string, directories []string) bool {
	for _, dir := range directories {
		if dirPath == dir {
			return true
		}
	}
	return false
}
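
// addClient registers a subscriber keyed by clientId with its clientEpoch. The new registration
// wins (isReplacing) when the id is unknown or its recorded epoch is older; otherwise the
// subscription is reported as alreadyKnown and rejected by the callers above.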
func (fs *FilerServer) addClient(prefix string, clientType string, clientAddress string, clientId int32, clientEpoch int32) (isReplacing, alreadyKnown bool, clientName string) {
	clientName = clientType + "@" + clientAddress
	glog.V(0).Infof("+ %v listener %v clientId %v clientEpoch %v", prefix, clientName, clientId, clientEpoch)
	if clientId != 0 {
		fs.knownListenersLock.Lock()
		defer fs.knownListenersLock.Unlock()
		epoch, found := fs.knownListeners[clientId]
		if !found || epoch < clientEpoch {
			fs.knownListeners[clientId] = clientEpoch
			isReplacing = true
		} else {
			alreadyKnown = true
		}
	}
	return
}
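
// deleteClient removes the subscriber's registration, but only if the recorded epoch is not newer
// than clientEpoch, so a departing old connection cannot evict a replacement that already took over.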
func (fs *FilerServer) deleteClient(prefix string, clientName string, clientId int32, clientEpoch int32) {
	glog.V(0).Infof("- %v listener %v clientId %v clientEpoch %v", prefix, clientName, clientId, clientEpoch)
	if clientId != 0 {
		fs.knownListenersLock.Lock()
		defer fs.knownListenersLock.Unlock()
		epoch, found := fs.knownListeners[clientId]
		if found && epoch <= clientEpoch {
			delete(fs.knownListeners, clientId)
		}
	}
}
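
// hasClient reports whether the subscriber identified by clientId is still registered and has not
// been superseded by a connection with a newer epoch.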
func (fs *FilerServer) hasClient(clientId int32, clientEpoch int32) bool {
	if clientId != 0 {
		fs.knownListenersLock.Lock()
		defer fs.knownListenersLock.Unlock()
		epoch, found := fs.knownListeners[clientId]
		if found && epoch <= clientEpoch {
			return true
		}
	}
	return false
}