seaweedfs/weed/filer/filer_notify.go

package filer

import (
	"context"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/notification"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)
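
// NotifyUpdateEvent publishes a metadata change (oldEntry -> newEntry) to the
// configured notification queue, if any, and records it in the local metadata log.
// Changes under SystemLogDir are skipped so the log does not log itself, and the
// filer's own signature is appended to the event if it is not already present.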
func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) {
	var fullpath string
	if oldEntry != nil {
		fullpath = string(oldEntry.FullPath)
	} else if newEntry != nil {
		fullpath = string(newEntry.FullPath)
	} else {
		return
	}

	// println("fullpath:", fullpath)

	if strings.HasPrefix(fullpath, SystemLogDir) {
		return
	}

	foundSelf := false
	for _, sig := range signatures {
		if sig == f.Signature {
			foundSelf = true
		}
	}
	if !foundSelf {
		signatures = append(signatures, f.Signature)
	}

	newParentPath := ""
	if newEntry != nil {
		newParentPath, _ = newEntry.FullPath.DirAndName()
	}
	eventNotification := &filer_pb.EventNotification{
		OldEntry:           oldEntry.ToProtoEntry(),
		NewEntry:           newEntry.ToProtoEntry(),
		DeleteChunks:       deleteChunks,
		NewParentPath:      newParentPath,
		IsFromOtherCluster: isFromOtherCluster,
		Signatures:         signatures,
	}

	if notification.Queue != nil {
		glog.V(3).Infof("notifying entry update %v", fullpath)
		notification.Queue.SendMessage(fullpath, eventNotification)
	}

	f.logMetaEvent(ctx, fullpath, eventNotification)
}
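
// logMetaEvent wraps the event in a filer_pb.SubscribeMetadataResponse, marshals it,
// and appends it to the in-memory metadata log buffer, keyed by the parent directory.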
func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) {
	dir, _ := util.FullPath(fullpath).DirAndName()

	event := &filer_pb.SubscribeMetadataResponse{
		Directory:         dir,
		EventNotification: eventNotification,
		TsNs:              time.Now().UnixNano(),
	}
	data, err := proto.Marshal(event)
	if err != nil {
		glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
		return
	}

	f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
}
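
// logFlushFunc persists a flushed batch of the metadata log into a per-minute
// segment file under SystemLogDir, named <yyyy-mm-dd>/<hh-mm>.segment, retrying
// until the append succeeds.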
func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
	if len(buf) == 0 {
		return
	}

	startTime, stopTime = startTime.UTC(), stopTime.UTC()

	targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir,
		startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
		// startTime.Second(), startTime.Nanosecond(),
	)

	for {
		if err := f.appendToFile(targetFile, buf); err != nil {
			glog.V(1).Infof("log write failed %s: %v", targetFile, err)
			time.Sleep(737 * time.Millisecond)
		} else {
			break
		}
	}
}
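
// ReadPersistedLogBuffer replays persisted metadata log segments starting from
// startTime: it lists the day directories and per-minute segment files under
// SystemLogDir, skips segments of the start day that precede the start minute,
// and feeds each decoded log entry to eachLogEntryFn, returning the timestamp of
// the last entry processed.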
func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {

	startTime = startTime.UTC()
	startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
	startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())

	sizeBuf := make([]byte, 4)
	startTsNs := startTime.UnixNano()

	dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "")
	if listDayErr != nil {
		return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
	}
	for _, dayEntry := range dayEntries {
		// println("checking day", dayEntry.FullPath)
		hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "")
		if listHourMinuteErr != nil {
			return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
		}
		for _, hourMinuteEntry := range hourMinuteEntries {
			// println("checking hh-mm", hourMinuteEntry.FullPath)
			if dayEntry.Name() == startDate {
				if strings.Compare(hourMinuteEntry.Name(), startHourMinute) < 0 {
					continue
				}
			}
			// println("processing", hourMinuteEntry.FullPath)
			chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
			if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
				chunkedFileReader.Close()
				if err == io.EOF {
					continue
				}
				return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
			}
			chunkedFileReader.Close()
		}
	}

	return lastTsNs, nil
}
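
// ReadEachLogEntry decodes length-prefixed filer_pb.LogEntry records from r: each
// record is a 4-byte size prefix (decoded with util.BytesToUint32) followed by the
// marshaled protobuf. An entry with TsNs at or before ns ends the scan; newer
// entries are passed to eachLogEntryFn, and the timestamp of the last processed
// entry is returned.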
func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
	for {
		n, err := r.Read(sizeBuf)
		if err != nil {
			return lastTsNs, err
		}
		if n != 4 {
			return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
		}
		size := util.BytesToUint32(sizeBuf)
		// println("entry size", size)
		entryData := make([]byte, size)
		n, err = r.Read(entryData)
		if err != nil {
			return lastTsNs, err
		}
		if n != int(size) {
			return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
		}
		logEntry := &filer_pb.LogEntry{}
		if err = proto.Unmarshal(entryData, logEntry); err != nil {
			return lastTsNs, err
		}
		if logEntry.TsNs <= ns {
			return lastTsNs, nil
		}
		// println("each log: ", logEntry.TsNs)
		if err := eachLogEntryFn(logEntry); err != nil {
			return lastTsNs, err
		} else {
			lastTsNs = logEntry.TsNs
		}
	}
}