seaweedfs/weed/messaging/broker/broker_grpc_server_subscribe.go

180 lines
5.0 KiB
Go
Raw Normal View History

2020-04-18 16:12:01 +08:00
package broker

import (
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
)
func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error {
// process initial request
in, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
2020-05-17 09:53:54 +08:00
var processedTsNs int64
var messageCount int64
2020-04-18 16:12:01 +08:00
subscriberId := in.Init.SubscriberId
2020-04-19 15:18:32 +08:00
// TODO look it up
topicConfig := &messaging_pb.TopicConfiguration{
2020-04-30 18:05:34 +08:00
// IsTransient: true,
}
2020-04-18 16:12:01 +08:00
// get lock
tp := TopicPartition{
Namespace: in.Init.Namespace,
Topic: in.Init.Topic,
Partition: in.Init.Partition,
}
2020-05-17 09:53:54 +08:00
fmt.Printf("+ subscriber %s for %s\n", subscriberId, tp.String())
defer func() {
fmt.Printf("- subscriber %s for %s %d messages last %v\n", subscriberId, tp.String(), messageCount, time.Unix(0, processedTsNs))
}()
2020-05-12 23:48:00 +08:00
lock := broker.topicManager.RequestLock(tp, topicConfig, false)
defer broker.topicManager.ReleaseLock(tp, false)
2020-04-18 16:12:01 +08:00
isConnected := true
go func() {
for isConnected {
2020-05-17 23:57:47 +08:00
if _, err := stream.Recv(); err != nil {
2020-05-18 08:38:31 +08:00
// println("disconnecting connection to", subscriberId, tp.String())
isConnected = false
lock.cond.Signal()
}
}
}()
2020-04-18 16:12:01 +08:00
lastReadTime := time.Now()
switch in.Init.StartPosition {
case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP:
lastReadTime = time.Unix(0, in.Init.TimestampNs)
case messaging_pb.SubscriberMessage_InitMessage_LATEST:
case messaging_pb.SubscriberMessage_InitMessage_EARLIEST:
lastReadTime = time.Unix(0, 0)
2020-04-18 16:12:01 +08:00
}
// how to process each message
// an error returned will end the subscription
eachMessageFn := func(m *messaging_pb.Message) error {
err := stream.Send(&messaging_pb.BrokerMessage{
Data: m,
2020-04-18 16:12:01 +08:00
})
if err != nil {
glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err)
}
return err
}
2020-04-28 17:05:44 +08:00
eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error {
m := &messaging_pb.Message{}
if err = proto.Unmarshal(logEntry.Data, m); err != nil {
glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
return err
}
2020-04-30 18:05:34 +08:00
// fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs)
if err = eachMessageFn(m); err != nil {
glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
return err
}
2020-05-08 17:47:22 +08:00
if m.IsClose {
// println("processed EOF")
return io.EOF
}
processedTsNs = logEntry.TsNs
2020-05-08 17:47:22 +08:00
messageCount++
return nil
2020-04-28 17:05:44 +08:00
}
2020-05-17 09:53:54 +08:00
// fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime)
2020-09-10 02:21:23 +08:00
for {
if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil {
if err != io.EOF {
// println("stopping from persisted logs", err.Error())
return err
}
}
if processedTsNs != 0 {
lastReadTime = time.Unix(0, processedTsNs)
}
2021-06-27 20:51:28 +08:00
lastReadTime, err = lock.logBuffer.LoopProcessLogData("broker", lastReadTime, func() bool {
2020-09-10 02:21:23 +08:00
lock.Mutex.Lock()
lock.cond.Wait()
lock.Mutex.Unlock()
return isConnected
}, eachLogEntryFn)
if err != nil {
if err == log_buffer.ResumeFromDiskError {
continue
}
2020-09-10 02:21:23 +08:00
glog.Errorf("processed to %v: %v", lastReadTime, err)
time.Sleep(3127 * time.Millisecond)
if err != log_buffer.ResumeError {
break
}
}
}
return err
2020-04-18 16:12:01 +08:00
}
func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (err error) {
2020-08-30 12:01:14 +08:00
startTime = startTime.UTC()
startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
startHourMinute := fmt.Sprintf("%02d-%02d", startTime.Hour(), startTime.Minute())
sizeBuf := make([]byte, 4)
startTsNs := startTime.UnixNano()
2020-05-09 15:31:34 +08:00
topicDir := genTopicDir(tp.Namespace, tp.Topic)
2020-04-30 18:05:34 +08:00
partitionSuffix := fmt.Sprintf(".part%02d", tp.Partition)
return filer_pb.List(broker, topicDir, "", func(dayEntry *filer_pb.Entry, isLast bool) error {
dayDir := fmt.Sprintf("%s/%s", topicDir, dayEntry.Name)
return filer_pb.List(broker, dayDir, "", func(hourMinuteEntry *filer_pb.Entry, isLast bool) error {
if dayEntry.Name == startDate {
hourMinute := util.FileNameBase(hourMinuteEntry.Name)
if strings.Compare(hourMinute, startHourMinute) < 0 {
return nil
}
}
2020-05-08 17:47:22 +08:00
if !strings.HasSuffix(hourMinuteEntry.Name, partitionSuffix) {
2020-04-30 18:05:34 +08:00
return nil
}
// println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name)
2020-09-01 15:21:19 +08:00
chunkedFileReader := filer.NewChunkStreamReader(broker, hourMinuteEntry.Chunks)
defer chunkedFileReader.Close()
2020-09-01 15:21:19 +08:00
if _, err := filer.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
chunkedFileReader.Close()
if err == io.EOF {
2020-05-08 17:47:22 +08:00
return err
}
return fmt.Errorf("reading %s/%s: %v", dayDir, hourMinuteEntry.Name, err)
}
return nil
}, "", false, 24*60)
}, startDate, true, 366)
}