seaweedfs/weed/mq/broker/broker_grpc_pub.go

package broker

import (
	"context"
	"fmt"
	"io"
	"sync/atomic"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
)
// For a new or re-configured topic, or when one of the brokers goes offline,
// the pub clients ask one broker which brokers host the topic partitions.
// The broker will lock the topic on write.
// 1. if the topic is not found, create the topic, and allocate the topic partitions to the brokers
// 2. if the topic is found, return the brokers for the topic partitions
// For a topic to read from, the sub clients ask one broker which brokers host the topic partitions.
// The broker will lock the topic on read.
// 1. if the topic is not found, return an error
// 2. if the topic is found, return the brokers for the topic partitions
//
// If the topic needs to be re-balanced, the admin client will lock the topic,
// 1. collect throughput information for all the brokers
// 2. adjust the topic partitions to the brokers
// 3. notify the brokers to add/remove partitions to host
// 3.1 When locking the topic, the partitions and brokers should be remembered in the lock.
// 4. the brokers will stop processing incoming messages if not the right partition
// 4.1 the pub clients will need to re-partition the messages and publish to the right brokers for the partition
// 4.2 the sub clients will need to change the brokers to read from
//
// The following is from each individual component's perspective:
// For a pub client
// For current topic/partition, ask one broker for the brokers for the topic partitions
// 1. connect to the brokers and keep sending, until the broker returns error, or the broker leader is moved.
// For a sub client
// For current topic/partition, ask one broker for the brokers for the topic partitions
// 1. connect to the brokers and keep reading, until the broker returns error, or the broker leader is moved.
// For a broker
// Upon a pub client lookup (see the sketch after this comment block):
// 1. lock the topic
// 2. if the topic already has a partition assignment, check all brokers are healthy
// 3. if not, create the topic partition assignment
// 4. return the brokers for the topic partitions
// 5. unlock the topic
// Upon a sub client lookup:
// 1. lock the topic
// 2. if the topic already has a partition assignment, check all brokers are healthy
// 3. if not, return an error
// 4. return the brokers for the topic partitions
// 5. unlock the topic
// For an admin tool
// 0. collect stats from all the brokers, and find the topic worth moving
// 1. lock the topic
// 2. collect throughput information for all the brokers
// 3. adjust the topic partitions to the brokers
// 4. notify the brokers to add/remove partitions to host
// 5. the brokers will stop processing incoming messages if not the right partition
// 6. unlock the topic
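//
// A minimal sketch of the pub-lookup flow above: a check-or-create under a
// per-topic lock. All names here (assignmentMu, assignments, createAssignment)
// are illustrative, not the actual implementation, and the step-2 health check
// is omitted:
//
//	var assignmentMu sync.Mutex
//	var assignments = make(map[string][]pb.ServerAddress) // topic -> brokers hosting its partitions
//
//	func lookupForPub(topicName string) []pb.ServerAddress {
//		assignmentMu.Lock()         // 1. lock the topic
//		defer assignmentMu.Unlock() // 5. unlock the topic
//		brokers, found := assignments[topicName]
//		if !found {
//			// 3. create the topic partition assignment
//			brokers = createAssignment(topicName)
//			assignments[topicName] = brokers
//		}
//		// 4. return the brokers for the topic partitions
//		return brokers
//	}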
/*
The messages are buffered in memory, and saved to filer under

	/topics/<topic>/<date>/<hour>/<segment>/*.msg
	/topics/<topic>/<date>/<hour>/segment
	/topics/<topic>/info/segment_<id>.meta
*/
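// For illustration only: a reader or writer would compose the per-segment
// paths above along these lines (topicName, date, hour, and segmentId are
// hypothetical variables):
//
//	segmentDir := fmt.Sprintf("/topics/%s/%s/%02d/%d", topicName, date, hour, segmentId)
//	// messages:       segmentDir + "/*.msg"
//	// segment marker: fmt.Sprintf("/topics/%s/%s/%02d/segment", topicName, date, hour)
//	// segment meta:   fmt.Sprintf("/topics/%s/info/segment_%d.meta", topicName, segmentId)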
func (broker *MessageQueueBroker) Publish(stream mq_pb.SeaweedMessaging_PublishServer) error {
	// 1. write to the volume server
	// 2. find the topic metadata owning filer
	// 3. write to the filer
	var localTopicPartition *topic.LocalPartition

	req, err := stream.Recv()
	if err != nil {
		return err
	}
	response := &mq_pb.PublishResponse{}
	// TODO check whether the current broker should be the leader for this topic partition

	ackInterval := 1
	initMessage := req.GetInit()
	if initMessage == nil {
		// the first request on the stream must carry the init message
		response.Error = "missing init message"
		glog.Errorf("missing init message")
		return stream.Send(response)
	}
	t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition)
	localTopicPartition = broker.localTopicManager.GetTopicPartition(t, p)
	if localTopicPartition == nil {
		localTopicPartition = topic.NewLocalPartition(t, p, true, nil)
		broker.localTopicManager.AddTopicPartition(t, localTopicPartition)
	}
	ackInterval = int(initMessage.AckInterval)
	if ackInterval <= 0 {
		ackInterval = 1
	}
	if err := stream.Send(response); err != nil {
		return err
	}
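
	// From the publisher's side the handshake mirrors this: the first request
	// on the stream carries the init message, every later one a data message.
	// A hedged client-side sketch (initRequest and dataRequest stand for
	// PublishRequest values wrapping the InitMessage and a DataMessage; the
	// exact oneof literals are not shown in this file):
	//
	//	stream, err := client.Publish(context.Background())
	//	if err != nil {
	//		return err
	//	}
	//	if err := stream.Send(initRequest); err != nil { // topic, partition, ack interval
	//		return err
	//	}
	//	for _, m := range messages {
	//		if err := stream.Send(dataRequest(m)); err != nil {
	//			return err
	//		}
	//	}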

	ackCounter := 0
	var ackSequence int64
	var isStopping int32

	respChan := make(chan *mq_pb.PublishResponse, 128)
	defer func() {
		// tell the responder goroutine to stop, flush a final response, and
		// close the channel so the responder can drain and exit
		atomic.StoreInt32(&isStopping, 1)
		respChan <- &mq_pb.PublishResponse{
			Error: "end of stream",
		}
		close(respChan)
	}()

	// a single responder goroutine streams the queued acks back to the client,
	// and also acks periodically so a slow publisher still sees progress
	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case resp := <-respChan:
				if resp == nil {
					// respChan is closed
					return
				}
				if err := stream.Send(resp); err != nil {
					glog.Errorf("Error sending response %v: %v", resp, err)
				}
			case <-ticker.C:
				if atomic.LoadInt32(&isStopping) != 0 {
					return
				}
				// send the periodic ack directly: respChan may already be
				// closed by the deferred cleanup, so do not write to it here
				if err := stream.Send(&mq_pb.PublishResponse{
					AckSequence: atomic.LoadInt64(&ackSequence),
				}); err != nil {
					glog.Errorf("Error sending ack: %v", err)
				}
			}
		}
	}()

	// process each published message
	for {
		// receive a message
		req, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				// the publisher closed its end of the stream
				break
			}
			return err
		}

		// process the received message
		if dataMessage := req.GetData(); dataMessage != nil {
			localTopicPartition.Publish(dataMessage)
		}

		ackCounter++
		atomic.AddInt64(&ackSequence, 1)
		if ackCounter >= ackInterval {
			ackCounter = 0
			// send back the ack
			respChan <- &mq_pb.PublishResponse{
				AckSequence: atomic.LoadInt64(&ackSequence),
			}
		}
	}

	glog.Infof("publish stream closed")

	return nil
}
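
// With ackInterval = n, a publisher that sends 2n messages should observe acks
// at sequences n and 2n (plus the periodic once-a-second acks). A hedged sketch
// of draining them on the client side (onAcked is a hypothetical callback):
//
//	go func() {
//		for {
//			resp, err := stream.Recv()
//			if err != nil {
//				return
//			}
//			if resp.Error != "" {
//				glog.Errorf("publish failed: %v", resp.Error)
//				return
//			}
//			onAcked(resp.AckSequence)
//		}
//	}()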
// AssignTopicPartitions runs on the assigned broker, to execute the topic partition assignment
func (broker *MessageQueueBroker) AssignTopicPartitions(c context.Context, request *mq_pb.AssignTopicPartitionsRequest) (*mq_pb.AssignTopicPartitionsResponse, error) {
	ret := &mq_pb.AssignTopicPartitionsResponse{}
	self := pb.ServerAddress(fmt.Sprintf("%s:%d", broker.option.Ip, broker.option.Port))

	for _, brokerPartition := range request.BrokerPartitionAssignments {
		localPartition := topic.FromPbBrokerPartitionAssignment(self, brokerPartition)
		broker.localTopicManager.AddTopicPartition(
			topic.FromPbTopic(request.Topic),
			localPartition)
		if request.IsLeader {
			// the leader fans the assignment out to its follower brokers
			for _, follower := range localPartition.FollowerBrokers {
				err := pb.WithBrokerGrpcClient(false, follower.String(), broker.grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
					_, err := client.AssignTopicPartitions(context.Background(), request)
					return err
				})
				if err != nil {
					return ret, err
				}
			}
		}
	}

	return ret, nil
}
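
// An admin client would kick off the assignment on the leader broker the same
// way the leader fans out to its followers above. A hedged sketch, assuming
// leaderAddress, topicPb, assignments, and grpcDialOption come from the
// caller's balancing logic:
//
//	err := pb.WithBrokerGrpcClient(false, leaderAddress, grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
//		_, err := client.AssignTopicPartitions(context.Background(), &mq_pb.AssignTopicPartitionsRequest{
//			Topic:                      topicPb,
//			BrokerPartitionAssignments: assignments,
//			IsLeader:                   true,
//		})
//		return err
//	})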