// Package kafka implements a filer notification message queue backed by Apache Kafka.
package kafka
import (
	"github.com/Shopify/sarama"
	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/notification"
	"github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
2018-09-16 16:18:30 +08:00
notification.MessageQueues = append(notification.MessageQueues, &KafkaQueue{})
2018-08-13 16:20:49 +08:00
}
// KafkaQueue delivers filer event notifications to a Kafka topic
// through an asynchronous sarama producer.
type KafkaQueue struct {
	topic    string               // destination topic, set by initialize
	producer sarama.AsyncProducer // async producer; results drained by background goroutines
}
// GetName returns the registry identifier under which this
// message-queue back-end is selected ("kafka").
func (k *KafkaQueue) GetName() string {
	return "kafka"
}
func (k *KafkaQueue) Initialize(configuration util.Configuration) (err error) {
2018-09-17 02:20:08 +08:00
glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts"))
glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString("topic"))
2018-08-13 16:20:49 +08:00
return k.initialize(
configuration.GetStringSlice("hosts"),
configuration.GetString("topic"),
)
}
func (k *KafkaQueue) initialize(hosts []string, topic string) (err error) {
config := sarama.NewConfig()
config.Producer.RequiredAcks = sarama.WaitForLocal
config.Producer.Partitioner = sarama.NewHashPartitioner
config.Producer.Return.Successes = true
config.Producer.Return.Errors = true
k.producer, err = sarama.NewAsyncProducer(hosts, config)
2018-10-14 14:30:00 +08:00
if err != nil {
return err
}
2018-08-19 16:27:30 +08:00
k.topic = topic
2018-08-13 16:20:49 +08:00
go k.handleSuccess()
go k.handleError()
return nil
}
// SendMessage marshals message to protobuf bytes and enqueues it on the async
// producer, keyed by key. Delivery results are observed by the background
// goroutines, so a nil return only means the message was enqueued.
func (k *KafkaQueue) SendMessage(key string, message proto.Message) error {
	payload, err := proto.Marshal(message)
	if err != nil {
		return err
	}
	k.producer.Input() <- &sarama.ProducerMessage{
		Topic: k.topic,
		Key:   sarama.StringEncoder(key),
		Value: sarama.ByteEncoder(payload),
	}
	return nil
}
func (k *KafkaQueue) handleSuccess() {
for {
pm := <-k.producer.Successes()
if pm != nil {
2018-08-19 16:27:30 +08:00
glog.V(3).Infof("producer message success, partition:%d offset:%d key:%v", pm.Partition, pm.Offset, pm.Key)
2018-08-13 16:20:49 +08:00
}
}
}
func (k *KafkaQueue) handleError() {
for {
err := <-k.producer.Errors()
if err != nil {
2018-08-19 16:27:30 +08:00
glog.Errorf("producer message error, partition:%d offset:%d key:%v valus:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic)
2018-08-13 16:20:49 +08:00
}
}
}