seaweedfs/weed/server/volume_grpc_client_to_master.go

package weed_server

import (
"fmt"
"net"
"time"

"google.golang.org/grpc"

"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"golang.org/x/net/context"

"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
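
// GetMaster returns the address of the master this volume server most recently connected to.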
func (vs *VolumeServer) GetMaster() string {
return vs.currentMaster
}
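
// heartbeat keeps this volume server registered with a master. It cycles through the
// seed master nodes, follows any leader redirection reported by doHeartbeat, and on
// errors clears the current master and retries after one pulse interval.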
func (vs *VolumeServer) heartbeat() {
glog.V(0).Infof("Volume server start with seed master nodes: %v", vs.SeedMasterNodes)
2017-01-10 17:01:12 +08:00
vs.store.SetDataCenter(vs.dataCenter)
vs.store.SetRack(vs.rack)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.volume")
2019-02-19 04:11:52 +08:00
var err error
var newLeader string
2017-01-10 17:01:12 +08:00
for {
for _, master := range vs.SeedMasterNodes {
if newLeader != "" {
// the new leader may actually be the same master
// wait a bit before re-registering with it
time.Sleep(3 * time.Second)
master = newLeader
}

masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(master)
if parseErr != nil {
glog.V(0).Infof("failed to parse master grpc address %v: %v", master, parseErr)
continue
}
vs.store.MasterAddress = master
newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
if err != nil {
glog.V(0).Infof("heartbeat error: %v", err)
time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
newLeader = ""
vs.store.MasterAddress = ""
}
}
}
}
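
// doHeartbeat connects to one master over gRPC and runs a SendHeartbeat stream:
// it reports the full volume and EC shard state, then keeps sending periodic full
// heartbeats plus delta heartbeats whenever volumes or EC shards change locally.
// It returns the address of the new leader if the master redirects this server,
// or an error if the stream breaks.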
func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) {
grpcConnection, err := pb.GrpcDial(context.Background(), masterGrpcAddress, grpcDialOption)
if err != nil {
return "", fmt.Errorf("failed to dial %s: %v", masterNode, err)
}
defer grpcConnection.Close()

client := master_pb.NewSeaweedClient(grpcConnection)
stream, err := client.SendHeartbeat(context.Background())
if err != nil {
glog.V(0).Infof("SendHeartbeat to %s: %v", masterNode, err)
return "", err
}
glog.V(0).Infof("Heartbeat to: %v", masterNode)
vs.currentMaster = masterNode
doneChan := make(chan error, 1)
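
// receive control messages from the master: volume size limit updates, leader changes,
// metrics settings, and storage backend configuration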
go func() {
for {
in, err := stream.Recv()
if err != nil {
doneChan <- err
return
}
if in.GetVolumeSizeLimit() != 0 && vs.store.GetVolumeSizeLimit() != in.GetVolumeSizeLimit() {
vs.store.SetVolumeSizeLimit(in.GetVolumeSizeLimit())
if vs.store.MaybeAdjustVolumeMax() {
if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
}
}
}
if in.GetLeader() != "" && masterNode != in.GetLeader() && !isSameIP(in.GetLeader(), masterNode) {
glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), masterNode)
newLeader = in.GetLeader()
doneChan <- nil
return
}
if in.GetMetricsAddress() != "" && vs.MetricsAddress != in.GetMetricsAddress() {
vs.MetricsAddress = in.GetMetricsAddress()
vs.MetricsIntervalSec = int(in.GetMetricsIntervalSeconds())
}
if len(in.StorageBackends) > 0 {
backend.LoadFromPbStorageBackends(in.StorageBackends)
}
}
}()
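
// send the initial full reports right after the stream is established:
// one heartbeat for regular volumes and one for erasure coding shards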
if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return "", err
}
if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil {
glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return "", err
}
volumeTicker := time.NewTicker(sleepInterval)
defer volumeTicker.Stop()
ecShardTicker := time.NewTicker(17 * sleepInterval)
defer ecShardTicker.Stop()
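
// main loop: forward per-volume and per-EC-shard changes as delta heartbeats,
// send periodic full heartbeats on the tickers, and exit when the stream fails
// or the master points to a new leader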
for {
select {
case volumeMessage := <-vs.store.NewVolumesChan:
deltaBeat := &master_pb.Heartbeat{
NewVolumes: []*master_pb.VolumeShortInformationMessage{
&volumeMessage,
},
}
glog.V(1).Infof("volume server %s:%d adds volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id)
if err = stream.Send(deltaBeat); err != nil {
glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
case ecShardMessage := <-vs.store.NewEcShardsChan:
deltaBeat := &master_pb.Heartbeat{
NewEcShards: []*master_pb.VolumeEcShardInformationMessage{
&ecShardMessage,
},
}
glog.V(1).Infof("volume server %s:%d adds ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id,
erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds())
if err = stream.Send(deltaBeat); err != nil {
glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
case volumeMessage := <-vs.store.DeletedVolumesChan:
deltaBeat := &master_pb.Heartbeat{
DeletedVolumes: []*master_pb.VolumeShortInformationMessage{
&volumeMessage,
},
}
glog.V(1).Infof("volume server %s:%d deletes volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id)
if err = stream.Send(deltaBeat); err != nil {
glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
case ecShardMessage := <-vs.store.DeletedEcShardsChan:
deltaBeat := &master_pb.Heartbeat{
DeletedEcShards: []*master_pb.VolumeEcShardInformationMessage{
&ecShardMessage,
},
}
glog.V(1).Infof("volume server %s:%d deletes ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id,
erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds())
if err = stream.Send(deltaBeat); err != nil {
glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err)
return "", err
}
case <-volumeTicker.C:
glog.V(4).Infof("volume server %s:%d heartbeat", vs.store.Ip, vs.store.Port)
if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return "", err
}
case <-ecShardTicker.C:
glog.V(4).Infof("volume server %s:%d ec heartbeat", vs.store.Ip, vs.store.Port)
if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil {
glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
return "", err
}
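// the receive goroutine stopped: err is the stream error, or nil when a new leader was found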
case err = <-doneChan:
return
}
}
}
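
// isSameIP reports whether ip equals one of the IP addresses that host resolves to.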
func isSameIP(ip string, host string) bool {
ips, err := net.LookupIP(host)
if err != nil {
return false
}
for _, t := range ips {
if ip == t.String() {
return true
}
}
return false
}