fix naming convention

notify volume server of duplicate directories
improve search efficiency
guol-fnst 2022-05-17 14:51:01 +08:00 committed by guol-fnst
parent 076595fbdd
commit b12944f9c6
7 changed files with 648 additions and 637 deletions

View File

@@ -70,7 +70,7 @@ message Heartbeat {
map<string, uint32> max_volume_counts = 4;
uint32 grpc_port = 20;
repeated string LocationUUIDs = 21;
repeated string location_uuids = 21;
}
message HeartbeatResponse {
@@ -79,7 +79,7 @@ message HeartbeatResponse {
string metrics_address = 3;
uint32 metrics_interval_seconds = 4;
repeated StorageBackend storage_backends = 5;
bool has_duplicated_directory = 6;
repeated string duplicated_uuids = 6;
}
message VolumeInformationMessage {

File diff suppressed because it is too large.

View File

@@ -22,35 +22,39 @@ import (
"github.com/chrislusf/seaweedfs/weed/topology"
)
func (ms *MasterServer) RegisterUUIDs(heartbeat *master_pb.Heartbeat) error {
ms.Topo.UUIDAccessLock.Lock()
defer ms.Topo.UUIDAccessLock.Unlock()
func (ms *MasterServer) RegisterUuids(heartbeat *master_pb.Heartbeat) (duplicated_uuids []string, err error) {
ms.Topo.UuidAccessLock.Lock()
defer ms.Topo.UuidAccessLock.Unlock()
key := fmt.Sprintf("%s:%d", heartbeat.Ip, heartbeat.Port)
if ms.Topo.UUIDMap == nil {
ms.Topo.UUIDMap = make(map[string][]string)
if ms.Topo.UuidMap == nil {
ms.Topo.UuidMap = make(map[string][]string)
}
// find whether new UUID exists
for k, v := range ms.Topo.UUIDMap {
for _, id := range heartbeat.LocationUUIDs {
sort.Strings(v)
// find whether new uuid exists
for k, v := range ms.Topo.UuidMap {
sort.Strings(v)
for _, id := range heartbeat.LocationUuids {
index := sort.SearchStrings(v, id)
if index < len(v) && v[index] == id {
glog.Error("directory of ", id, " on ", k, " has been loaded")
return errors.New("volume: Duplicated volume directories were loaded")
duplicated_uuids = append(duplicated_uuids, id)
glog.Errorf("directory of %s on %s has been loaded", id, k)
}
}
}
ms.Topo.UUIDMap[key] = heartbeat.LocationUUIDs
glog.V(0).Infof("found new UUID:%v %v , %v", key, heartbeat.LocationUUIDs, ms.Topo.UUIDMap)
return nil
if len(duplicated_uuids) > 0 {
return duplicated_uuids, errors.New("volume: Duplicated volume directories were loaded")
}
ms.Topo.UuidMap[key] = heartbeat.LocationUuids
glog.V(0).Infof("found new uuid:%v %v , %v", key, heartbeat.LocationUuids, ms.Topo.UuidMap)
return nil, nil
}
func (ms *MasterServer) UnRegisterUUIDs(ip string, port int) {
ms.Topo.UUIDAccessLock.Lock()
defer ms.Topo.UUIDAccessLock.Unlock()
func (ms *MasterServer) UnRegisterUuids(ip string, port int) {
ms.Topo.UuidAccessLock.Lock()
defer ms.Topo.UuidAccessLock.Unlock()
key := fmt.Sprintf("%s:%d", ip, port)
delete(ms.Topo.UUIDMap, key)
glog.V(0).Infof("remove volume server %v, online volume server: %v", key, ms.Topo.UUIDMap)
delete(ms.Topo.UuidMap, key)
glog.V(0).Infof("remove volume server %v, online volume server: %v", key, ms.Topo.UuidMap)
}
func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error {
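In RegisterUuids above, each registered uuid list is now sorted once per volume server and every incoming uuid is then checked with a binary search via sort.SearchStrings, rather than re-sorting inside the inner loop. A minimal, self-contained sketch of that membership-check idiom (names and sample values below are illustrative, not taken from the SeaweedFS code):

    package main

    import (
        "fmt"
        "sort"
    )

    // contains reports whether id is present in the sorted slice ids,
    // using the same binary-search check RegisterUuids applies to incoming uuids.
    func contains(ids []string, id string) bool {
        i := sort.SearchStrings(ids, id) // index where id would be inserted
        return i < len(ids) && ids[i] == id
    }

    func main() {
        registered := []string{"b2e1", "a1f0", "c3d2"}
        sort.Strings(registered) // sort once, outside the lookup loop

        for _, id := range []string{"a1f0", "d4c3"} {
            fmt.Println(id, "duplicated:", contains(registered, id))
        }
    }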
@@ -67,7 +71,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
// the unregister and register can race with each other
ms.Topo.UnRegisterDataNode(dn)
glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port)
ms.UnRegisterUUIDs(dn.Ip, dn.Port)
ms.UnRegisterUuids(dn.Ip, dn.Port)
message := &master_pb.VolumeLocation{
Url: dn.Url(),
@@ -105,11 +109,11 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
dc := ms.Topo.GetOrCreateDataCenter(dcName)
rack := dc.GetOrCreateRack(rackName)
dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), int(heartbeat.GrpcPort), heartbeat.PublicUrl, heartbeat.MaxVolumeCounts)
glog.V(0).Infof("added volume server %d: %v:%d %v", dn.Counter, heartbeat.GetIp(), heartbeat.GetPort(), heartbeat.LocationUUIDs)
err := ms.RegisterUUIDs(heartbeat)
glog.V(0).Infof("added volume server %d: %v:%d %v", dn.Counter, heartbeat.GetIp(), heartbeat.GetPort(), heartbeat.LocationUuids)
uuidlist, err := ms.RegisterUuids(heartbeat)
if err != nil {
if stream_err := stream.Send(&master_pb.HeartbeatResponse{
HasDuplicatedDirectory: true,
DuplicatedUuids: uuidlist,
}); stream_err != nil {
glog.Warningf("SendHeartbeat.Send DuplicatedDirectory response to %s:%d %v", dn.Ip, dn.Port, stream_err)
return stream_err

View File

@@ -118,8 +118,16 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti
doneChan <- err
return
}
if in.HasDuplicatedDirectory {
glog.Error("Shut Down Volume Server due to duplicated volume directory")
if len(in.DuplicatedUuids) > 0 {
var duplicatedDirs []string
for _, loc := range vs.store.Locations {
for _, uuid := range in.DuplicatedUuids {
if uuid == loc.DirectoryUuid {
duplicatedDirs = append(duplicatedDirs, loc.Directory)
}
}
}
glog.Errorf("Shut down Volume Server due to duplicated volume directories: %v", duplicatedDirs)
os.Exit(1)
}
if in.GetVolumeSizeLimit() != 0 && vs.store.GetVolumeSizeLimit() != in.GetVolumeSizeLimit() {

View File

@@ -19,7 +19,7 @@ import (
type DiskLocation struct {
Directory string
DirectoryUUID string
DirectoryUuid string
IdxDirectory string
DiskType types.DiskType
MaxVolumeCount int
@@ -35,27 +35,27 @@ type DiskLocation struct {
isDiskSpaceLow bool
}
func GenerateDirUUID(dir string) (dirUUIDString string, err error) {
glog.V(1).Infof("Getting UUID of volume directory:%s", dir)
dirUUIDString = ""
func GenerateDirUuid(dir string) (dirUuidString string, err error) {
glog.V(1).Infof("Getting uuid of volume directory:%s", dir)
dirUuidString = ""
fileName := dir + "/vol_dir.uuid"
if !util.FileExists(fileName) {
dirUUID, _ := uuid.NewRandom()
dirUUIDString = dirUUID.String()
writeErr := util.WriteFile(fileName, []byte(dirUUIDString), 0644)
dirUuid, _ := uuid.NewRandom()
dirUuidString = dirUuid.String()
writeErr := util.WriteFile(fileName, []byte(dirUuidString), 0644)
if writeErr != nil {
glog.Warningf("failed to write UUID to %s : %v", fileName, writeErr)
return "", fmt.Errorf("failed to write UUID to %s : %v", fileName, writeErr)
glog.Warningf("failed to write uuid to %s : %v", fileName, writeErr)
return "", fmt.Errorf("failed to write uuid to %s : %v", fileName, writeErr)
}
} else {
uuidData, readErr := os.ReadFile(fileName)
if readErr != nil {
glog.Warningf("failed to read UUID from %s : %v", fileName, readErr)
return "", fmt.Errorf("failed to read UUID from %s : %v", fileName, readErr)
glog.Warningf("failed to read uuid from %s : %v", fileName, readErr)
return "", fmt.Errorf("failed to read uuid from %s : %v", fileName, readErr)
}
dirUUIDString = string(uuidData)
dirUuidString = string(uuidData)
}
return dirUUIDString, nil
return dirUuidString, nil
}
func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
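GenerateDirUuid pins a directory's identity in a vol_dir.uuid file, so remounting the same directory (or the same disk under two volume servers) reports the same uuid and the master can flag the duplicate. A minimal standalone sketch of the same generate-once-then-reuse pattern, assuming the github.com/google/uuid package implied by uuid.NewRandom; the helper name is illustrative:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"

        "github.com/google/uuid"
    )

    // dirUuid returns the uuid stored in dir/vol_dir.uuid, creating the file on
    // first use so the same value is reported across restarts.
    func dirUuid(dir string) (string, error) {
        fileName := filepath.Join(dir, "vol_dir.uuid")
        if data, err := os.ReadFile(fileName); err == nil {
            return string(data), nil // reuse the previously written uuid
        }
        id, err := uuid.NewRandom()
        if err != nil {
            return "", err
        }
        if writeErr := os.WriteFile(fileName, []byte(id.String()), 0644); writeErr != nil {
            return "", fmt.Errorf("failed to write uuid to %s: %v", fileName, writeErr)
        }
        return id.String(), nil
    }

    func main() {
        dir, _ := os.MkdirTemp("", "vol")
        first, _ := dirUuid(dir)
        second, _ := dirUuid(dir) // second call reads the persisted value
        fmt.Println(first == second)
    }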
@@ -65,10 +65,10 @@ func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpace util.MinFreeSp
} else {
idxDir = util.ResolvePath(idxDir)
}
dirUUID, _ := GenerateDirUUID(dir)
dirUuid, _ := GenerateDirUuid(dir)
location := &DiskLocation{
Directory: dir,
DirectoryUUID: dirUUID,
DirectoryUuid: dirUuid,
IdxDirectory: idxDir,
DiskType: diskType,
MaxVolumeCount: maxVolumeCount,

View File

@@ -301,9 +301,9 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
}
}
var UUIDList []string
var uuidList []string
for _, loc := range s.Locations {
UUIDList = append(UUIDList, loc.DirectoryUUID)
uuidList = append(uuidList, loc.DirectoryUuid)
}
for col, size := range collectionVolumeSize {
@@ -327,7 +327,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
Rack: s.rack,
Volumes: volumeMessages,
HasNoVolumes: len(volumeMessages) == 0,
LocationUUIDs: UUIDList,
LocationUuids: uuidList,
}
}

View File

@@ -45,8 +45,8 @@ type Topology struct {
RaftServer raft.Server
HashicorpRaft *hashicorpRaft.Raft
UUIDAccessLock sync.RWMutex
UUIDMap map[string][]string
UuidAccessLock sync.RWMutex
UuidMap map[string][]string
}
func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int, replicationAsMin bool) *Topology {
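The new Topology fields give the master a per-volume-server registry of directory uuids: UuidMap is keyed by the "ip:port" string built in RegisterUuids, and UuidAccessLock guards it against concurrent heartbeats. A small standalone illustration of that map layout (addresses and uuids are made up):

    package main

    import "fmt"

    func main() {
        // Same shape as Topology.UuidMap: the "ip:port" of each volume server
        // mapped to the uuids of its volume directories.
        uuidMap := map[string][]string{
            "10.0.0.11:8080": {"a1f0", "b2e1"},
            "10.0.0.12:8080": {"c3d2"},
        }
        key := fmt.Sprintf("%s:%d", "10.0.0.13", 8080) // key built as in RegisterUuids
        uuidMap[key] = []string{"d4c3"}
        fmt.Println(uuidMap)
    }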