2019-05-23 13:44:28 +08:00
|
|
|
package topology
|
|
|
|
|
|
|
|
import (
|
2022-07-29 15:17:28 +08:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/types"
|
2019-05-23 13:44:28 +08:00
|
|
|
)
|
|
|
|
|
2019-05-23 15:42:28 +08:00
|
|
|
func (dn *DataNode) GetEcShards() (ret []*erasure_coding.EcVolumeInfo) {
|
2019-05-23 13:44:28 +08:00
|
|
|
dn.RLock()
|
2021-02-16 18:47:02 +08:00
|
|
|
for _, c := range dn.children {
|
|
|
|
disk := c.(*Disk)
|
|
|
|
ret = append(ret, disk.GetEcShards()...)
|
2019-05-23 13:44:28 +08:00
|
|
|
}
|
|
|
|
dn.RUnlock()
|
|
|
|
return ret
|
|
|
|
}
|
2019-05-23 15:42:28 +08:00
|
|
|
|
|
|
|
// UpdateEcShards reconciles the ec shards registered on this data node with
// the actual set reported by the volume server. It returns the shard groups
// that were added (newShards) and removed (deletedShards), adjusts per-disk
// ec shard usage counters by the net delta, and, if anything changed,
// replaces the node's registered ec shard map with actualShards.
func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo) (newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
	// prepare the new ec shard map
	actualEcShardMap := make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo)
	for _, ecShards := range actualShards {
		actualEcShardMap[ecShards.VolumeId] = ecShards
	}

	existingEcShards := dn.GetEcShards()

	// find out the newShards and deletedShards
	for _, ecShards := range existingEcShards {

		// per-volume counters: the net change feeds the disk usage delta below
		var newShardCount, deletedShardCount int
		disk := dn.getOrCreateDisk(ecShards.DiskType)

		vid := ecShards.VolumeId
		if actualEcShards, ok := actualEcShardMap[vid]; !ok {
			// dn registered ec shards not found in the new set of ec shards
			deletedShards = append(deletedShards, ecShards)
			deletedShardCount += ecShards.ShardIdCount()
		} else {
			// found, but maybe the actual shard could be missing
			// shards present in the report but not registered yet
			a := actualEcShards.Minus(ecShards)
			if a.ShardIdCount() > 0 {
				newShards = append(newShards, a)
				newShardCount += a.ShardIdCount()
			}
			// shards registered but absent from the report
			d := ecShards.Minus(actualEcShards)
			if d.ShardIdCount() > 0 {
				deletedShards = append(deletedShards, d)
				deletedShardCount += d.ShardIdCount()
			}
		}

		// apply the net shard-count change for this volume to its disk
		if (newShardCount - deletedShardCount) != 0 {
			disk.UpAdjustDiskUsageDelta(types.ToDiskType(ecShards.DiskType), &DiskUsageCounts{
				ecShardCount: int64(newShardCount - deletedShardCount),
			})
		}

	}

	// volumes that appear in the report but were never registered on this node
	for _, ecShards := range actualShards {
		if dn.HasEcShards(ecShards.VolumeId) {
			continue
		}

		newShards = append(newShards, ecShards)

		disk := dn.getOrCreateDisk(ecShards.DiskType)
		disk.UpAdjustDiskUsageDelta(types.ToDiskType(ecShards.DiskType), &DiskUsageCounts{
			ecShardCount: int64(ecShards.ShardIdCount()),
		})
	}

	if len(newShards) > 0 || len(deletedShards) > 0 {
		// if changed, set to the new ec shard map
		dn.doUpdateEcShards(actualShards)
	}

	return
}
|
2019-05-26 15:21:17 +08:00
|
|
|
|
2022-04-06 10:03:02 +08:00
|
|
|
func (dn *DataNode) HasEcShards(volumeId needle.VolumeId) (found bool) {
|
2021-02-16 18:47:02 +08:00
|
|
|
dn.RLock()
|
|
|
|
defer dn.RUnlock()
|
|
|
|
for _, c := range dn.children {
|
|
|
|
disk := c.(*Disk)
|
|
|
|
_, found = disk.ecShards[volumeId]
|
|
|
|
if found {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (dn *DataNode) doUpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo) {
|
|
|
|
dn.Lock()
|
|
|
|
for _, c := range dn.children {
|
|
|
|
disk := c.(*Disk)
|
|
|
|
disk.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo)
|
|
|
|
}
|
|
|
|
for _, shard := range actualShards {
|
|
|
|
disk := dn.getOrCreateDisk(shard.DiskType)
|
|
|
|
disk.ecShards[shard.VolumeId] = shard
|
|
|
|
}
|
|
|
|
dn.Unlock()
|
|
|
|
}
|
|
|
|
|
2019-05-26 15:21:17 +08:00
|
|
|
func (dn *DataNode) DeltaUpdateEcShards(newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
|
|
|
|
|
|
|
|
for _, newShard := range newShards {
|
|
|
|
dn.AddOrUpdateEcShard(newShard)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, deletedShard := range deletedShards {
|
|
|
|
dn.DeleteEcShard(deletedShard)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
func (dn *DataNode) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) {
|
2021-02-16 18:47:02 +08:00
|
|
|
disk := dn.getOrCreateDisk(s.DiskType)
|
|
|
|
disk.AddOrUpdateEcShard(s)
|
2019-05-26 15:21:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (dn *DataNode) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
|
2021-02-16 18:47:02 +08:00
|
|
|
disk := dn.getOrCreateDisk(s.DiskType)
|
|
|
|
disk.DeleteEcShard(s)
|
2019-05-26 15:21:17 +08:00
|
|
|
}
|
2019-05-26 15:49:15 +08:00
|
|
|
|
2021-02-16 18:47:02 +08:00
|
|
|
func (dn *DataNode) HasVolumesById(volumeId needle.VolumeId) (hasVolumeId bool) {
|
2019-05-26 15:49:15 +08:00
|
|
|
|
|
|
|
dn.RLock()
|
2021-02-16 18:47:02 +08:00
|
|
|
defer dn.RUnlock()
|
|
|
|
for _, c := range dn.children {
|
|
|
|
disk := c.(*Disk)
|
|
|
|
if disk.HasVolumesById(volumeId) {
|
|
|
|
return true
|
|
|
|
}
|
2019-05-26 15:49:15 +08:00
|
|
|
}
|
2021-02-16 18:47:02 +08:00
|
|
|
return false
|
2019-05-26 15:49:15 +08:00
|
|
|
|
|
|
|
}
|