package topology

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"sync"
	"time"

	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

/*
This package is created to resolve these replica placement issues:
1. growth factor for each replica level, e.g., add 10 volumes for 1 copy, 20 volumes for 2 copies, 30 volumes for 3 copies
2. in time of tight storage, how to reduce replica level
3. optimizing for hot data on faster disk, cold data on cheaper storage
4. volume allocation for each bucket
*/

type VolumeGrowRequest struct {
	Option *VolumeGrowOption
	Count  int
}

type volumeGrowthStrategy struct {
	Copy1Count     int
	Copy2Count     int
	Copy3Count     int
	CopyOtherCount int
	Threshold      float64
}

var (
	VolumeGrowStrategy = volumeGrowthStrategy{
		Copy1Count:     7,
		Copy2Count:     6,
		Copy3Count:     3,
		CopyOtherCount: 1,
		Threshold:      0.9,
	}
)
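
// For illustration: with the defaults above, an un-replicated collection
// (replication "000") grows 7 volumes per request, while a 3-copy one
// ("200", "110", or "002") grows only 3, since each logical volume there
// costs three physical copies.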

type VolumeGrowOption struct {
	Collection         string                        `json:"collection,omitempty"`
	ReplicaPlacement   *super_block.ReplicaPlacement `json:"replication,omitempty"`
	Ttl                *needle.TTL                   `json:"ttl,omitempty"`
	DiskType           types.DiskType                `json:"disk,omitempty"`
	Preallocate        int64                         `json:"preallocate,omitempty"`
	DataCenter         string                        `json:"dataCenter,omitempty"`
	Rack               string                        `json:"rack,omitempty"`
	DataNode           string                        `json:"dataNode,omitempty"`
	MemoryMapMaxSizeMb uint32                        `json:"memoryMapMaxSizeMb,omitempty"`
}

type VolumeGrowth struct {
	accessLock sync.Mutex
}

func (o *VolumeGrowOption) String() string {
	blob, _ := json.Marshal(o)
	return string(blob)
}
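
// A sketch of the output (values are illustrative, not from this file): an
// option with Collection "pics" and DataCenter "dc1" marshals to
//
//	{"collection":"pics","dataCenter":"dc1"}
//
// with zero-valued fields dropped by the omitempty tags above.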

func NewDefaultVolumeGrowth() *VolumeGrowth {
	return &VolumeGrowth{}
}

// One replication type may need rp.GetCopyCount() actual volumes.
// Given copyCount, decide how many logical volumes to create.
func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
	switch copyCount {
	case 1:
		count = VolumeGrowStrategy.Copy1Count
	case 2:
		count = VolumeGrowStrategy.Copy2Count
	case 3:
		count = VolumeGrowStrategy.Copy3Count
	default:
		count = VolumeGrowStrategy.CopyOtherCount
	}
	return
}
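
// Usage sketch (illustrative): for replication "001", two copies on the same
// rack, rp.GetCopyCount() returns 2, so one growth request creates
// VolumeGrowStrategy.Copy2Count volumes:
//
//	count := vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount()) // 6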

func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology, targetCount int) (result []*master_pb.VolumeLocation, err error) {
	if targetCount == 0 {
		targetCount = vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount())
	}
	result, err = vg.GrowByCountAndType(grpcDialOption, targetCount, option, topo)
	if len(result) > 0 && len(result)%option.ReplicaPlacement.GetCopyCount() == 0 {
		return result, nil
	}
	return result, err
}
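
// Note on the final check above: each fully grown volume contributes
// GetCopyCount() locations to the result, so a length that is a positive
// multiple of the copy count means every returned volume has a complete
// replica set; in that case any trailing partial-growth error is dropped.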

func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targetCount int, option *VolumeGrowOption, topo *Topology) (result []*master_pb.VolumeLocation, err error) {
	vg.accessLock.Lock()
	defer vg.accessLock.Unlock()

	for i := 0; i < targetCount; i++ {
		if res, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil {
			result = append(result, res...)
		} else {
			glog.V(0).Infof("create %d volumes, created %d: %v", targetCount, len(result), e)
			return result, e
		}
	}
	return
}

func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topology, option *VolumeGrowOption) (result []*master_pb.VolumeLocation, err error) {
	servers, e := vg.findEmptySlotsForOneVolume(topo, option)
	if e != nil {
		return nil, e
	}
	vid, raftErr := topo.NextVolumeId()
	if raftErr != nil {
		return nil, raftErr
	}
	if err = vg.grow(grpcDialOption, topo, vid, option, servers...); err == nil {
		for _, server := range servers {
			result = append(result, &master_pb.VolumeLocation{
				Url:        server.Url(),
				PublicUrl:  server.PublicUrl,
				DataCenter: server.GetDataCenterId(),
				NewVids:    []uint32{uint32(vid)},
			})
		}
	}
	return
}

// 1. find the main data node
// 1.1 collect all data nodes that have 1 slot
// 1.2 collect all racks that have rp.SameRackCount+1
// 1.3 collect all data centers that have DiffRackCount+rp.SameRackCount+1
// 2. find rest data nodes
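//
// Worked example (illustrative): for replication "110" (DiffDataCenterCount=1,
// DiffRackCount=1, SameRackCount=0) this picks 2 data centers, then 2 racks in
// the main data center, then 1 server in the main rack; the remaining two
// copies are reserved on the other rack and the other data center.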
func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *VolumeGrowOption) (servers []*DataNode, err error) {
	// find the main data center and other data centers
	rp := option.ReplicaPlacement
	mainDataCenter, otherDataCenters, dcErr := topo.PickNodesByWeight(rp.DiffDataCenterCount+1, option, func(node Node) error {
		if option.DataCenter != "" && node.IsDataCenter() && node.Id() != NodeId(option.DataCenter) {
			return fmt.Errorf("Not matching preferred data center:%s", option.DataCenter)
		}
		if len(node.Children()) < rp.DiffRackCount+1 {
			return fmt.Errorf("Only has %d racks, not enough for %d.", len(node.Children()), rp.DiffRackCount+1)
		}
		if node.AvailableSpaceFor(option) < int64(rp.DiffRackCount+rp.SameRackCount+1) {
			return fmt.Errorf("Free:%d < Expected:%d", node.AvailableSpaceFor(option), rp.DiffRackCount+rp.SameRackCount+1)
		}
		possibleRacksCount := 0
		for _, rack := range node.Children() {
			possibleDataNodesCount := 0
			for _, n := range rack.Children() {
				if n.AvailableSpaceFor(option) >= 1 {
					possibleDataNodesCount++
				}
			}
			if possibleDataNodesCount >= rp.SameRackCount+1 {
				possibleRacksCount++
			}
		}
		if possibleRacksCount < rp.DiffRackCount+1 {
			return fmt.Errorf("Only has %d racks with at least %d free data nodes, not enough for %d.", possibleRacksCount, rp.SameRackCount+1, rp.DiffRackCount+1)
		}
		return nil
	})
	if dcErr != nil {
		return nil, dcErr
	}

	// find the main rack and other racks
	mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).PickNodesByWeight(rp.DiffRackCount+1, option, func(node Node) error {
		if option.Rack != "" && node.IsRack() && node.Id() != NodeId(option.Rack) {
			return fmt.Errorf("Not matching preferred rack:%s", option.Rack)
		}
		if node.AvailableSpaceFor(option) < int64(rp.SameRackCount+1) {
			return fmt.Errorf("Free:%d < Expected:%d", node.AvailableSpaceFor(option), rp.SameRackCount+1)
		}
		if len(node.Children()) < rp.SameRackCount+1 {
			// a cheap pre-check: too few data nodes even before counting free slots
			return fmt.Errorf("Only has %d data nodes, not enough for %d.", len(node.Children()), rp.SameRackCount+1)
		}
		possibleDataNodesCount := 0
		for _, n := range node.Children() {
			if n.AvailableSpaceFor(option) >= 1 {
				possibleDataNodesCount++
			}
		}
		if possibleDataNodesCount < rp.SameRackCount+1 {
			return fmt.Errorf("Only has %d data nodes with a slot, not enough for %d.", possibleDataNodesCount, rp.SameRackCount+1)
		}
		return nil
	})
	if rackErr != nil {
		return nil, rackErr
	}

	// find the main server and other servers
	mainServer, otherServers, serverErr := mainRack.(*Rack).PickNodesByWeight(rp.SameRackCount+1, option, func(node Node) error {
		if option.DataNode != "" && node.IsDataNode() && node.Id() != NodeId(option.DataNode) {
			return fmt.Errorf("Not matching preferred data node:%s", option.DataNode)
		}
		if node.AvailableSpaceFor(option) < 1 {
			return fmt.Errorf("Free:%d < Expected:%d", node.AvailableSpaceFor(option), 1)
		}
		return nil
	})
	if serverErr != nil {
		return nil, serverErr
	}

	servers = append(servers, mainServer.(*DataNode))
	for _, server := range otherServers {
		servers = append(servers, server.(*DataNode))
	}
	for _, rack := range otherRacks {
		r := rand.Int63n(rack.AvailableSpaceFor(option))
		if server, e := rack.ReserveOneVolume(r, option); e == nil {
			servers = append(servers, server)
		} else {
			return servers, e
		}
	}
	for _, datacenter := range otherDataCenters {
		r := rand.Int63n(datacenter.AvailableSpaceFor(option))
		if server, e := datacenter.ReserveOneVolume(r, option); e == nil {
			servers = append(servers, server)
		} else {
			return servers, e
		}
	}
	return
}
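
// Design note (an assumption about ReserveOneVolume's contract, which is
// defined elsewhere in this package): the random offset r is drawn from
// [0, AvailableSpaceFor(option)), so subtrees with more free slots are
// proportionally more likely to be chosen for the extra replicas.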

// grow creates volume vid on each of the given servers. The volume is
// registered with the topology only if every replica is created successfully;
// otherwise the replicas that were already created are deleted again.
func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid needle.VolumeId, option *VolumeGrowOption, servers ...*DataNode) (growErr error) {
	var createdVolumes []storage.VolumeInfo
	for _, server := range servers {
		if err := AllocateVolume(server, grpcDialOption, vid, option); err == nil {
			createdVolumes = append(createdVolumes, storage.VolumeInfo{
				Id:               vid,
				Size:             0,
				Collection:       option.Collection,
				ReplicaPlacement: option.ReplicaPlacement,
				Ttl:              option.Ttl,
				Version:          needle.CurrentVersion,
				DiskType:         option.DiskType.String(),
				ModifiedAtSecond: time.Now().Unix(),
			})
			glog.V(0).Infof("Created Volume %d on %s", vid, server.NodeImpl.String())
		} else {
			glog.Warningf("Failed to assign volume %d on %s: %v", vid, server.NodeImpl.String(), err)
			growErr = fmt.Errorf("failed to assign volume %d on %s: %v", vid, server.NodeImpl.String(), err)
			break
		}
	}

	if growErr == nil {
		for i, vi := range createdVolumes {
			server := servers[i]
			server.AddOrUpdateVolume(vi)
			topo.RegisterVolumeLayout(vi, server)
			glog.V(0).Infof("Registered Volume %d on %s", vid, server.NodeImpl.String())
		}
	} else {
		// cleaning up created volume replicas
		for i, vi := range createdVolumes {
			server := servers[i]
			if err := DeleteVolume(server, grpcDialOption, vi.Id); err != nil {
				glog.Warningf("Failed to clean up volume %d on %s", vid, server.NodeImpl.String())
			}
		}
	}

	return growErr
}
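
// End-to-end usage sketch (illustrative; actual call sites live in the master
// server code, outside this file):
//
//	vg := NewDefaultVolumeGrowth()
//	option := &VolumeGrowOption{Collection: "pics", ReplicaPlacement: rp}
//	locations, err := vg.AutomaticGrowByType(option, grpcDialOption, topo, 0)
//
// Passing targetCount 0 lets findVolumeCount choose the batch size from
// VolumeGrowStrategy.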