package shell

import (
	"context"
	"flag"
	"fmt"
	"io"
	"math"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

func init() {
	commands = append(commands, &commandEcBalance{})
}
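
// commandEcBalance implements the ec.balance shell command, which spreads
// erasure-coded shards more evenly across volume servers.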
type commandEcBalance struct {
}

func (c *commandEcBalance) Name() string {
	return "ec.balance"
}

func (c *commandEcBalance) Help() string {
	return `balance all ec shards among volume servers

	ec.balance [-collection EACH_COLLECTION|<collection_name>] [-force] [-dataCenter <data_center>]

	Algorithm:

	For each type of volume server (different max volume count limit){
		for each collection {
			balanceEcVolumes()
		}
	}

	func balanceEcVolumes(){
		idealWritableVolumes = totalWritableVolumes / numVolumeServers
		for {
			sort all volume servers ordered by the number of local writable volumes
			pick the volume server A with the lowest number of writable volumes x
			pick the volume server B with the highest number of writable volumes y
			if y > idealWritableVolumes and x+1 <= idealWritableVolumes {
				if B has a writable volume id v that A does not have {
					move writable volume v from B to A
				}
			}
		}
	}

`
}
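
// Do parses the command flags, fetches the volume topology from the master,
// and balances ec shards for either every collection or the named one.
// Changes are only applied when -force is specified.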
func (c *commandEcBalance) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) {

	balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
	dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
	applyBalancing := balanceCommand.Bool("force", false, "apply the balancing plan")
	if err = balanceCommand.Parse(args); err != nil {
		return nil
	}

	var resp *master_pb.VolumeListResponse
	ctx := context.Background()
	err = commandEnv.masterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
		resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
		return err
	})
	if err != nil {
		return err
	}

	typeToNodes := collectVolumeServersByType(resp.TopologyInfo, *dc)
	for _, volumeServers := range typeToNodes {

		fmt.Printf("balanceEcVolumes servers %d\n", len(volumeServers))

		if len(volumeServers) < 2 {
			continue
		}

		if *collection == "EACH_COLLECTION" {
			collections, err := ListCollectionNames(commandEnv, false, true)
			if err != nil {
				return err
			}
			fmt.Printf("balanceEcVolumes collections %+v\n", len(collections))
			for _, c := range collections {
				fmt.Printf("balanceEcVolumes collection %+v\n", c)
				if err = balanceEcVolumes(commandEnv, c, *applyBalancing); err != nil {
					return err
				}
			}
		} else {
			if err = balanceEcVolumes(commandEnv, *collection, *applyBalancing); err != nil {
				return err
			}
		}

	}

	return nil
}
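
// balanceEcVolumes balances the ec shards of one collection: for each ec
// volume it first removes duplicate copies of the same shard, then spreads
// the remaining shards more evenly across the ec nodes.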
func balanceEcVolumes(commandEnv *commandEnv, collection string, applyBalancing bool) error {

	ctx := context.Background()

	fmt.Printf("balanceEcVolumes %s\n", collection)

	// collect all ec nodes
	allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv)
	if err != nil {
		return err
	}
	if totalFreeEcSlots < 1 {
		return fmt.Errorf("no free ec shard slots. only %d left", totalFreeEcSlots)
	}

	// vid => []ecNode
	vidLocations := make(map[needle.VolumeId][]*EcNode)
	for _, ecNode := range allEcNodes {
		for _, shardInfo := range ecNode.info.EcShardInfos {
			vidLocations[needle.VolumeId(shardInfo.Id)] = append(vidLocations[needle.VolumeId(shardInfo.Id)], ecNode)
		}
	}

	for vid, locations := range vidLocations {

		if err := doDeduplicateEcShards(ctx, commandEnv, collection, vid, locations, applyBalancing); err != nil {
			return err
		}

		if err := doBalanceEcShards(ctx, commandEnv, collection, vid, locations, allEcNodes, applyBalancing); err != nil {
			return err
		}

	}

	return nil
}
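
// doBalanceEcShards checks whether any ec node holds more shards of this
// volume than the per-node average and, if so, spreads the excess shards
// onto other ec nodes that still have free slots.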
func doBalanceEcShards(ctx context.Context, commandEnv *commandEnv, collection string, vid needle.VolumeId, locations []*EcNode, allEcNodes []*EcNode, applyBalancing bool) error {

	// collect all ec nodes with at least one free slot
	var possibleDestinationEcNodes []*EcNode
	for _, ecNode := range allEcNodes {
		if ecNode.freeEcSlot > 0 {
			possibleDestinationEcNodes = append(possibleDestinationEcNodes, ecNode)
		}
	}

	// calculate average number of shards an ec node should have for one volume
	averageShardsPerEcNode := int(math.Ceil(float64(erasure_coding.TotalShardsCount) / float64(len(possibleDestinationEcNodes))))
	fmt.Printf("vid %d averageShardsPerEcNode %+v\n", vid, averageShardsPerEcNode)

	// check whether this volume has ecNodes that are over average
	isOverLimit := false
	for _, ecNode := range locations {
		shardBits := findEcVolumeShards(ecNode, vid)
		if shardBits.ShardIdCount() > averageShardsPerEcNode {
			isOverLimit = true
			fmt.Printf("vid %d %s has %d shards, isOverLimit %+v\n", vid, ecNode.info.Id, shardBits.ShardIdCount(), isOverLimit)
			break
		}
	}

	if isOverLimit {
		if err := spreadShardsIntoMoreDataNodes(ctx, commandEnv, averageShardsPerEcNode, collection, vid, locations, possibleDestinationEcNodes, applyBalancing); err != nil {
			return err
		}
	}

	return nil
}
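
// doDeduplicateEcShards removes redundant copies of the same ec shard: when a
// shard id is present on more than one node, the first node after sorting
// keeps its copy and the duplicates are unmounted and deleted (only when
// applyBalancing is true).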
func doDeduplicateEcShards(ctx context.Context, commandEnv *commandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error {

	// map each shard id to the ec nodes that hold a copy of it
	shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount)
	for _, ecNode := range locations {
		shardBits := findEcVolumeShards(ecNode, vid)
		for _, shardId := range shardBits.ShardIds() {
			shardToLocations[shardId] = append(shardToLocations[shardId], ecNode)
		}
	}

	for shardId, ecNodes := range shardToLocations {
		if len(ecNodes) <= 1 {
			continue
		}
		sortEcNodes(ecNodes)
		fmt.Printf("ec shard %d.%d has %d copies, keeping %v\n", vid, shardId, len(ecNodes), ecNodes[0].info.Id)
		if !applyBalancing {
			continue
		}

		duplicatedShardIds := []uint32{uint32(shardId)}
		for _, ecNode := range ecNodes[1:] {
			if err := unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
				return err
			}
			if err := sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
				return err
			}
			ecNode.freeEcSlot++
		}
	}

	return nil
}
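
// spreadShardsIntoMoreDataNodes moves shards off nodes that hold more than
// averageShardsPerEcNode shards of this volume, one shard at a time, until
// each over-loaded node is back at or below the average.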
func spreadShardsIntoMoreDataNodes(ctx context.Context, commandEnv *commandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {

	for _, ecNode := range existingLocations {

		shardBits := findEcVolumeShards(ecNode, vid)
		overLimitCount := shardBits.ShardIdCount() - averageShardsPerEcNode

		for _, shardId := range shardBits.ShardIds() {

			if overLimitCount <= 0 {
				break
			}

			fmt.Printf("%s has %d overlimit, moving ec shard %d.%d\n", ecNode.info.Id, overLimitCount, vid, shardId)

			err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
			if err != nil {
				return err
			}

			overLimitCount--
		}
	}

	return nil
}
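
// pickOneEcNodeAndMoveOneShard picks the first suitable destination (a
// different node with a free slot that is still below the per-node average
// for this volume) and moves one shard there, adjusting the free-slot
// counters of both nodes.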
func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *commandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {

	sortEcNodes(possibleDestinationEcNodes)

	for _, destEcNode := range possibleDestinationEcNodes {
		if destEcNode.info.Id == existingLocation.info.Id {
			continue
		}

		if destEcNode.freeEcSlot <= 0 {
			continue
		}
		if findEcVolumeShards(destEcNode, vid).ShardIdCount() >= averageShardsPerEcNode {
			continue
		}

		fmt.Printf("%s moves ec shard %d.%d to %s\n", existingLocation.info.Id, vid, shardId, destEcNode.info.Id)

		err := moveMountedShardToEcNode(ctx, commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing)
		if err != nil {
			return err
		}

		destEcNode.freeEcSlot--
		existingLocation.freeEcSlot++
		return nil
	}

	return nil
}
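
// findEcVolumeShards returns the shard bitmap that ecNode holds for the given
// volume id, or 0 if the node has no shards of that volume.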
func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits {

	for _, shardInfo := range ecNode.info.EcShardInfos {
		if needle.VolumeId(shardInfo.Id) == vid {
			return erasure_coding.ShardBits(shardInfo.EcIndexBits)
		}
	}

	return 0
}