package shell

import (
	"flag"
	"fmt"
	"io"
	"path/filepath"
	"sync"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)

func init() {
	Commands = append(Commands, &commandVolumeTierMove{})
}
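
// volumeTierMoveJob is one unit of work for a destination server's copy
// worker: move the volume vid away from the source server src.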
type volumeTierMoveJob struct {
	src pb.ServerAddress
	vid needle.VolumeId
}
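
// commandVolumeTierMove changes volumes from one disk type to another.
// activeServers holds one mutex per source server so that only one copy reads
// from a given server at a time, and queues holds one job channel per
// destination server so that copies to different destinations run in parallel.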
type commandVolumeTierMove struct {
	activeServers sync.Map
	queues        map[pb.ServerAddress]chan volumeTierMoveJob
	//activeServers map[pb.ServerAddress]struct{}
	//activeServersLock sync.Mutex
	//activeServersCond *sync.Cond
}

func (c *commandVolumeTierMove) Name() string {
	return "volume.tier.move"
}

func (c *commandVolumeTierMove) Help() string {
	return `change a volume from one disk type to another

	volume.tier.move -fromDiskType=hdd -toDiskType=ssd [-collectionPattern=""] [-fullPercent=95] [-quietFor=1h] [-parallelLimit=4]

	Even if the volume is replicated, only one replica will be changed and the remaining replicas will be dropped.
	So "volume.fix.replication" and "volume.balance" should be run afterwards.

`
}
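
// Do collects the volumes on the source disk type that are full enough and
// quiet long enough, starts one copy worker per destination server, and then
// dispatches one move job per volume, rotating through the destinations.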
func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	collectionPattern := tierCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
	fullPercentage := tierCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
	quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes with no writes for this period")
	source := tierCommand.String("fromDiskType", "", "the source disk type")
	target := tierCommand.String("toDiskType", "", "the target disk type")
	parallelLimit := tierCommand.Int("parallelLimit", 0, "limit the number of parallel copying jobs")
	applyChange := tierCommand.Bool("force", false, "actually apply the changes")
	if err = tierCommand.Parse(args); err != nil {
		return nil
	}
	infoAboutSimulationMode(writer, *applyChange, "-force")

	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}

	fromDiskType := types.ToDiskType(*source)
	toDiskType := types.ToDiskType(*target)

	if fromDiskType == toDiskType {
		return fmt.Errorf("source tier %s is the same as target tier %s", fromDiskType, toDiskType)
	}

	// collect topology information
	topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return err
	}

	// collect all volumes that should change
	volumeIds, err := collectVolumeIdsForTierChange(commandEnv, topologyInfo, volumeSizeLimitMb, fromDiskType, *collectionPattern, *fullPercentage, *quietPeriod)
	if err != nil {
		return err
	}
	fmt.Printf("tier move volumes: %v\n", volumeIds)

	_, allLocations := collectVolumeReplicaLocations(topologyInfo)
	allLocations = filterLocationsByDiskType(allLocations, toDiskType)
	keepDataNodesSorted(allLocations, toDiskType)

	if len(allLocations) > 0 && *parallelLimit > 0 && *parallelLimit < len(allLocations) {
		allLocations = allLocations[:*parallelLimit]
	}

	wg := sync.WaitGroup{}
	bufferLen := len(allLocations)
	c.queues = make(map[pb.ServerAddress]chan volumeTierMoveJob)

	// start one copy worker per destination server
	for _, dst := range allLocations {
		destServerAddress := pb.NewServerAddressFromDataNode(dst.dataNode)
		c.queues[destServerAddress] = make(chan volumeTierMoveJob, bufferLen)

		wg.Add(1)
		go func(dst location, jobs <-chan volumeTierMoveJob, applyChanges bool) {
			defer wg.Done()
			for job := range jobs {
				fmt.Fprintf(writer, "moving volume %d from %s to %s with disk type %s ...\n", job.vid, job.src, dst.dataNode.Id, toDiskType.ReadableString())

				locations, found := commandEnv.MasterClient.GetLocations(uint32(job.vid))
				if !found {
					fmt.Printf("volume %d not found\n", job.vid)
					continue
				}

				// serialize copies that read from the same source server
				unlock := c.Lock(job.src)

				if applyChanges {
					if err := c.doMoveOneVolume(commandEnv, writer, job.vid, toDiskType, locations, job.src, dst); err != nil {
						fmt.Fprintf(writer, "move volume %d %s => %s: %v\n", job.vid, job.src, dst.dataNode.Id, err)
					}
				}
				unlock()
			}
		}(dst, c.queues[destServerAddress], *applyChange)
	}

	// dispatch one job per volume, rotating the destination list to spread the load
	for _, vid := range volumeIds {
		if err = c.doVolumeTierMove(commandEnv, writer, vid, toDiskType, allLocations); err != nil {
			fmt.Printf("tier move volume %d: %v\n", vid, err)
		}
		allLocations = rotateDataNodes(allLocations)
	}
	// no more jobs: close the queues and wait for the workers to drain them
	for key := range c.queues {
		close(c.queues[key])
	}

	wg.Wait()

	return nil
}
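
// Lock acquires a per-server mutex and returns the matching unlock function;
// callers do `unlock := c.Lock(addr)` before copying from addr and call
// unlock() when the copy is done, so each source server serves one copy at a time.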
func (c *commandVolumeTierMove) Lock(key pb.ServerAddress) func() {
	value, _ := c.activeServers.LoadOrStore(key, &sync.Mutex{})
	mtx := value.(*sync.Mutex)
	mtx.Lock()

	return func() { mtx.Unlock() }
}
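
// filterLocationsByDiskType keeps only the data nodes that have a disk of the
// requested type.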
func filterLocationsByDiskType(dataNodes []location, diskType types.DiskType) (ret []location) {
	for _, loc := range dataNodes {
		_, found := loc.dataNode.DiskInfos[string(diskType)]
		if found {
			ret = append(ret, loc)
		}
	}
	return
}
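
// rotateDataNodes moves the first destination to the end of the list, yielding
// a simple round-robin order over the destination servers.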
func rotateDataNodes(dataNodes []location) []location {
	if len(dataNodes) > 0 {
		return append(dataNodes[1:], dataNodes[0])
	}
	return dataNodes
}
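
// isOneOf reports whether server is already one of the volume's replica
// locations.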
func isOneOf(server string, locations []wdclient.Location) bool {
	for _, loc := range locations {
		if server == loc.Url {
			return true
		}
	}
	return false
}
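
// doVolumeTierMove picks a destination server with free volume slots for the
// target disk type and enqueues a move job for the volume; the actual copy is
// done by the worker goroutines started in Do.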
func (c *commandVolumeTierMove) doVolumeTierMove(commandEnv *CommandEnv, writer io.Writer, vid needle.VolumeId, toDiskType types.DiskType, allLocations []location) (err error) {
	// find volume location
	locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
	if !found {
		return fmt.Errorf("volume %d not found", vid)
	}

	// find one server with the most free volume slots for the target disk type
	hasFoundTarget := false
	fn := capacityByFreeVolumeCount(toDiskType)
	for _, dst := range allLocations {
		if fn(dst.dataNode) > 0 && !hasFoundTarget {
			// ask the volume server to replicate the volume
			if isOneOf(dst.dataNode.Id, locations) {
				continue
			}
			var sourceVolumeServer pb.ServerAddress
			for _, loc := range locations {
				if loc.Url != dst.dataNode.Id {
					sourceVolumeServer = loc.ServerAddress()
				}
			}
			if sourceVolumeServer == "" {
				continue
			}
			hasFoundTarget = true

			// adjust volume count
			dst.dataNode.DiskInfos[string(toDiskType)].VolumeCount++

			destServerAddress := pb.NewServerAddressFromDataNode(dst.dataNode)
			c.queues[destServerAddress] <- volumeTierMoveJob{sourceVolumeServer, vid}
		}
	}

	if !hasFoundTarget {
		fmt.Fprintf(writer, "cannot find disk type %s for volume %d\n", toDiskType.ReadableString(), vid)
	}

	return nil
}
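
// doMoveOneVolume marks all replicas read-only, live-moves one replica to the
// destination server with the target disk type, and then deletes the remaining
// replicas. If the move fails, the replicas are marked writable again.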
func (c *commandVolumeTierMove) doMoveOneVolume(commandEnv *CommandEnv, writer io.Writer, vid needle.VolumeId, toDiskType types.DiskType, locations []wdclient.Location, sourceVolumeServer pb.ServerAddress, dst location) (err error) {

	// mark all replicas as read only
	if err = markVolumeReplicasWritable(commandEnv.option.GrpcDialOption, vid, locations, false); err != nil {
		return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
	}
	if err = LiveMoveVolume(commandEnv.option.GrpcDialOption, writer, vid, sourceVolumeServer, pb.NewServerAddressFromDataNode(dst.dataNode), 5*time.Second, toDiskType.ReadableString(), true); err != nil {

		// mark all replicas as writable
		if err = markVolumeReplicasWritable(commandEnv.option.GrpcDialOption, vid, locations, true); err != nil {
			glog.Errorf("mark volume %d as writable on %s: %v", vid, locations[0].Url, err)
		}

		return fmt.Errorf("move volume %d %s => %s : %v", vid, locations[0].Url, dst.dataNode.Id, err)
	}

	// remove the remaining replicas
	for _, loc := range locations {
		if loc.Url != dst.dataNode.Id && loc.ServerAddress() != sourceVolumeServer {
			if err = deleteVolume(commandEnv.option.GrpcDialOption, vid, loc.ServerAddress()); err != nil {
				fmt.Fprintf(writer, "failed to delete volume %d on %s: %v\n", vid, loc.Url, err)
			}
			// reduce volume count? Not really necessary since they are "more" full and will not be a candidate to move to
		}
	}
	return nil
}
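
// collectVolumeIdsForTierChange walks the topology and returns the ids of
// volumes on the source disk type that match the collection pattern, have seen
// no writes for the quiet period, and exceed the fullness threshold.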
func collectVolumeIdsForTierChange(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, sourceTier types.DiskType, collectionPattern string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {

	quietSeconds := int64(quietPeriod / time.Second)
	nowUnixSeconds := time.Now().Unix()

	fmt.Printf("collect %s volumes quiet for: %d seconds\n", sourceTier, quietSeconds)

	vidMap := make(map[uint32]bool)
	eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		for _, diskInfo := range dn.DiskInfos {
			for _, v := range diskInfo.VolumeInfos {
				// check collection name pattern
				if collectionPattern != "" {
					matched, err := filepath.Match(collectionPattern, v.Collection)
					if err != nil {
						return
					}
					if !matched {
						continue
					}
				}

				if v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == sourceTier {
					if float64(v.Size) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024 {
						vidMap[v.Id] = true
					}
				}
			}
		}
	})

	for vid := range vidMap {
		vids = append(vids, needle.VolumeId(vid))
	}

	return
}