2019-03-18 11:27:08 +08:00
|
|
|
package shell
|
|
|
|
|
|
|
|
import (
|
2021-02-16 18:47:02 +08:00
|
|
|
"bytes"
|
2021-12-06 13:54:40 +08:00
|
|
|
"flag"
|
2019-03-18 11:27:08 +08:00
|
|
|
"fmt"
|
2022-07-29 15:17:28 +08:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
|
2022-04-18 10:35:43 +08:00
|
|
|
"golang.org/x/exp/slices"
|
2022-04-19 14:44:41 +08:00
|
|
|
"path/filepath"
|
2023-09-26 00:35:16 +08:00
|
|
|
"strings"
|
2024-08-16 15:20:00 +08:00
|
|
|
"time"
|
2019-05-25 04:28:44 +08:00
|
|
|
|
2019-03-18 11:27:08 +08:00
|
|
|
"io"
|
|
|
|
)
|
|
|
|
|
|
|
|
// init registers the volume.list command with the shell's global command registry.
func init() {
	Commands = append(Commands, &commandVolumeList{})
}
|
|
|
|
|
|
|
|
// commandVolumeList implements `volume.list`, printing the cluster topology
// as a tree of dataCenter > rack > dataNode > disk > volume.
// The pointer fields are bound to command line flags in Do.
type commandVolumeList struct {
	collectionPattern *string // wildcard filter ('*' and '?') matched against the collection name
	dataCenter        *string // restrict output to this data center id, "" for all
	rack              *string // restrict output to this rack id, "" for all
	dataNode          *string // restrict output to this data node id, "" for all
	readonly          *bool   // when true, show only readonly volumes
	volumeId          *uint64 // when non-zero, show only this volume id
}
|
|
|
|
|
|
|
|
// Name returns the shell command name used to invoke this command.
func (c *commandVolumeList) Name() string {
	return "volume.list"
}
|
|
|
|
|
|
|
|
// Help returns the usage text shown by the shell's help command.
func (c *commandVolumeList) Help() string {
	return `list all volumes

	This command list all volumes as a tree of dataCenter > rack > dataNode > volume.

`
}
|
|
|
|
|
2024-09-30 01:38:22 +08:00
|
|
|
// HasTag reports whether this command carries the given tag; volume.list has none.
func (c *commandVolumeList) HasTag(CommandTag) bool {
	return false
}
|
|
|
|
|
2019-06-05 16:30:24 +08:00
|
|
|
func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
|
2019-03-18 11:27:08 +08:00
|
|
|
|
2021-12-06 13:54:40 +08:00
|
|
|
volumeListCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
|
|
|
verbosityLevel := volumeListCommand.Int("v", 5, "verbose mode: 0, 1, 2, 3, 4, 5")
|
2022-04-19 14:44:41 +08:00
|
|
|
c.collectionPattern = volumeListCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
|
|
|
|
c.readonly = volumeListCommand.Bool("readonly", false, "show only readonly")
|
|
|
|
c.volumeId = volumeListCommand.Uint64("volumeId", 0, "show only volume id")
|
2022-11-30 22:30:55 +08:00
|
|
|
c.dataCenter = volumeListCommand.String("dataCenter", "", "show volumes only from the specified data center")
|
|
|
|
c.rack = volumeListCommand.String("rack", "", "show volumes only from the specified rack")
|
|
|
|
c.dataNode = volumeListCommand.String("dataNode", "", "show volumes only from the specified data node")
|
2022-04-19 14:44:41 +08:00
|
|
|
|
2021-12-06 13:54:40 +08:00
|
|
|
if err = volumeListCommand.Parse(args); err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-02-22 16:28:42 +08:00
|
|
|
// collect topology information
|
2022-02-08 16:53:55 +08:00
|
|
|
topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv, 0)
|
2019-03-18 11:27:08 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-04-19 14:44:41 +08:00
|
|
|
c.writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
|
2019-03-18 11:27:08 +08:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-02-16 18:47:02 +08:00
|
|
|
func diskInfosToString(diskInfos map[string]*master_pb.DiskInfo) string {
|
|
|
|
var buf bytes.Buffer
|
|
|
|
for diskType, diskInfo := range diskInfos {
|
2021-02-17 02:55:30 +08:00
|
|
|
if diskType == "" {
|
|
|
|
diskType = "hdd"
|
|
|
|
}
|
2021-02-16 18:47:02 +08:00
|
|
|
fmt.Fprintf(&buf, " %s(volume:%d/%d active:%d free:%d remote:%d)", diskType, diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
|
|
|
|
}
|
|
|
|
return buf.String()
|
|
|
|
}
|
|
|
|
|
|
|
|
func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
|
|
|
|
var buf bytes.Buffer
|
|
|
|
fmt.Fprintf(&buf, "volume:%d/%d active:%d free:%d remote:%d", diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
|
|
|
|
return buf.String()
|
|
|
|
}
|
|
|
|
|
2022-04-19 14:44:41 +08:00
|
|
|
func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
|
2021-12-06 13:54:40 +08:00
|
|
|
output(verbosityLevel >= 0, writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
|
2023-09-26 00:35:16 +08:00
|
|
|
slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) int {
|
|
|
|
return strings.Compare(a.Id, b.Id)
|
2019-05-06 11:23:50 +08:00
|
|
|
})
|
2019-04-07 00:25:29 +08:00
|
|
|
var s statistics
|
2019-03-18 11:27:08 +08:00
|
|
|
for _, dc := range t.DataCenterInfos {
|
2022-11-30 22:30:55 +08:00
|
|
|
if *c.dataCenter != "" && *c.dataCenter != dc.Id {
|
|
|
|
continue
|
|
|
|
}
|
2022-04-19 14:44:41 +08:00
|
|
|
s = s.plus(c.writeDataCenterInfo(writer, dc, verbosityLevel))
|
2019-03-18 11:27:08 +08:00
|
|
|
}
|
2021-12-06 13:54:40 +08:00
|
|
|
output(verbosityLevel >= 0, writer, "%+v \n", s)
|
2019-04-07 00:25:29 +08:00
|
|
|
return s
|
2019-03-18 11:27:08 +08:00
|
|
|
}
|
2022-04-19 14:44:41 +08:00
|
|
|
|
|
|
|
func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
|
2021-12-06 13:54:40 +08:00
|
|
|
output(verbosityLevel >= 1, writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
|
2019-04-07 00:25:29 +08:00
|
|
|
var s statistics
|
2023-09-26 00:35:16 +08:00
|
|
|
slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) int {
|
|
|
|
return strings.Compare(a.Id, b.Id)
|
2019-05-06 11:23:50 +08:00
|
|
|
})
|
2019-03-18 11:27:08 +08:00
|
|
|
for _, r := range t.RackInfos {
|
2022-11-30 22:30:55 +08:00
|
|
|
if *c.rack != "" && *c.rack != r.Id {
|
|
|
|
continue
|
|
|
|
}
|
2022-04-19 14:44:41 +08:00
|
|
|
s = s.plus(c.writeRackInfo(writer, r, verbosityLevel))
|
2019-03-18 11:27:08 +08:00
|
|
|
}
|
2021-12-06 13:54:40 +08:00
|
|
|
output(verbosityLevel >= 1, writer, " DataCenter %s %+v \n", t.Id, s)
|
2019-04-07 00:25:29 +08:00
|
|
|
return s
|
2019-03-18 11:27:08 +08:00
|
|
|
}
|
2022-04-19 14:44:41 +08:00
|
|
|
|
|
|
|
func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
|
2021-12-06 13:54:40 +08:00
|
|
|
output(verbosityLevel >= 2, writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
|
2019-04-07 00:25:29 +08:00
|
|
|
var s statistics
|
2023-09-26 00:35:16 +08:00
|
|
|
slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) int {
|
|
|
|
return strings.Compare(a.Id, b.Id)
|
2019-05-06 11:23:50 +08:00
|
|
|
})
|
2019-03-18 11:27:08 +08:00
|
|
|
for _, dn := range t.DataNodeInfos {
|
2022-11-30 22:30:55 +08:00
|
|
|
if *c.dataNode != "" && *c.dataNode != dn.Id {
|
|
|
|
continue
|
|
|
|
}
|
2022-04-19 14:44:41 +08:00
|
|
|
s = s.plus(c.writeDataNodeInfo(writer, dn, verbosityLevel))
|
2019-03-18 11:27:08 +08:00
|
|
|
}
|
2021-12-06 13:54:40 +08:00
|
|
|
output(verbosityLevel >= 2, writer, " Rack %s %+v \n", t.Id, s)
|
2019-04-07 00:25:29 +08:00
|
|
|
return s
|
2019-03-18 11:27:08 +08:00
|
|
|
}
|
2022-04-19 14:44:41 +08:00
|
|
|
|
|
|
|
func (c *commandVolumeList) writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
|
2021-12-06 13:54:40 +08:00
|
|
|
output(verbosityLevel >= 3, writer, " DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
|
2019-04-07 00:25:29 +08:00
|
|
|
var s statistics
|
2021-02-16 18:47:02 +08:00
|
|
|
for _, diskInfo := range t.DiskInfos {
|
2022-04-19 14:44:41 +08:00
|
|
|
s = s.plus(c.writeDiskInfo(writer, diskInfo, verbosityLevel))
|
2021-02-16 18:47:02 +08:00
|
|
|
}
|
2021-12-06 13:54:40 +08:00
|
|
|
output(verbosityLevel >= 3, writer, " DataNode %s %+v \n", t.Id, s)
|
2021-02-16 18:47:02 +08:00
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
2022-04-19 14:44:41 +08:00
|
|
|
func (c *commandVolumeList) isNotMatchDiskInfo(readOnly bool, collection string, volumeId uint32) bool {
|
|
|
|
if *c.readonly && !readOnly {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if *c.collectionPattern != "" {
|
|
|
|
if matched, _ := filepath.Match(*c.collectionPattern, collection); !matched {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if *c.volumeId > 0 && *c.volumeId != uint64(volumeId) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
|
2021-02-16 18:47:02 +08:00
|
|
|
var s statistics
|
2021-02-17 02:55:30 +08:00
|
|
|
diskType := t.Type
|
|
|
|
if diskType == "" {
|
|
|
|
diskType = "hdd"
|
|
|
|
}
|
2021-12-06 13:54:40 +08:00
|
|
|
output(verbosityLevel >= 4, writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
|
2023-09-26 00:35:16 +08:00
|
|
|
slices.SortFunc(t.VolumeInfos, func(a, b *master_pb.VolumeInformationMessage) int {
|
|
|
|
return int(a.Id - b.Id)
|
2019-05-06 11:23:50 +08:00
|
|
|
})
|
2019-03-18 11:27:08 +08:00
|
|
|
for _, vi := range t.VolumeInfos {
|
2022-04-19 14:44:41 +08:00
|
|
|
if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id) {
|
|
|
|
continue
|
|
|
|
}
|
2021-12-06 13:54:40 +08:00
|
|
|
s = s.plus(writeVolumeInformationMessage(writer, vi, verbosityLevel))
|
2019-03-18 11:27:08 +08:00
|
|
|
}
|
2019-05-25 02:52:23 +08:00
|
|
|
for _, ecShardInfo := range t.EcShardInfos {
|
2022-04-19 14:44:41 +08:00
|
|
|
if c.isNotMatchDiskInfo(false, ecShardInfo.Collection, ecShardInfo.Id) {
|
|
|
|
continue
|
|
|
|
}
|
2024-08-16 15:20:00 +08:00
|
|
|
|
2024-10-25 12:41:39 +08:00
|
|
|
var expireAtString string
|
2024-10-25 12:35:11 +08:00
|
|
|
destroyTime := ecShardInfo.ExpireAtSec
|
2024-08-16 15:20:00 +08:00
|
|
|
if destroyTime > 0 {
|
2024-10-25 12:41:39 +08:00
|
|
|
expireAtString = fmt.Sprintf("expireAt:%s", time.Unix(int64(destroyTime), 0).Format("2006-01-02 15:04:05"))
|
2024-08-16 15:20:00 +08:00
|
|
|
}
|
2024-10-25 12:41:39 +08:00
|
|
|
output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v %s\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds(), expireAtString)
|
2019-05-25 02:52:23 +08:00
|
|
|
}
|
2021-12-06 13:54:40 +08:00
|
|
|
output(verbosityLevel >= 4, writer, " Disk %s %+v \n", diskType, s)
|
2019-04-07 00:25:29 +08:00
|
|
|
return s
|
2019-03-18 11:27:08 +08:00
|
|
|
}
|
2021-02-16 18:47:02 +08:00
|
|
|
|
2021-12-06 13:54:40 +08:00
|
|
|
// writeVolumeInformationMessage prints one volume record at the highest
// verbosity level and returns its statistics contribution.
func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage, verbosityLevel int) statistics {
	output(verbosityLevel >= 5, writer, "          volume %+v \n", t)
	return newStatistics(t)
}
|
|
|
|
|
2021-12-06 13:54:40 +08:00
|
|
|
func output(condition bool, w io.Writer, format string, a ...interface{}) {
|
|
|
|
if condition {
|
|
|
|
fmt.Fprintf(w, format, a...)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-07 00:25:29 +08:00
|
|
|
// statistics accumulates per-volume counters; values are summed up the
// tree (disk > dataNode > rack > dataCenter > topology) via plus.
type statistics struct {
	Size             uint64 // total bytes stored
	FileCount        uint64 // total number of files
	DeletedFileCount uint64 // number of deleted (not yet vacuumed) files
	DeletedBytes     uint64 // bytes occupied by deleted files
}
|
|
|
|
|
2019-09-29 14:17:37 +08:00
|
|
|
// newStatistics seeds a statistics value from a single volume's counters.
func newStatistics(t *master_pb.VolumeInformationMessage) statistics {
	return statistics{
		Size:             t.Size,
		FileCount:        t.FileCount,
		DeletedFileCount: t.DeleteCount,
		DeletedBytes:     t.DeletedByteCount,
	}
}
|
|
|
|
|
|
|
|
func (s statistics) plus(t statistics) statistics {
|
|
|
|
return statistics{
|
|
|
|
Size: s.Size + t.Size,
|
|
|
|
FileCount: s.FileCount + t.FileCount,
|
|
|
|
DeletedFileCount: s.DeletedFileCount + t.DeletedFileCount,
|
|
|
|
DeletedBytes: s.DeletedBytes + t.DeletedBytes,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s statistics) String() string {
|
2019-04-07 02:12:35 +08:00
|
|
|
if s.DeletedFileCount > 0 {
|
2019-04-07 00:25:29 +08:00
|
|
|
return fmt.Sprintf("total size:%d file_count:%d deleted_file:%d deleted_bytes:%d", s.Size, s.FileCount, s.DeletedFileCount, s.DeletedBytes)
|
|
|
|
}
|
|
|
|
return fmt.Sprintf("total size:%d file_count:%d", s.Size, s.FileCount)
|
2019-03-18 11:27:08 +08:00
|
|
|
}
|