package shell

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"io"
	"math"

	"golang.org/x/exp/slices"

	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
)

func init() {
	Commands = append(Commands, &commandVolumeCheckDisk{})
}

type commandVolumeCheckDisk struct {
	env *CommandEnv
}

func (c *commandVolumeCheckDisk) Name() string {
	return "volume.check.disk"
}

func (c *commandVolumeCheckDisk) Help() string {
	return `check all replicated volumes to find and fix inconsistencies. It is optional and resource intensive.

	How it works:

	find all volumes that are replicated
	for each volume id, if there are more than 2 replicas, find the pair with the largest two file counts
	for that pair of volumes, A and B
		append entries that are in A but not in B to B
		append entries that are in B but not in A to A

`
}
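
// Do parses the command flags, collects the cluster topology, and then, for
// each replicated volume, reconciles replica pairs by copying missing entries
// between them.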
func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	slowMode := fsckCommand.Bool("slow", false, "slow mode checks all replicas even if the file counts are the same")
	verbose := fsckCommand.Bool("v", false, "verbose mode")
	volumeId := fsckCommand.Uint("volumeId", 0, "the volume id")
	applyChanges := fsckCommand.Bool("force", false, "apply the fix")
	nonRepairThreshold := fsckCommand.Float64("nonRepairThreshold", 0.3, "repair only when the fraction of missing keys is not more than this limit")
	if err = fsckCommand.Parse(args); err != nil {
		return nil
	}
	infoAboutSimulationMode(writer, *applyChanges, "-force")

	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}

	c.env = commandEnv

	// collect topology information
	topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return err
	}
	volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)

	// pick one pair of volume replicas at a time
	fileCount := func(replica *VolumeReplica) uint64 {
		return replica.info.FileCount - replica.info.DeleteCount
	}

	for _, replicas := range volumeReplicas {
		if *volumeId > 0 && replicas[0].info.Id != uint32(*volumeId) {
			continue
		}
		slices.SortFunc(replicas, func(a, b *VolumeReplica) bool {
			return fileCount(a) > fileCount(b)
		})
		for len(replicas) >= 2 {
			a, b := replicas[0], replicas[1]
			if !*slowMode {
				if fileCount(a) == fileCount(b) {
					replicas = replicas[1:]
					continue
				}
			}
			if a.info.ReadOnly || b.info.ReadOnly {
				fmt.Fprintf(writer, "skipping readonly volume %d on %s and %s\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id)
				replicas = replicas[1:]
				continue
			}

			if err := c.syncTwoReplicas(a, b, *applyChanges, *nonRepairThreshold, *verbose, writer); err != nil {
				fmt.Fprintf(writer, "sync volume %d on %s and %s: %v\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, err)
			}
			replicas = replicas[1:]
		}
	}

	return nil
}
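
// syncTwoReplicas repeatedly reconciles replicas a and b until neither side
// reports further changes.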
func (c *commandVolumeCheckDisk) syncTwoReplicas(a *VolumeReplica, b *VolumeReplica, applyChanges bool, nonRepairThreshold float64, verbose bool, writer io.Writer) (err error) {
	aHasChanges, bHasChanges := true, true
	for aHasChanges || bHasChanges {
		if aHasChanges, bHasChanges, err = c.checkBoth(a, b, applyChanges, nonRepairThreshold, verbose, writer); err != nil {
			return err
		}
	}
	return nil
}
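
// checkBoth loads the index databases of both replicas, then copies the
// entries missing on each side from the other side.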
func (c *commandVolumeCheckDisk) checkBoth(a *VolumeReplica, b *VolumeReplica, applyChanges bool, nonRepairThreshold float64, verbose bool, writer io.Writer) (aHasChanges bool, bHasChanges bool, err error) {
	aDB, bDB := needle_map.NewMemDb(), needle_map.NewMemDb()
	defer func() {
		aDB.Close()
		bDB.Close()
	}()

	// read index db
	if err = c.readIndexDatabase(aDB, a.info.Collection, a.info.Id, pb.NewServerAddressFromDataNode(a.location.dataNode), verbose, writer); err != nil {
		return true, true, fmt.Errorf("readIndexDatabase %s volume %d: %v", a.location.dataNode, a.info.Id, err)
	}
	if err := c.readIndexDatabase(bDB, b.info.Collection, b.info.Id, pb.NewServerAddressFromDataNode(b.location.dataNode), verbose, writer); err != nil {
		return true, true, fmt.Errorf("readIndexDatabase %s volume %d: %v", b.location.dataNode, b.info.Id, err)
	}

	// find and make up the differences
	if aHasChanges, err = c.doVolumeCheckDisk(bDB, aDB, b, a, verbose, writer, applyChanges, nonRepairThreshold); err != nil {
		return true, true, fmt.Errorf("doVolumeCheckDisk source:%s target:%s volume %d: %v", b.location.dataNode, a.location.dataNode, b.info.Id, err)
	}
	if bHasChanges, err = c.doVolumeCheckDisk(aDB, bDB, a, b, verbose, writer, applyChanges, nonRepairThreshold); err != nil {
		return true, true, fmt.Errorf("doVolumeCheckDisk source:%s target:%s volume %d: %v", a.location.dataNode, b.location.dataNode, a.info.Id, err)
	}
	return
}
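
// doVolumeCheckDisk finds the entries present in the minuend index but missing
// from the subtrahend index and, when the missing fraction does not exceed
// nonRepairThreshold, reads each missing needle from the source replica and
// writes it to the target replica.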
func (c *commandVolumeCheckDisk) doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *VolumeReplica, verbose bool, writer io.Writer, applyChanges bool, nonRepairThreshold float64) (hasChanges bool, err error) {

	// find missing keys
	// hash join; this could be made more efficient
	var missingNeedles []needle_map.NeedleValue
	var counter int
	minuend.AscendingVisit(func(value needle_map.NeedleValue) error {
		counter++
		if _, found := subtrahend.Get(value.Key); !found {
			missingNeedles = append(missingNeedles, value)
		}
		return nil
	})

	fmt.Fprintf(writer, "volume %d %s has %d entries, %s missed %d entries\n", source.info.Id, source.location.dataNode.Id, counter, target.location.dataNode.Id, len(missingNeedles))

	if counter == 0 || len(missingNeedles) == 0 {
		return false, nil
	}

	missingNeedlesFraction := float64(len(missingNeedles)) / float64(counter)
	if missingNeedlesFraction > nonRepairThreshold {
		return false, fmt.Errorf(
			"failed to start repair volume %d, percentage of missing keys is greater than the threshold: %.2f > %.2f",
			source.info.Id, missingNeedlesFraction, nonRepairThreshold)
	}

	for _, needleValue := range missingNeedles {

		needleBlob, err := c.readSourceNeedleBlob(pb.NewServerAddressFromDataNode(source.location.dataNode), source.info.Id, needleValue)
		if err != nil {
			return hasChanges, err
		}

		if !applyChanges {
			continue
		}

		if verbose {
			fmt.Fprintf(writer, "read %d,%x %s => %s \n", source.info.Id, needleValue.Key, source.location.dataNode.Id, target.location.dataNode.Id)
		}

		hasChanges = true

		if err = c.writeNeedleBlobToTarget(pb.NewServerAddressFromDataNode(target.location.dataNode), source.info.Id, needleValue, needleBlob); err != nil {
			return hasChanges, err
		}

	}

	return
}
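
// readSourceNeedleBlob reads a single needle blob from the source volume
// server at the offset and size recorded in its index entry.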
func (c *commandVolumeCheckDisk) readSourceNeedleBlob(sourceVolumeServer pb.ServerAddress, volumeId uint32, needleValue needle_map.NeedleValue) (needleBlob []byte, err error) {

	err = operation.WithVolumeServerClient(false, sourceVolumeServer, c.env.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		resp, err := client.ReadNeedleBlob(context.Background(), &volume_server_pb.ReadNeedleBlobRequest{
			VolumeId: volumeId,
			NeedleId: uint64(needleValue.Key),
			Offset:   needleValue.Offset.ToActualOffset(),
			Size:     int32(needleValue.Size),
		})
		if err != nil {
			return err
		}
		needleBlob = resp.NeedleBlob
		return nil
	})
	return
}
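
// writeNeedleBlobToTarget appends a single needle blob to the volume on the
// target volume server.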
func (c *commandVolumeCheckDisk) writeNeedleBlobToTarget(targetVolumeServer pb.ServerAddress, volumeId uint32, needleValue needle_map.NeedleValue, needleBlob []byte) error {

	return operation.WithVolumeServerClient(false, targetVolumeServer, c.env.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		_, err := client.WriteNeedleBlob(context.Background(), &volume_server_pb.WriteNeedleBlobRequest{
			VolumeId:   volumeId,
			NeedleId:   uint64(needleValue.Key),
			Size:       int32(needleValue.Size),
			NeedleBlob: needleBlob,
		})
		return err
	})

}
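
// readIndexDatabase copies a volume's .idx file from the volume server and
// loads it into the given in-memory needle map.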
func (c *commandVolumeCheckDisk) readIndexDatabase(db *needle_map.MemDb, collection string, volumeId uint32, volumeServer pb.ServerAddress, verbose bool, writer io.Writer) error {

	var buf bytes.Buffer
	if err := c.copyVolumeIndexFile(collection, volumeId, volumeServer, &buf, verbose, writer); err != nil {
		return err
	}

	if verbose {
		fmt.Fprintf(writer, "load collection %s volume %d index size %d from %s ...\n", collection, volumeId, buf.Len(), volumeServer)
	}

	return db.LoadFromReaderAt(bytes.NewReader(buf.Bytes()))

}
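
// copyVolumeIndexFile streams a volume's .idx file from the volume server
// into buf.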
func (c *commandVolumeCheckDisk) copyVolumeIndexFile(collection string, volumeId uint32, volumeServer pb.ServerAddress, buf *bytes.Buffer, verbose bool, writer io.Writer) error {

	return operation.WithVolumeServerClient(true, volumeServer, c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {

		ext := ".idx"

		copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
			VolumeId:                 volumeId,
			Ext:                      ext,
			CompactionRevision:       math.MaxUint32,
			StopOffset:               math.MaxInt64,
			Collection:               collection,
			IsEcVolume:               false,
			IgnoreSourceFileNotFound: false,
		})
		if err != nil {
			return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
		}

		err = writeToBuffer(copyFileClient, buf)
		if err != nil {
			return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, volumeServer, err)
		}

		return nil

	})
}
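
// writeToBuffer drains the CopyFile response stream into buf until EOF.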
func writeToBuffer(client volume_server_pb.VolumeServer_CopyFileClient, buf *bytes.Buffer) error {
	for {
		resp, receiveErr := client.Recv()
		if receiveErr == io.EOF {
			break
		}
		if receiveErr != nil {
			return fmt.Errorf("receiving: %v", receiveErr)
		}
		buf.Write(resp.FileContent)
	}
	return nil
}