seaweedfs/weed/shell/command_fs_verify.go

package shell

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage"
	"github.com/seaweedfs/seaweedfs/weed/util"
"go.uber.org/atomic"
"golang.org/x/exp/slices"
"io"
"math"
"strings"
"sync"
"time"
)
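
// init registers the fs.verify command with the shell's command list.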
func init() {
	Commands = append(Commands, &commandFsVerify{})
}
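
// commandFsVerify carries the per-run state for fs.verify: the parsed flags, the
// volume id to volume server locations resolved from the master topology, and the
// optional per-volume-server channels used to throttle concurrent chunk checks.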
type commandFsVerify struct {
	env                *CommandEnv
	volumeServers      []pb.ServerAddress
	volumeIds          map[uint32][]pb.ServerAddress
	verbose            *bool
	metadataFromLog    *bool
	concurrency        *int
	modifyTimeAgoAtSec int64
	writer             io.Writer
	waitChan           map[string]chan struct{}
	waitChanLock       sync.RWMutex
}

func (c *commandFsVerify) Name() string {
	return "fs.verify"
}

func (c *commandFsVerify) Help() string {
	return `recursively verify all files under a directory
	fs.verify [-v] [-concurrency=10] [-metadataFromLog] [-modifyTimeAgo 1h] /buckets/dir
`
}
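
// Do parses the flags, collects volume locations from the master topology, and then
// verifies chunks either by replaying the filer metadata log (-metadataFromLog) or by
// a breadth-first traversal of the directory tree, printing a summary at the end.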
func (c *commandFsVerify) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
	c.env = commandEnv
	c.writer = writer
	fsVerifyCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	c.verbose = fsVerifyCommand.Bool("v", false, "print out each processed file")
	modifyTimeAgo := fsVerifyCommand.Duration("modifyTimeAgo", 0, "only verify files modified within this duration (e.g. 1h)")
	c.concurrency = fsVerifyCommand.Int("concurrency", 0, "number of parallel verifications per volume server")
	c.metadataFromLog = fsVerifyCommand.Bool("metadataFromLog", false, "use the filer metadata log to find the entries to verify")
	if err = fsVerifyCommand.Parse(args); err != nil {
		return err
	}
	path, parseErr := commandEnv.parseUrl(findInputDirectory(fsVerifyCommand.Args()))
	if parseErr != nil {
		return parseErr
	}
	c.modifyTimeAgoAtSec = int64(modifyTimeAgo.Seconds())
	c.volumeIds = make(map[uint32][]pb.ServerAddress)
	c.waitChan = make(map[string]chan struct{})
	c.volumeServers = []pb.ServerAddress{}
	defer func() {
		c.modifyTimeAgoAtSec = 0
		c.volumeIds = nil
		c.waitChan = nil
		c.volumeServers = nil
	}()
	if err := c.collectVolumeIds(); err != nil {
		return err
	}
	if *c.concurrency > 0 {
		for _, volumeServer := range c.volumeServers {
			volumeServerStr := string(volumeServer)
			c.waitChan[volumeServerStr] = make(chan struct{}, *c.concurrency)
			defer close(c.waitChan[volumeServerStr])
		}
	}
	var fCount, eCount uint64
	if *c.metadataFromLog {
		var wg sync.WaitGroup
		fCount, eCount, err = c.verifyProcessMetadata(path, &wg)
		wg.Wait()
		if err != nil {
			return err
		}
	} else {
		fCount, eCount, err = c.verifyTraverseBfs(path)
	}
	fmt.Fprintf(writer, "verified %d files, %d files with errors\n", fCount, eCount)
	return err
}
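
// collectVolumeIds walks the master topology and records, for every volume id,
// the set of volume servers that hold a copy of that volume.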
func (c *commandFsVerify) collectVolumeIds() error {
	topologyInfo, _, err := collectTopologyInfo(c.env, 0)
	if err != nil {
		return err
	}
	eachDataNode(topologyInfo, func(dc string, rack RackId, nodeInfo *master_pb.DataNodeInfo) {
		for _, diskInfo := range nodeInfo.DiskInfos {
			for _, vi := range diskInfo.VolumeInfos {
				volumeServer := pb.NewServerAddressFromDataNode(nodeInfo)
				c.volumeIds[vi.Id] = append(c.volumeIds[vi.Id], volumeServer)
				if !slices.Contains(c.volumeServers, volumeServer) {
					c.volumeServers = append(c.volumeServers, volumeServer)
				}
			}
		}
	})
	return nil
}
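
// verifyChunk asks one volume server for the needle status of a file id;
// a needle that was deliberately deleted is not treated as an error.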
func (c *commandFsVerify) verifyChunk(volumeServer pb.ServerAddress, fileId *filer_pb.FileId) error {
	err := operation.WithVolumeServerClient(false, volumeServer, c.env.option.GrpcDialOption,
		func(client volume_server_pb.VolumeServerClient) error {
			_, err := client.VolumeNeedleStatus(context.Background(),
				&volume_server_pb.VolumeNeedleStatusRequest{
					VolumeId: fileId.VolumeId,
					NeedleId: fileId.FileKey})
			return err
		},
	)
	if err != nil && !strings.Contains(err.Error(), storage.ErrorDeleted.Error()) {
		return err
	}
	return nil
}
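
// ItemEntry pairs a file path with its resolved chunk list for the BFS output channel.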
type ItemEntry struct {
	chunks []*filer_pb.FileChunk
	path   util.FullPath
}
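
// verifyProcessMetadata subscribes to the filer metadata log between (now - modifyTimeAgo)
// and now, and verifies the chunks of every new entry it sees. An entry that fails is
// re-checked against the current filer state so that files changed or deleted in the
// meantime are not reported as errors.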
func (c *commandFsVerify) verifyProcessMetadata(path string, wg *sync.WaitGroup) (fileCount uint64, errCount uint64, err error) {
	processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
		message := resp.EventNotification
		if resp.EventNotification.NewEntry == nil {
			return nil
		}
		chunkCount := len(message.NewEntry.Chunks)
		if chunkCount == 0 {
			return nil
		}
		entryPath := fmt.Sprintf("%s/%s", message.NewParentPath, message.NewEntry.Name)
		errorChunksCount := atomic.NewUint64(0)
		if !c.verifyEntry(entryPath, message.NewEntry.Chunks, errorChunksCount, wg) {
			if err = c.env.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
				entryResp, errReq := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{
					Directory: message.NewParentPath,
					Name:      message.NewEntry.Name,
				})
				if errReq != nil {
					if strings.HasSuffix(errReq.Error(), "no entry is found in filer store") {
						return nil
					}
					return errReq
				}
				if entryResp.Entry.Attributes.Mtime == message.NewEntry.Attributes.Mtime &&
					bytes.Equal(entryResp.Entry.Attributes.Md5, message.NewEntry.Attributes.Md5) {
					fmt.Fprintf(c.writer, "file: %s needles:%d failed:%d\n", entryPath, chunkCount, errorChunksCount.Load())
					errCount++
				}
				return nil
			}); err != nil {
				return err
			}
			return nil
		}
		if *c.verbose {
			fmt.Fprintf(c.writer, "file: %s needles:%d verified\n", entryPath, chunkCount)
		}
		fileCount++
		return nil
	}
	metadataFollowOption := &pb.MetadataFollowOption{
		ClientName:             "shell_verify",
		ClientId:               util.RandomInt32(),
		ClientEpoch:            0,
		SelfSignature:          0,
		PathPrefix:             path,
		AdditionalPathPrefixes: nil,
		DirectoriesToWatch:     nil,
		StartTsNs:              time.Now().Add(-1 * time.Second * time.Duration(c.modifyTimeAgoAtSec)).UnixNano(),
		StopTsNs:               time.Now().UnixNano(),
		EventErrorType:         pb.DontLogError,
	}
	return fileCount, errCount, pb.FollowMetadata(c.env.option.FilerAddress, c.env.option.GrpcDialOption, metadataFollowOption, processEventFn)
}
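
// verifyEntry checks every chunk of one file against all volume servers that hold its
// volume, either inline or on a goroutine throttled by the per-server channel when
// -concurrency is set. It returns true only if no chunk failed verification.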
func (c *commandFsVerify) verifyEntry(path string, chunks []*filer_pb.FileChunk, errorCount *atomic.Uint64, wg *sync.WaitGroup) bool {
	fileMsg := fmt.Sprintf("file:%s", path)
	itemIsVerified := atomic.NewBool(true)
	for _, chunk := range chunks {
		if volumeIds, ok := c.volumeIds[chunk.Fid.VolumeId]; ok {
			for _, volumeServer := range volumeIds {
				if *c.concurrency == 0 {
					if err := c.verifyChunk(volumeServer, chunk.Fid); err != nil {
						if !(*c.metadataFromLog && strings.HasSuffix(err.Error(), "not found")) {
							fmt.Fprintf(c.writer, "%s failed verify fileId %s: %+v\n",
								fileMsg, chunk.GetFileIdString(), err)
						}
						if itemIsVerified.Load() {
							itemIsVerified.Store(false)
							errorCount.Add(1)
						}
					}
					continue
				}
				c.waitChanLock.RLock()
				waitChan, ok := c.waitChan[string(volumeServer)]
				c.waitChanLock.RUnlock()
				if !ok {
					fmt.Fprintf(c.writer, "%s failed to get channel for %s fileId: %s\n",
						string(volumeServer), fileMsg, chunk.GetFileIdString())
					if itemIsVerified.Load() {
						itemIsVerified.Store(false)
						errorCount.Add(1)
					}
					continue
				}
				wg.Add(1)
				waitChan <- struct{}{}
				go func(fChunk *filer_pb.FileChunk, path string, volumeServer pb.ServerAddress, msg string) {
					defer wg.Done()
					if err := c.verifyChunk(volumeServer, fChunk.Fid); err != nil {
						if !(*c.metadataFromLog && strings.HasSuffix(err.Error(), "not found")) {
							fmt.Fprintf(c.writer, "%s failed verify fileId %s: %+v\n",
								msg, fChunk.GetFileIdString(), err)
						}
						if itemIsVerified.Load() {
							itemIsVerified.Store(false)
							errorCount.Add(1)
						}
					}
					<-waitChan
				}(chunk, path, volumeServer, fileMsg)
			}
		} else {
			if !*c.metadataFromLog {
				err := fmt.Errorf("volumeId %d not found", chunk.Fid.VolumeId)
				fmt.Fprintf(c.writer, "%s failed verify fileId %s: %+v\n",
					fileMsg, chunk.GetFileIdString(), err)
			}
			if itemIsVerified.Load() {
				itemIsVerified.Store(false)
				errorCount.Add(1)
			}
			break
		}
	}
	return itemIsVerified.Load()
}
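
// verifyTraverseBfs walks the directory tree breadth-first, resolves chunk manifests,
// and feeds each file's chunks to verifyEntry, skipping files modified earlier than
// -modifyTimeAgo when that flag is set.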
func (c *commandFsVerify) verifyTraverseBfs(path string) (fileCount uint64, errCount uint64, err error) {
	timeNowAtSec := time.Now().Unix()
	return fileCount, errCount, doTraverseBfsAndSaving(c.env, c.writer, path, false,
		func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
			if c.modifyTimeAgoAtSec > 0 {
				if entry.Entry.Attributes != nil && c.modifyTimeAgoAtSec < timeNowAtSec-entry.Entry.Attributes.Mtime {
					return nil
				}
			}
			dataChunks, manifestChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.GetChunks(), 0, math.MaxInt64)
			if resolveErr != nil {
				return fmt.Errorf("failed to ResolveChunkManifest: %+v", resolveErr)
			}
			dataChunks = append(dataChunks, manifestChunks...)
			if len(dataChunks) > 0 {
				outputChan <- &ItemEntry{
					chunks: dataChunks,
					path:   util.NewFullPath(entry.Dir, entry.Entry.Name),
				}
			}
			return nil
		},
		func(outputChan chan interface{}) {
			var wg sync.WaitGroup
			itemErrCount := atomic.NewUint64(0)
			for itemEntry := range outputChan {
				i := itemEntry.(*ItemEntry)
				itemPath := string(i.path)
				if c.verifyEntry(itemPath, i.chunks, itemErrCount, &wg) {
					if *c.verbose {
						fmt.Fprintf(c.writer, "file: %s needles:%d verified\n", itemPath, len(i.chunks))
					}
					fileCount++
				}
			}
			wg.Wait()
			errCount = itemErrCount.Load()
		})
}