seaweedfs/weed/storage/volume_checking.go

package storage

import (
	"fmt"
	"os"

	"github.com/chrislusf/seaweedfs/weed/storage/backend"
	"github.com/chrislusf/seaweedfs/weed/storage/idx"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
	"github.com/chrislusf/seaweedfs/weed/util"
)
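
// CheckVolumeDataIntegrity verifies that the index file is well formed and that
// its last entry points at a readable needle (or deletion marker) in the volume
// data file, returning that needle's AppendAtNs timestamp.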
func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAtNs uint64, e error) {
	var indexSize int64
	if indexSize, e = verifyIndexFileIntegrity(indexFile); e != nil {
		return 0, fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", indexFile.Name(), e)
	}
	if indexSize == 0 {
		return 0, nil
	}
	var lastIdxEntry []byte
	if lastIdxEntry, e = readIndexEntryAtOffset(indexFile, indexSize-NeedleMapEntrySize); e != nil {
		return 0, fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e)
	}
	key, offset, size := idx.IdxFileEntry(lastIdxEntry)
	if offset.IsZero() {
		return 0, nil
	}
	if size < 0 {
		// read the deletion entry
		if lastAppendAtNs, e = verifyDeletedNeedleIntegrity(v.DataBackend, v.Version(), key); e != nil {
			return lastAppendAtNs, fmt.Errorf("verifyDeletedNeedleIntegrity %s failed: %v", indexFile.Name(), e)
		}
	} else {
		if lastAppendAtNs, e = verifyNeedleIntegrity(v.DataBackend, v.Version(), offset.ToAcutalOffset(), key, size); e != nil {
			return lastAppendAtNs, fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), e)
		}
	}
	return
}
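
// verifyIndexFileIntegrity returns the index file size after checking that it is
// a whole multiple of NeedleMapEntrySize, i.e. contains only complete entries.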
func verifyIndexFileIntegrity(indexFile *os.File) (indexSize int64, err error) {
	if indexSize, err = util.GetFileSize(indexFile); err == nil {
		if indexSize%NeedleMapEntrySize != 0 {
			err = fmt.Errorf("index file's size is %d bytes, maybe corrupted", indexSize)
		}
	}
	return
}
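
// readIndexEntryAtOffset reads one raw index entry (NeedleMapEntrySize bytes)
// from the index file at the given byte offset.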
func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err error) {
	if offset < 0 {
		err = fmt.Errorf("offset %d for index file is invalid", offset)
		return
	}
	bytes = make([]byte, NeedleMapEntrySize)
	_, err = indexFile.ReadAt(bytes, offset)
	return
}
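
// verifyNeedleIntegrity reads the needle stored at the given offset in the data
// file and checks that its Id matches the key recorded in the index entry.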
func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, offset int64, key NeedleId, size Size) (lastAppendAtNs uint64, err error) {
	n := new(needle.Needle)
	if err = n.ReadData(datFile, offset, size, v); err != nil {
		return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err)
	}
	if n.Id != key {
		return n.AppendAtNs, fmt.Errorf("index key %#x does not match needle's Id %#x", key, n.Id)
	}
	return n.AppendAtNs, err
}
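
// verifyDeletedNeedleIntegrity reads the deletion marker appended at the end of
// the data file and checks that its Id matches the key from the index entry.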
func verifyDeletedNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, key NeedleId) (lastAppendAtNs uint64, err error) {
	n := new(needle.Needle)
	size := n.DiskSize(v)
	var fileSize int64
	fileSize, _, err = datFile.GetStat()
	if err != nil {
		return 0, fmt.Errorf("GetStat: %v", err)
	}
	// a deletion appends a needle with no data, so the tombstone occupies the last DiskSize(v) bytes
	if err = n.ReadData(datFile, fileSize-size, Size(0), v); err != nil {
		return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", fileSize-size, fileSize, err)
	}
	if n.Id != key {
		return n.AppendAtNs, fmt.Errorf("index key %#x does not match needle's Id %#x", key, n.Id)
	}
	return n.AppendAtNs, err
}