seaweedfs/unmaintained/fix_dat/fix_dat.go

142 lines
4.3 KiB
Go
Raw Normal View History

package main
import (
	"flag"
	"fmt"
	"io"
	"os"
	"path"
	"strconv"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
	"github.com/chrislusf/seaweedfs/weed/util"
)
// Command-line flags locating the volume to repair.
var (
	// directory that holds the volume's .dat and .idx files
	fixVolumePath = flag.String("dir", "/tmp", "data directory to store files")
	// optional collection prefix; volume files are named "<collection>_<id>.*"
	fixVolumeCollection = flag.String("collection", "", "the volume collection name")
	// numeric volume id; both <id>.dat and <id>.idx must already exist in -dir
	fixVolumeId = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
)
/*
This is to resolve a one-time issue that caused inconsistency between the .dat and .idx files.

In this case, the .dat file contains all the data, but some deletions caused incorrect offsets.
The .idx file has all the correct offsets.

1. Fix the .dat file; a new .dat_fixed file will be generated.
go run fix_dat.go -volumeId=9 -dir=/Users/chrislu/Downloads
2. Move the original .dat and .idx files to a backup folder, then rename .dat_fixed to .dat.
mv 9.dat_fixed 9.dat
3. Fix the .idx file with "weed fix".
weed fix -volumeId=9 -dir=/Users/chrislu/Downloads
*/
func main() {
flag.Parse()
fileName := strconv.Itoa(*fixVolumeId)
if *fixVolumeCollection != "" {
fileName = *fixVolumeCollection + "_" + fileName
}
indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
if err != nil {
2018-06-25 02:37:08 +08:00
glog.Fatalf("Read Volume Index %v", err)
}
defer indexFile.Close()
datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDONLY, 0644)
if err != nil {
2018-06-25 02:37:08 +08:00
glog.Fatalf("Read Volume Data %v", err)
}
defer datFile.Close()
newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed"))
if err != nil {
2018-06-25 02:37:08 +08:00
glog.Fatalf("Write New Volume Data %v", err)
}
defer newDatFile.Close()
2018-06-25 02:37:08 +08:00
superBlock, err := storage.ReadSuperBlock(datFile)
if err != nil {
glog.Fatalf("Read Volume Data superblock %v", err)
}
newDatFile.Write(superBlock.Bytes())
2019-04-19 12:43:36 +08:00
iterateEntries(datFile, indexFile, func(n *needle.Needle, offset int64) {
2018-08-01 14:39:56 +08:00
fmt.Printf("needle id=%v name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize)
_, s, _, e := n.Append(newDatFile, superBlock.Version())
fmt.Printf("size %d error %v\n", s, e)
})
}
2019-04-19 12:43:36 +08:00
func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *needle.Needle, offset int64)) {
// start to read index file
var readerOffset int64
bytes := make([]byte, 16)
count, _ := idxFile.ReadAt(bytes, readerOffset)
readerOffset += int64(count)
// start to read dat file
2019-01-17 09:17:19 +08:00
superBlock, err := storage.ReadSuperBlock(datFile)
2018-06-25 02:37:08 +08:00
if err != nil {
fmt.Printf("cannot read dat file super block: %v", err)
return
}
2019-01-17 09:17:19 +08:00
offset := int64(superBlock.BlockSize())
version := superBlock.Version()
2019-04-19 12:43:36 +08:00
n, _, rest, err := needle.ReadNeedleHeader(datFile, version, offset)
if err != nil {
fmt.Printf("cannot read needle header: %v", err)
return
}
fmt.Printf("Needle %+v, rest %d\n", n, rest)
for n != nil && count > 0 {
// parse index file entry
key := util.BytesToUint64(bytes[0:8])
offsetFromIndex := util.BytesToUint32(bytes[8:12])
sizeFromIndex := util.BytesToUint32(bytes[12:16])
count, _ = idxFile.ReadAt(bytes, readerOffset)
readerOffset += int64(count)
if offsetFromIndex != 0 && offset != int64(offsetFromIndex)*8 {
//t := offset
offset = int64(offsetFromIndex) * 8
//fmt.Printf("Offset change %d => %d\n", t, offset)
}
fmt.Printf("key: %d offsetFromIndex %d n.Size %d sizeFromIndex:%d\n", key, offsetFromIndex, n.Size, sizeFromIndex)
2019-04-19 12:43:36 +08:00
rest = needle.NeedleBodyLength(sizeFromIndex, version)
func() {
defer func() {
if r := recover(); r != nil {
fmt.Println("Recovered in f", r)
}
}()
2019-04-19 15:39:34 +08:00
if _, err = n.ReadNeedleBody(datFile, version, offset+int64(types.NeedleHeaderSize), rest); err != nil {
fmt.Printf("cannot read needle body: offset %d body %d %v\n", offset, rest, err)
}
}()
if n.Size <= n.DataSize {
continue
}
visitNeedle(n, offset)
2019-04-19 15:39:34 +08:00
offset += types.NeedleHeaderSize + rest
//fmt.Printf("==> new entry offset %d\n", offset)
2019-04-19 12:43:36 +08:00
if n, _, rest, err = needle.ReadNeedleHeader(datFile, version, offset); err != nil {
if err == io.EOF {
return
}
fmt.Printf("cannot read needle header: %v\n", err)
return
}
//fmt.Printf("new entry needle size:%d rest:%d\n", n.Size, rest)
}
}