rename parameter and reuse functions

rename milestone to watermark
This commit is contained in:
guol-fnst 2022-07-20 09:26:06 +08:00
parent 91285bb51d
commit ac694f0c8f
7 changed files with 58 additions and 93 deletions

View File

@ -6,6 +6,10 @@ import (
"errors" "errors"
"flag" "flag"
"fmt" "fmt"
"io"
"math"
"os"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb"
@ -16,9 +20,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/grpc" "google.golang.org/grpc"
"io"
"math"
"os"
) )
var ( var (
@ -155,7 +156,7 @@ func getVolumeFiles(v uint32, addr pb.ServerAddress) (map[types.NeedleId]needleS
var maxOffset int64 var maxOffset int64
files := map[types.NeedleId]needleState{} files := map[types.NeedleId]needleState{}
err = idx.WalkIndexFile(idxFile, func(key types.NeedleId, offset types.Offset, size types.Size) error { err = idx.WalkIndexFile(idxFile, 0, func(key types.NeedleId, offset types.Offset, size types.Size) error {
if offset.IsZero() || size.IsDeleted() { if offset.IsZero() || size.IsDeleted() {
files[key] = needleState{ files[key] = needleState{
state: stateDeleted, state: stateDeleted,

View File

@ -3,11 +3,12 @@ package main
import ( import (
"flag" "flag"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/util"
"os" "os"
"path" "path"
"strconv" "strconv"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/storage/types"
@ -36,7 +37,7 @@ func main() {
} }
defer indexFile.Close() defer indexFile.Close()
idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size types.Size) error { idx.WalkIndexFile(indexFile, 0, func(key types.NeedleId, offset types.Offset, size types.Size) error {
fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size))) fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size)))
return nil return nil
}) })

View File

@ -294,7 +294,7 @@ func readNeedleMap(baseFileName string) (*needle_map.MemDb, error) {
defer indexFile.Close() defer indexFile.Close()
cm := needle_map.NewMemDb() cm := needle_map.NewMemDb()
err = idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size types.Size) error { err = idx.WalkIndexFile(indexFile, 0, func(key types.NeedleId, offset types.Offset, size types.Size) error {
if !offset.IsZero() && size != types.TombstoneFileSize { if !offset.IsZero() && size != types.TombstoneFileSize {
cm.Set(key, offset, size) cm.Set(key, offset, size)
} else { } else {

View File

@ -9,42 +9,8 @@ import (
// walks through the index file, calls fn function with each key, offset, size // walks through the index file, calls fn function with each key, offset, size
// stops with the error returned by the fn function // stops with the error returned by the fn function
func WalkIndexFile(r io.ReaderAt, fn func(key types.NeedleId, offset types.Offset, size types.Size) error) error { func WalkIndexFile(r io.ReaderAt, startFrom uint64, fn func(key types.NeedleId, offset types.Offset, size types.Size) error) error {
var readerOffset int64 readerOffset := int64(startFrom * types.NeedleMapEntrySize)
bytes := make([]byte, types.NeedleMapEntrySize*RowsToRead)
count, e := r.ReadAt(bytes, readerOffset)
if count == 0 && e == io.EOF {
return nil
}
glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e)
readerOffset += int64(count)
var (
key types.NeedleId
offset types.Offset
size types.Size
i int
)
for count > 0 && e == nil || e == io.EOF {
for i = 0; i+types.NeedleMapEntrySize <= count; i += types.NeedleMapEntrySize {
key, offset, size = IdxFileEntry(bytes[i : i+types.NeedleMapEntrySize])
if e = fn(key, offset, size); e != nil {
return e
}
}
if e == io.EOF {
return nil
}
count, e = r.ReadAt(bytes, readerOffset)
glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e)
readerOffset += int64(count)
}
return e
}
//copied from WalkIndexFile, just init readerOffset from milestone
func WalkIndexFileIncrement(r io.ReaderAt, milestone uint64, fn func(key types.NeedleId, offset types.Offset, size types.Size) error) error {
var readerOffset = int64(milestone * types.NeedleMapEntrySize)
bytes := make([]byte, types.NeedleMapEntrySize*RowsToRead) bytes := make([]byte, types.NeedleMapEntrySize*RowsToRead)
count, e := r.ReadAt(bytes, readerOffset) count, e := r.ReadAt(bytes, readerOffset)
if count == 0 && e == io.EOF { if count == 0 && e == io.EOF {

View File

@ -111,7 +111,7 @@ func (cm *MemDb) LoadFromIdx(idxName string) (ret error) {
func (cm *MemDb) LoadFromReaderAt(readerAt io.ReaderAt) (ret error) { func (cm *MemDb) LoadFromReaderAt(readerAt io.ReaderAt) (ret error) {
return idx.WalkIndexFile(readerAt, func(key NeedleId, offset Offset, size Size) error { return idx.WalkIndexFile(readerAt, 0, func(key NeedleId, offset Offset, size Size) error {
if offset.IsZero() || size.IsDeleted() { if offset.IsZero() || size.IsDeleted() {
return cm.Delete(key) return cm.Delete(key)
} }

View File

@ -19,15 +19,16 @@ import (
. "github.com/chrislusf/seaweedfs/weed/storage/types" . "github.com/chrislusf/seaweedfs/weed/storage/types"
) )
//mark it every milestoneCnt operations //mark it every watermarkBatchSize operations
const milestoneCnt = 10000 const watermarkBatchSize = 10000
const milestoneKey = 0xffffffffffffffff - 1
var watermarkKey = []byte("idx_entry_watermark")
type LevelDbNeedleMap struct { type LevelDbNeedleMap struct {
baseNeedleMapper baseNeedleMapper
dbFileName string dbFileName string
db *leveldb.DB db *leveldb.DB
recordNum uint64 recordCount uint64
} }
func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Options) (m *LevelDbNeedleMap, err error) { func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Options) (m *LevelDbNeedleMap, err error) {
@ -53,12 +54,12 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option
return return
} }
} }
glog.V(1).Infof("Loading %s... , milestone: %d", dbFileName, getMileStone(m.db)) glog.V(0).Infof("Loading %s... , watermark: %d", dbFileName, getWatermark(m.db))
m.recordNum = uint64(m.indexFileOffset / types.NeedleMapEntrySize) m.recordCount = uint64(m.indexFileOffset / types.NeedleMapEntrySize)
milestone := (m.recordNum / milestoneCnt) * milestoneCnt watermark := (m.recordCount / watermarkBatchSize) * watermarkBatchSize
err = setMileStone(m.db, milestone) err = setWatermark(m.db, watermark)
if err != nil { if err != nil {
glog.Fatalf("set milestone for %s error: %s\n", dbFileName, err) glog.Fatalf("set watermark for %s error: %s\n", dbFileName, err)
return return
} }
mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile) mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
@ -93,17 +94,17 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error {
} }
defer db.Close() defer db.Close()
milestone := getMileStone(db) watermark := getWatermark(db)
if stat, err := indexFile.Stat(); err != nil { if stat, err := indexFile.Stat(); err != nil {
glog.Fatalf("stat file %s: %v", indexFile.Name(), err) glog.Fatalf("stat file %s: %v", indexFile.Name(), err)
return err return err
} else { } else {
if milestone*types.NeedleMapEntrySize > uint64(stat.Size()) { if watermark*types.NeedleMapEntrySize > uint64(stat.Size()) {
glog.Warningf("wrong milestone %d for filesize %d", milestone, stat.Size()) glog.Warningf("wrong watermark %d for filesize %d", watermark, stat.Size())
} }
glog.V(0).Infof("generateLevelDbFile %s, milestone %d, num of entries:%d", dbFileName, milestone, (uint64(stat.Size())-milestone*types.NeedleMapEntrySize)/types.NeedleMapEntrySize) glog.V(0).Infof("generateLevelDbFile %s, watermark %d, num of entries:%d", dbFileName, watermark, (uint64(stat.Size())-watermark*types.NeedleMapEntrySize)/types.NeedleMapEntrySize)
} }
return idx.WalkIndexFileIncrement(indexFile, milestone, func(key NeedleId, offset Offset, size Size) error { return idx.WalkIndexFile(indexFile, watermark, func(key NeedleId, offset Offset, size Size) error {
if !offset.IsZero() && size.IsValid() { if !offset.IsZero() && size.IsValid() {
levelDbWrite(db, key, offset, size, false, 0) levelDbWrite(db, key, offset, size, false, 0)
} else { } else {
@ -127,7 +128,7 @@ func (m *LevelDbNeedleMap) Get(key NeedleId) (element *needle_map.NeedleValue, o
func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size Size) error { func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size Size) error {
var oldSize Size var oldSize Size
var milestone uint64 var watermark uint64
if oldNeedle, ok := m.Get(key); ok { if oldNeedle, ok := m.Get(key); ok {
oldSize = oldNeedle.Size oldSize = oldNeedle.Size
} }
@ -136,27 +137,25 @@ func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size Size) error {
if err := m.appendToIndexFile(key, offset, size); err != nil { if err := m.appendToIndexFile(key, offset, size); err != nil {
return fmt.Errorf("cannot write to indexfile %s: %v", m.indexFile.Name(), err) return fmt.Errorf("cannot write to indexfile %s: %v", m.indexFile.Name(), err)
} }
m.recordNum++ m.recordCount++
if m.recordNum%milestoneCnt != 0 { if m.recordCount%watermarkBatchSize != 0 {
milestone = 0 watermark = 0
} else { } else {
milestone = (m.recordNum / milestoneCnt) * milestoneCnt watermark = (m.recordCount / watermarkBatchSize) * watermarkBatchSize
glog.V(1).Infof("put cnt:%d for %s,milestone: %d", m.recordNum, m.dbFileName, milestone) glog.V(1).Infof("put cnt:%d for %s,watermark: %d", m.recordCount, m.dbFileName, watermark)
} }
return levelDbWrite(m.db, key, offset, size, milestone == 0, milestone) return levelDbWrite(m.db, key, offset, size, watermark == 0, watermark)
} }
func getMileStone(db *leveldb.DB) uint64 { func getWatermark(db *leveldb.DB) uint64 {
var mskBytes = make([]byte, 8) data, err := db.Get(watermarkKey, nil)
util.Uint64toBytes(mskBytes, milestoneKey)
data, err := db.Get(mskBytes, nil)
if err != nil || len(data) != 8 { if err != nil || len(data) != 8 {
glog.Warningf("get milestone from db error: %v, %d", err, len(data)) glog.Warningf("get watermark from db error: %v, %d", err, len(data))
/* /*
if !strings.Contains(strings.ToLower(err.Error()), "not found") { if !strings.Contains(strings.ToLower(err.Error()), "not found") {
err = setMileStone(db, 0) err = setWatermark(db, 0)
if err != nil { if err != nil {
glog.Errorf("failed to set milestone: %v", err) glog.Errorf("failed to set watermark: %v", err)
} }
} }
*/ */
@ -165,28 +164,26 @@ func getMileStone(db *leveldb.DB) uint64 {
return util.BytesToUint64(data) return util.BytesToUint64(data)
} }
func setMileStone(db *leveldb.DB, milestone uint64) error { func setWatermark(db *leveldb.DB, watermark uint64) error {
glog.V(1).Infof("set milestone %d", milestone) glog.V(1).Infof("set watermark %d", watermark)
var mskBytes = make([]byte, 8) var wmBytes = make([]byte, 8)
util.Uint64toBytes(mskBytes, milestoneKey) util.Uint64toBytes(wmBytes, watermark)
var msBytes = make([]byte, 8) if err := db.Put(watermarkKey, wmBytes, nil); err != nil {
util.Uint64toBytes(msBytes, milestone) return fmt.Errorf("failed to setWatermark: %v", err)
if err := db.Put(mskBytes, msBytes, nil); err != nil {
return fmt.Errorf("failed to setMileStone: %v", err)
} }
return nil return nil
} }
func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size Size, upateMilstone bool, milestone uint64) error { func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size Size, updateWatermark bool, watermark uint64) error {
bytes := needle_map.ToBytes(key, offset, size) bytes := needle_map.ToBytes(key, offset, size)
if err := db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil { if err := db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil {
return fmt.Errorf("failed to write leveldb: %v", err) return fmt.Errorf("failed to write leveldb: %v", err)
} }
// set milestone // set watermark
if upateMilstone { if updateWatermark {
return setMileStone(db, milestone) return setWatermark(db, watermark)
} }
return nil return nil
} }
@ -197,7 +194,7 @@ func levelDbDelete(db *leveldb.DB, key NeedleId) error {
} }
func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error { func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error {
var milestone uint64 var watermark uint64
oldNeedle, found := m.Get(key) oldNeedle, found := m.Get(key)
if !found || oldNeedle.Size.IsDeleted() { if !found || oldNeedle.Size.IsDeleted() {
return nil return nil
@ -208,13 +205,13 @@ func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error {
if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil { if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil {
return err return err
} }
m.recordNum++ m.recordCount++
if m.recordNum%milestoneCnt != 0 { if m.recordCount%watermarkBatchSize != 0 {
milestone = 0 watermark = 0
} else { } else {
milestone = (m.recordNum / milestoneCnt) * milestoneCnt watermark = (m.recordCount / watermarkBatchSize) * watermarkBatchSize
} }
return levelDbWrite(m.db, key, oldNeedle.Offset, -oldNeedle.Size, milestone == 0, milestone) return levelDbWrite(m.db, key, oldNeedle.Offset, -oldNeedle.Size, watermark == 0, watermark)
} }
func (m *LevelDbNeedleMap) Close() { func (m *LevelDbNeedleMap) Close() {

View File

@ -33,7 +33,7 @@ func LoadCompactNeedleMap(file *os.File) (*NeedleMap, error) {
} }
func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
e := idx.WalkIndexFile(file, func(key NeedleId, offset Offset, size Size) error { e := idx.WalkIndexFile(file, 0, func(key NeedleId, offset Offset, size Size) error {
nm.MaybeSetMaxFileKey(key) nm.MaybeSetMaxFileKey(key)
if !offset.IsZero() && size.IsValid() { if !offset.IsZero() && size.IsValid() {
nm.FileCounter++ nm.FileCounter++