package storage
import (
	"io"
	"os"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
type NeedleMap struct {
2019-04-19 12:43:36 +08:00
m needle_map.NeedleValueMap
baseNeedleMapper
}
2017-05-27 13:51:25 +08:00
func NewCompactNeedleMap(file *os.File) *NeedleMap {
nm := &NeedleMap{
2019-04-19 12:43:36 +08:00
m: needle_map.NewCompactMap(),
2017-05-27 13:51:25 +08:00
}
nm.indexFile = file
return nm
}
func NewBtreeNeedleMap(file *os.File) *NeedleMap {
nm := &NeedleMap{
2019-04-19 12:43:36 +08:00
m: needle_map.NewBtreeMap(),
}
nm.indexFile = file
return nm
}
const (
	// RowsToRead is the number of index entries read per batch when
	// scanning an index file in WalkIndexFile.
	RowsToRead = 1024
)
2017-05-27 13:51:25 +08:00
func LoadCompactNeedleMap(file *os.File) (*NeedleMap, error) {
nm := NewCompactNeedleMap(file)
return doLoading(file, nm)
}
// LoadBtreeNeedleMap replays the index file into a new btree-backed
// NeedleMap.
func LoadBtreeNeedleMap(file *os.File) (*NeedleMap, error) {
	return doLoading(file, NewBtreeNeedleMap(file))
}
func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
e := WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error {
2019-04-15 14:00:37 +08:00
nm.MaybeSetMaxFileKey(key)
2019-04-09 10:40:56 +08:00
if !offset.IsZero() && size != TombstoneFileSize {
nm.FileCounter++
nm.FileByteCounter = nm.FileByteCounter + uint64(size)
oldOffset, oldSize := nm.m.Set(NeedleId(key), offset, size)
// glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
2019-04-09 10:40:56 +08:00
if !oldOffset.IsZero() && oldSize != TombstoneFileSize {
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
}
} else {
oldSize := nm.m.Delete(NeedleId(key))
// glog.V(3).Infoln("removing key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
}
return nil
})
2019-04-15 14:00:37 +08:00
glog.V(1).Infof("max file key: %d for file: %s", nm.MaxFileKey(), file.Name())
return nm, e
}
// walks through the index file, calls fn function with each key, offset, size
// stops with the error returned by the fn function
func WalkIndexFile(r *os.File, fn func(key NeedleId, offset Offset, size uint32) error) error {
var readerOffset int64
2019-04-19 15:39:34 +08:00
bytes := make([]byte, NeedleMapEntrySize*RowsToRead)
count, e := r.ReadAt(bytes, readerOffset)
glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
readerOffset += int64(count)
var (
key NeedleId
offset Offset
size uint32
i int
)
for count > 0 && e == nil || e == io.EOF {
2019-04-19 15:39:34 +08:00
for i = 0; i+NeedleMapEntrySize <= count; i += NeedleMapEntrySize {
key, offset, size = IdxFileEntry(bytes[i : i+NeedleMapEntrySize])
if e = fn(key, offset, size); e != nil {
return e
}
}
if e == io.EOF {
return nil
}
count, e = r.ReadAt(bytes, readerOffset)
glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
readerOffset += int64(count)
}
return e
}
// Put records the needle's (offset, size) in memory and appends the
// entry to the index file for durability.
func (nm *NeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
	_, oldSize := nm.m.Set(key, offset, size)
	nm.logPut(key, oldSize, size)
	return nm.appendToIndexFile(key, offset, size)
}
2019-04-19 12:43:36 +08:00
func (nm *NeedleMap) Get(key NeedleId) (element *needle_map.NeedleValue, ok bool) {
element, ok = nm.m.Get(NeedleId(key))
return
}
func (nm *NeedleMap) Delete(key NeedleId, offset Offset) error {
deletedBytes := nm.m.Delete(NeedleId(key))
nm.logDelete(deletedBytes)
2017-01-07 02:22:20 +08:00
return nm.appendToIndexFile(key, offset, TombstoneFileSize)
}
// Close closes the underlying index file. The close error is
// deliberately discarded; there is no caller-visible way to recover here.
func (nm *NeedleMap) Close() {
	_ = nm.indexFile.Close()
}
// Destroy closes the index file and removes it from disk, returning any
// removal error.
func (nm *NeedleMap) Destroy() error {
	indexFileName := nm.indexFile.Name()
	nm.Close()
	return os.Remove(indexFileName)
}