seaweedfs/weed/storage/needle_map_memory.go

package storage

import (
	"io"
	"os"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
)
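
// NeedleMap keeps an in-memory map from needle id to (offset, size) for one volume.
// The embedded baseNeedleMapper (defined elsewhere in this package) supplies the
// index file handle and the file/deletion counters updated below.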
type NeedleMap struct {
	m needle.NeedleValueMap
	baseNeedleMapper
}
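
// NewCompactNeedleMap returns a NeedleMap backed by needle.NewCompactMap,
// using the given file as its index file.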
func NewCompactNeedleMap(file *os.File) *NeedleMap {
	nm := &NeedleMap{
		m: needle.NewCompactMap(),
	}
	nm.indexFile = file
	return nm
}
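
// NewBtreeNeedleMap is like NewCompactNeedleMap, but uses the btree-based
// in-memory map from needle.NewBtreeMap.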
func NewBtreeNeedleMap(file *os.File) *NeedleMap {
	nm := &NeedleMap{
		m: needle.NewBtreeMap(),
	}
	nm.indexFile = file
	return nm
}
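
// RowsToRead is the number of index entries read per batch when walking an index file.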
const (
	RowsToRead = 1024
)
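
// LoadCompactNeedleMap builds a compact-map based NeedleMap by replaying the
// entries of the given index file.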
func LoadCompactNeedleMap(file *os.File) (*NeedleMap, error) {
	nm := NewCompactNeedleMap(file)
	return doLoading(file, nm)
}
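
// LoadBtreeNeedleMap does the same as LoadCompactNeedleMap, but with the btree-based map.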
func LoadBtreeNeedleMap(file *os.File) (*NeedleMap, error) {
	nm := NewBtreeNeedleMap(file)
	return doLoading(file, nm)
}
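
// doLoading replays the index file into nm: entries with a non-zero offset and a
// size other than TombstoneFileSize are inserted into the map, everything else is
// treated as a deletion, and the file/deletion counters are updated along the way.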
func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
	e := WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error {
		nm.MaybeSetMaxFileKey(key)
		if !offset.IsZero() && size != TombstoneFileSize {
			nm.FileCounter++
			nm.FileByteCounter = nm.FileByteCounter + uint64(size)
			oldOffset, oldSize := nm.m.Set(NeedleId(key), offset, size)
			// glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
			if !oldOffset.IsZero() && oldSize != TombstoneFileSize {
				// the key was already present: the superseded copy counts as deleted bytes
				nm.DeletionCounter++
				nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
			}
		} else {
			// a zero offset or tombstone size marks a deletion entry
			oldSize := nm.m.Delete(NeedleId(key))
			// glog.V(3).Infoln("removing key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
			nm.DeletionCounter++
			nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
		}
		return nil
	})
	glog.V(1).Infof("max file key: %d for file: %s", nm.MaxFileKey(), file.Name())
	return nm, e
}

// WalkIndexFile walks through the index file, calling fn with each (key, offset, size)
// entry. It stops early with the error returned by fn.
func WalkIndexFile(r *os.File, fn func(key NeedleId, offset Offset, size uint32) error) error {
	var readerOffset int64
	bytes := make([]byte, NeedleEntrySize*RowsToRead)
	count, e := r.ReadAt(bytes, readerOffset)
	glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
	readerOffset += int64(count)
	var (
		key    NeedleId
		offset Offset
		size   uint32
		i      int
	)
	// keep reading RowsToRead entries per batch; the final ReadAt may return io.EOF
	// together with a partial batch, which is still processed before returning
	for count > 0 && e == nil || e == io.EOF {
		for i = 0; i+NeedleEntrySize <= count; i += NeedleEntrySize {
			key, offset, size = IdxFileEntry(bytes[i : i+NeedleEntrySize])
			if e = fn(key, offset, size); e != nil {
				return e
			}
		}
		if e == io.EOF {
			return nil
		}
		count, e = r.ReadAt(bytes, readerOffset)
		glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
		readerOffset += int64(count)
	}
	return e
}
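
// Put records the needle in the in-memory map and appends the entry to the index file.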
func (nm *NeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
	_, oldSize := nm.m.Set(NeedleId(key), offset, size)
	nm.logPut(key, oldSize, size)
	return nm.appendToIndexFile(key, offset, size)
}
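
// Get looks up the needle value for key in the in-memory map.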
func (nm *NeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok bool) {
	element, ok = nm.m.Get(NeedleId(key))
	return
}
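
// Delete removes key from the in-memory map and appends a tombstone entry
// (size TombstoneFileSize) to the index file.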
func (nm *NeedleMap) Delete(key NeedleId, offset Offset) error {
	deletedBytes := nm.m.Delete(NeedleId(key))
	nm.logDelete(deletedBytes)
	return nm.appendToIndexFile(key, offset, TombstoneFileSize)
}
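
// Close closes the underlying index file, ignoring any error from Close.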
func (nm *NeedleMap) Close() {
	_ = nm.indexFile.Close()
}
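
// Destroy closes the NeedleMap and removes its index file from disk.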
func (nm *NeedleMap) Destroy() error {
	nm.Close()
	return os.Remove(nm.indexFile.Name())
}