// Package chunk_cache provides an on-disk chunk cache backed by
// fixed-size cache volume files.
package chunk_cache
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/storage"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"golang.org/x/exp/slices"
"path"
)
// OnDiskCacheLayer is one tier of the on-disk chunk cache: a set of
// fixed-size cache volume files kept sorted newest-first, so writes go to
// the front volume and reads scan from newest to oldest.
type OnDiskCacheLayer struct {
diskCaches []*ChunkCacheVolume
}
2020-09-28 01:41:29 +08:00
func NewOnDiskCacheLayer(dir, namePrefix string, diskSize int64, segmentCount int) *OnDiskCacheLayer {
2020-09-28 01:41:29 +08:00
volumeCount, volumeSize := int(diskSize/(30000*1024*1024)), int64(30000*1024*1024)
if volumeCount < segmentCount {
2020-09-28 01:41:29 +08:00
volumeCount, volumeSize = segmentCount, diskSize/int64(segmentCount)
}
2020-04-14 12:58:10 +08:00
c := &OnDiskCacheLayer{}
for i := 0; i < volumeCount; i++ {
fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i))
2020-09-28 01:58:19 +08:00
diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize)
2020-04-14 12:58:10 +08:00
if err != nil {
glog.Errorf("failed to add cache %s : %v", fileName, err)
} else {
c.diskCaches = append(c.diskCaches, diskCache)
}
}
// keep newest cache to the front
slices.SortFunc(c.diskCaches, func(a, b *ChunkCacheVolume) int {
return b.lastModTime.Compare(a.lastModTime)
2020-04-14 12:58:10 +08:00
})
return c
}
func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) {
if len(c.diskCaches) == 0 {
return
}
2020-04-14 12:58:10 +08:00
if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit {
t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset()
if resetErr != nil {
glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName)
return
}
for i := len(c.diskCaches) - 1; i > 0; i-- {
c.diskCaches[i] = c.diskCaches[i-1]
}
c.diskCaches[0] = t
}
2020-06-27 01:01:55 +08:00
if err := c.diskCaches[0].WriteNeedle(needleId, data); err != nil {
glog.V(0).Infof("cache write %v size %d: %v", needleId, len(data), err)
}
2020-04-14 12:58:10 +08:00
}
2020-04-21 08:48:06 +08:00
func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) {
2020-04-14 12:58:10 +08:00
var err error
for _, diskCache := range c.diskCaches {
data, err = diskCache.GetNeedle(needleId)
if err == storage.ErrorNotFound {
continue
}
if err != nil {
glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId)
continue
}
if len(data) != 0 {
return
}
}
return nil
}
func (c *OnDiskCacheLayer) getChunkSlice(needleId types.NeedleId, offset, length uint64) (data []byte) {
var err error
for _, diskCache := range c.diskCaches {
data, err = diskCache.getNeedleSlice(needleId, offset, length)
if err == storage.ErrorNotFound {
continue
}
if err != nil {
2021-10-17 06:57:30 +08:00
glog.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err)
continue
}
if len(data) != 0 {
return
}
}
return nil
}
2022-02-26 13:55:04 +08:00
func (c *OnDiskCacheLayer) readChunkAt(buffer []byte, needleId types.NeedleId, offset uint64) (n int, err error) {
for _, diskCache := range c.diskCaches {
n, err = diskCache.readNeedleSliceAt(buffer, needleId, offset)
if err == storage.ErrorNotFound {
continue
}
if err != nil {
glog.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err)
continue
}
if n > 0 {
return
}
}
return
}
2020-04-21 08:48:06 +08:00
func (c *OnDiskCacheLayer) shutdown() {
2020-04-14 12:58:10 +08:00
for _, diskCache := range c.diskCaches {
diskCache.Shutdown()
}
}