filer: avoid possible timeouts for updates and deletions

This commit is contained in:
Chris Lu 2020-02-20 15:44:17 -08:00
parent 45156cc2fe
commit 621cdbdf58
4 changed files with 95 additions and 27 deletions

View File

@ -13,6 +13,7 @@ import (
"github.com/karlseguin/ccache"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
@ -27,7 +28,7 @@ type Filer struct {
store *FilerStoreWrapper
directoryCache *ccache.Cache
MasterClient *wdclient.MasterClient
fileIdDeletionChan chan string
fileIdDeletionQueue *util.UnboundedQueue
GrpcDialOption grpc.DialOption
}
@ -35,7 +36,7 @@ func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer {
f := &Filer{
directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters),
fileIdDeletionChan: make(chan string, PaginationSize),
fileIdDeletionQueue: util.NewUnboundedQueue(),
GrpcDialOption: grpcDialOption,
}

View File

@ -10,8 +10,6 @@ import (
func (f *Filer) loopProcessingDeletion() {
ticker := time.NewTicker(5 * time.Second)
lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
m := make(map[string]operation.LookupResult)
for _, vid := range vids {
@ -31,36 +29,35 @@ func (f *Filer) loopProcessingDeletion() {
return m, nil
}
var fileIds []string
var deletionCount int
for {
select {
case fid := <-f.fileIdDeletionChan:
fileIds = append(fileIds, fid)
if len(fileIds) >= 4096 {
glog.V(1).Infof("deleting fileIds len=%d", len(fileIds))
operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
fileIds = fileIds[:0]
}
case <-ticker.C:
if len(fileIds) > 0 {
glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds))
operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
fileIds = fileIds[:0]
deletionCount = 0
f.fileIdDeletionQueue.Consume(func(fileIds []string) {
deletionCount = len(fileIds)
_, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
if err != nil {
glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
} else {
glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
}
})
if deletionCount == 0 {
time.Sleep(1123 * time.Millisecond)
}
}
}
// DeleteChunks queues every chunk's file id for asynchronous, batched
// deletion by the filer's background deletion loop.
func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
	for i := range chunks {
		f.fileIdDeletionQueue.EnQueue(chunks[i].GetFileIdString())
	}
}
// DeleteFileByFileId direct delete by file id.
// Only used when the fileId is not being managed by snapshots.
// The delete is asynchronous: the id is enqueued here and later
// batched and issued by the background deletion loop.
func (f *Filer) DeleteFileByFileId(fileId string) {
	f.fileIdDeletionQueue.EnQueue(fileId)
}
func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {

View File

@ -0,0 +1,45 @@
package util
import "sync"
// UnboundedQueue is a growable FIFO queue of strings with no capacity
// limit. It uses two buffers so that enqueueing and consuming can hold
// separate locks: Consume drains outbound and, when outbound is empty,
// swaps in the inbound buffer.
// NOTE(review): EnQueue currently appends to outbound while holding
// only inboundLock — confirm whether it should append to inbound.
type UnboundedQueue struct {
	outbound     []string     // batch handed to Consume's callback, then reset
	outboundLock sync.RWMutex // guards outbound
	inbound      []string     // buffer swapped into outbound by Consume
	inboundLock  sync.RWMutex // guards inbound
}
// NewUnboundedQueue returns an empty queue, ready for use.
func NewUnboundedQueue() *UnboundedQueue {
	return &UnboundedQueue{}
}
// EnQueue appends items to the tail of the queue.
// Items go into the inbound buffer, guarded by inboundLock, so that
// producers never contend with a consumer draining outbound; Consume
// swaps the two buffers when outbound runs empty.
//
// Fix: the previous version appended to q.outbound while holding only
// inboundLock — a data race with Consume (which mutates outbound under
// outboundLock) that also left inbound permanently empty, making the
// buffer swap in Consume dead code.
func (q *UnboundedQueue) EnQueue(items ...string) {
	q.inboundLock.Lock()
	defer q.inboundLock.Unlock()

	q.inbound = append(q.inbound, items...)
}
// Consume hands the current batch of queued items to fn, if any exist.
// The slice passed to fn is reused by the queue after fn returns, so fn
// must copy any items it needs to retain. If the outbound batch is
// empty, Consume swaps in the inbound buffer (briefly taking
// inboundLock) so producers can keep enqueueing while fn runs.
func (q *UnboundedQueue) Consume(fn func([]string)) {
	q.outboundLock.Lock()
	defer q.outboundLock.Unlock()

	if len(q.outbound) == 0 {
		// Nothing staged: pull in whatever producers have buffered.
		q.inboundLock.Lock()
		if len(q.inbound) > 0 {
			q.outbound, q.inbound = q.inbound, q.outbound
		}
		q.inboundLock.Unlock()
	}

	if len(q.outbound) > 0 {
		fn(q.outbound)
		// Keep the backing array; just truncate for reuse.
		q.outbound = q.outbound[:0]
	}
}

View File

@ -0,0 +1,25 @@
package util
import "testing"
// TestEnqueueAndConsume verifies that items come out of the queue in
// FIFO order across multiple EnQueue/Consume cycles, and that consuming
// an empty queue delivers nothing.
//
// Fix: the previous version only println'ed the items and asserted
// nothing, so it could never fail regardless of queue behavior.
func TestEnqueueAndConsume(t *testing.T) {
	q := NewUnboundedQueue()

	var got []string
	collect := func(items []string) {
		// Copy out: the slice passed to the callback is reused by the queue.
		for _, item := range items {
			got = append(got, item)
		}
	}

	q.EnQueue("1", "2", "3")
	q.Consume(collect)
	q.Consume(collect) // queue drained: this must deliver nothing

	q.EnQueue("4", "5")
	q.EnQueue("6", "7")
	q.Consume(collect)

	want := []string{"1", "2", "3", "4", "5", "6", "7"}
	if len(got) != len(want) {
		t.Fatalf("consumed %d items %v, want %d items %v", len(got), got, len(want), want)
	}
	for i := range want {
		if got[i] != want[i] {
			t.Fatalf("item %d = %q, want %q", i, got[i], want[i])
		}
	}
}