seaweedfs/weed/mount/filehandle.go
Chris Lu 952afd810c
Fix deadlock ()
* reduce locks to avoid deadlock

Flush -> FlushData -> uploadPipeline.FlushAll
uploaderCount>0

goroutine 1 [sync.Cond.Wait, 71 minutes]:
sync.runtime_notifyListWait(0xc0007ae4d0, 0x0)
	/usr/local/go/src/runtime/sema.go:569 +0x159
sync.(*Cond).Wait(0xc001a59290?)
	/usr/local/go/src/sync/cond.go:70 +0x85
github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*UploadPipeline).waitForCurrentWritersToComplete(0xc0002ee4d0)
	/github/workspace/weed/mount/page_writer/upload_pipeline_lock.go:58 +0x32
github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*UploadPipeline).FlushAll(0xc0002ee4d0)
	/github/workspace/weed/mount/page_writer/upload_pipeline.go:151 +0x25
github.com/seaweedfs/seaweedfs/weed/mount.(*ChunkedDirtyPages).FlushData(0xc00087e840)
	/github/workspace/weed/mount/dirty_pages_chunked.go:54 +0x29
github.com/seaweedfs/seaweedfs/weed/mount.(*PageWriter).FlushData(...)
	/github/workspace/weed/mount/page_writer.go:50
github.com/seaweedfs/seaweedfs/weed/mount.(*WFS).doFlush(0xc0006ad600, 0xc00030d380, 0x0, 0x0)
	/github/workspace/weed/mount/weedfs_file_sync.go:101 +0x169
github.com/seaweedfs/seaweedfs/weed/mount.(*WFS).Flush(0xc0006ad600, 0xc001a594a8?, 0xc0004c1ca0)
	/github/workspace/weed/mount/weedfs_file_sync.go:59 +0x48
github.com/hanwen/go-fuse/v2/fuse.doFlush(0xc0000da870?, 0xc0004c1b08)
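
For context, this is the shape of the wait that Flush is parked on. The names mirror the trace (uploaderCount, waitForCurrentWritersToComplete), but the snippet is only an illustrative sketch of a sync.Cond wait, not the actual upload_pipeline code:

package main

import "sync"

// Sketch only: FlushAll cannot return while any uploader is still in flight,
// so it waits on a condition variable that each finishing uploader signals.
type uploadPipelineSketch struct {
	mu            sync.Mutex
	cond          *sync.Cond
	uploaderCount int
}

func newUploadPipelineSketch() *uploadPipelineSketch {
	up := &uploadPipelineSketch{}
	up.cond = sync.NewCond(&up.mu)
	return up
}

func (up *uploadPipelineSketch) waitForCurrentWritersToComplete() {
	up.mu.Lock()
	for up.uploaderCount > 0 {
		up.cond.Wait() // goroutine 1 is parked here: uploaderCount never drops to zero
	}
	up.mu.Unlock()
}

func (up *uploadPipelineSketch) uploaderDone() {
	up.mu.Lock()
	up.uploaderCount--
	up.cond.Broadcast()
	up.mu.Unlock()
}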

SaveContent -> MemChunk.RLock ->
	ChunkedDirtyPages.saveChunkedFileIntervalToStorage
	pages.fh.AddChunks([]*filer_pb.FileChunk{chunk})
		fh.entryLock.Lock()

sync.(*RWMutex).Lock(0x0?)
	/usr/local/go/src/sync/rwmutex.go:146 +0x31
github.com/seaweedfs/seaweedfs/weed/mount.(*FileHandle).AddChunks(0xc00030d380, {0xc00028bdc8, 0x1, 0x1})
	/github/workspace/weed/mount/filehandle.go:93 +0x45
github.com/seaweedfs/seaweedfs/weed/mount.(*ChunkedDirtyPages).saveChunkedFileIntervalToStorage(0xc00087e840, {0x2be7ac0, 0xc00018d9e0}, 0x0, 0x121, 0x17e3c624565ace45, 0x1?)
	/github/workspace/weed/mount/dirty_pages_chunked.go:80 +0x2d4
github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*MemChunk).SaveContent(0xc0008d9130, 0xc0008093e0)
	/github/workspace/weed/mount/page_writer/page_chunk_mem.go:115 +0x112
github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*UploadPipeline).moveToSealed.func1()
	/github/workspace/weed/mount/page_writer/upload_pipeline.go:187 +0x55
github.com/seaweedfs/seaweedfs/weed/util.(*LimitedConcurrentExecutor).Execute.func1()
	/github/workspace/weed/util/limited_executor.go:38 +0x62
created by github.com/seaweedfs/seaweedfs/weed/util.(*LimitedConcurrentExecutor).Execute in goroutine 1
	/github/workspace/weed/util/limited_executor.go:33 +0x97
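
The uploader side of the deadlock, condensed from the trace above into a standalone sketch (the real MemChunk and FileHandle types look different; only the lock order is the point): the uploader holds the chunk's read lock for the whole save, then blocks trying to take the handle's entry lock inside AddChunks.

package main

import "sync"

// Standalone sketch of the save leg's lock order:
// MemChunk.RLock is taken first, fh.entryLock.Lock second.
type memChunk struct {
	sync.RWMutex
	data []byte
}

type fileHandle struct {
	entryLock sync.RWMutex
	chunks    [][]byte
}

func (fh *fileHandle) AddChunks(chunk []byte) {
	fh.entryLock.Lock() // blocks while the metadata-update goroutine holds entryLock
	defer fh.entryLock.Unlock()
	fh.chunks = append(fh.chunks, chunk)
}

func (mc *memChunk) SaveContent(fh *fileHandle) {
	mc.RLock() // chunk read lock held for the whole save
	defer mc.RUnlock()
	fh.AddChunks(mc.data) // second lock: fh.entryLock
}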

On metadata update
fh.entryLock.Lock()
	fh.dirtyPages.Destroy()
		up.chunksLock.Lock => each sealed chunk.FreeReference => MemChunk.Lock

goroutine 134 [sync.RWMutex.Lock, 71 minutes]:
sync.runtime_SemacquireRWMutex(0xc0007c3558?, 0xea?, 0x3fb0800?)
	/usr/local/go/src/runtime/sema.go:87 +0x25
sync.(*RWMutex).Lock(0xc0007c35a8?)
	/usr/local/go/src/sync/rwmutex.go:151 +0x6a
github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*MemChunk).FreeResource(0xc0008d9130)
	/github/workspace/weed/mount/page_writer/page_chunk_mem.go:38 +0x2a
github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*SealedChunk).FreeReference(0xc00071cdb0, {0xc0006ba1a0, 0x20})
	/github/workspace/weed/mount/page_writer/upload_pipeline.go:38 +0xb7
github.com/seaweedfs/seaweedfs/weed/mount/page_writer.(*UploadPipeline).Shutdown(0xc0002ee4d0)
	/github/workspace/weed/mount/page_writer/upload_pipeline.go:220 +0x185
github.com/seaweedfs/seaweedfs/weed/mount.(*ChunkedDirtyPages).Destroy(0xc0008cea40?)
	/github/workspace/weed/mount/dirty_pages_chunked.go:87 +0x17
github.com/seaweedfs/seaweedfs/weed/mount.(*PageWriter).Destroy(...)
	/github/workspace/weed/mount/page_writer.go:78
github.com/seaweedfs/seaweedfs/weed/mount.NewSeaweedFileSystem.func3({0xc00069a6c0, 0x30}, 0x6?)
	/github/workspace/weed/mount/weedfs.go:119 +0x17a
github.com/seaweedfs/seaweedfs/weed/mount/meta_cache.NewMetaCache.func1({0xc00069a6c0?, 0xc00069a480?}, 0x4015b40?)
	/github/workspace/weed/mount/meta_cache/meta_cache.go:37 +0x1c
github.com/seaweedfs/seaweedfs/weed/mount/meta_cache.SubscribeMetaEvents.func1(0xc000661810)
	/github/workspace/weed/mount/meta_cache/meta_cache_subscribe.go:43 +0x570
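
And the metadata-update side, again as a standalone sketch rather than the real code: entryLock is taken first, then each sealed chunk's write lock. Together with the save leg above this is a classic AB-BA lock inversion, and FlushAll ends up waiting on an uploader count that can never reach zero.

package main

import "sync"

type memChunk struct{ sync.RWMutex }

type fileHandle struct {
	entryLock sync.RWMutex
	sealed    []*memChunk
}

// Sketch of the destroy path reached from the metadata update: the entry
// lock is held across dirtyPages.Destroy(), which then needs every sealed
// chunk's write lock in order to free it.
func (fh *fileHandle) onMetadataUpdate() {
	fh.entryLock.Lock()
	defer fh.entryLock.Unlock()
	for _, c := range fh.sealed {
		c.Lock() // blocks: an uploader holds c.RLock and is itself waiting on entryLock
		c.Unlock()
	}
}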

* use locked entry everywhere

* modifiable remote entry

* skip locking after getting lock from fhLockTable
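
The last bullet is visible in the file below: ReleaseHandle takes one exclusive per-handle lock from wfs.fhLockTable instead of nesting entryLock. A hedged sketch of that calling pattern, assuming it sits in package mount next to the file below (the AcquireLock/ReleaseLock calls mirror ReleaseHandle; the function name and body are illustrative only):

// Illustrative only; not the actual doFlush. The point is the single
// per-handle lock: once it is held, entry reads and writes go through the
// LockedEntry without re-taking fh.entryLock ("skip locking after getting
// lock from fhLockTable"), so the upload and metadata-update paths can no
// longer take the two locks in opposite orders.
func flushWithHandleLock(fh *FileHandle) {
	fhActiveLock := fh.wfs.fhLockTable.AcquireLock("doFlush", fh.fh, util.ExclusiveLock)
	defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)

	fh.dirtyPages.FlushData() // error handling omitted in this sketch
}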
2024-07-24 23:46:40 -07:00


package mount

import (
	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"os"
	"sync"
)

type FileHandleId uint64

var IsDebugFileReadWrite = false

type FileHandle struct {
	fh              FileHandleId
	counter         int64
	entry           *LockedEntry
	entryLock       sync.RWMutex
	entryChunkGroup *filer.ChunkGroup
	inode           uint64
	wfs             *WFS

	// cache file has been written to
	dirtyMetadata bool
	dirtyPages    *PageWriter
	reader        *filer.ChunkReadAt
	contentType   string
	isDeleted     bool

	// for debugging
	mirrorFile *os.File
}

func newFileHandle(wfs *WFS, handleId FileHandleId, inode uint64, entry *filer_pb.Entry) *FileHandle {
	fh := &FileHandle{
		fh:      handleId,
		counter: 1,
		inode:   inode,
		wfs:     wfs,
	}
	// dirtyPages: newContinuousDirtyPages(file, writeOnly),
	fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit)
	fh.entry = &LockedEntry{
		Entry: entry,
	}
	if entry != nil {
		fh.SetEntry(entry)
	}

	if IsDebugFileReadWrite {
		var err error
		fh.mirrorFile, err = os.OpenFile("/tmp/sw/"+entry.Name, os.O_RDWR|os.O_CREATE, 0600)
		if err != nil {
			println("failed to create mirror:", err.Error())
		}
	}

	return fh
}

func (fh *FileHandle) FullPath() util.FullPath {
	fp, _ := fh.wfs.inodeToPath.GetPath(fh.inode)
	return fp
}

func (fh *FileHandle) GetEntry() *LockedEntry {
	return fh.entry
}

func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) {
	if entry != nil {
		fileSize := filer.FileSize(entry)
		entry.Attributes.FileSize = fileSize
		var resolveManifestErr error
		fh.entryChunkGroup, resolveManifestErr = filer.NewChunkGroup(fh.wfs.LookupFn(), fh.wfs.chunkCache, entry.Chunks)
		if resolveManifestErr != nil {
			glog.Warningf("failed to resolve manifest chunks in %+v", entry)
		}
	} else {
		glog.Fatalf("setting file handle entry to nil")
	}
	fh.entry.SetEntry(entry)
}

func (fh *FileHandle) UpdateEntry(fn func(entry *filer_pb.Entry)) *filer_pb.Entry {
	return fh.entry.UpdateEntry(fn)
}

func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
	fh.entry.AppendChunks(chunks)
}

func (fh *FileHandle) ReleaseHandle() {
	fhActiveLock := fh.wfs.fhLockTable.AcquireLock("ReleaseHandle", fh.fh, util.ExclusiveLock)
	defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)

	fh.dirtyPages.Destroy()
	if IsDebugFileReadWrite {
		fh.mirrorFile.Close()
	}
}

func lessThan(a, b *filer_pb.FileChunk) bool {
	if a.ModifiedTsNs == b.ModifiedTsNs {
		return a.Fid.FileKey < b.Fid.FileKey
	}
	return a.ModifiedTsNs < b.ModifiedTsNs
}