seaweedfs/weed/mount/filehandle_map.go

package mount

import (
	"sync"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

// FileHandleToInode maintains a bidirectional mapping between file handle
// ids and inodes, guarded by a single RWMutex.
type FileHandleToInode struct {
	sync.RWMutex
	nextFh   FileHandleId
	inode2fh map[uint64]*FileHandle
	fh2inode map[FileHandleId]uint64
}

// NewFileHandleToInode creates an empty mapping, seeding nextFh with a
// random id.
func NewFileHandleToInode() *FileHandleToInode {
	return &FileHandleToInode{
		inode2fh: make(map[uint64]*FileHandle),
		fh2inode: make(map[FileHandleId]uint64),
		nextFh:   FileHandleId(util.RandomUint64()),
	}
}

// GetFileHandle returns the FileHandle registered under fh, or nil if the
// handle id is unknown.
func (i *FileHandleToInode) GetFileHandle(fh FileHandleId) *FileHandle {
	i.RLock()
	defer i.RUnlock()
	inode, found := i.fh2inode[fh]
	if found {
		return i.inode2fh[inode]
	}
	return nil
}

// FindFileHandle looks up the FileHandle already registered for inode.
func (i *FileHandleToInode) FindFileHandle(inode uint64) (fh *FileHandle, found bool) {
	i.RLock()
	defer i.RUnlock()
	fh, found = i.inode2fh[inode]
	return
}

// AcquireFileHandle returns the FileHandle for inode, creating and
// registering a new one under a fresh random handle id if none exists,
// or incrementing the reference counter of the existing one.
func (i *FileHandleToInode) AcquireFileHandle(wfs *WFS, inode uint64, entry *filer_pb.Entry) *FileHandle {
	i.Lock()
	defer i.Unlock()
	fh, found := i.inode2fh[inode]
	if !found {
		fh = newFileHandle(wfs, i.nextFh, inode, entry)
		i.nextFh = FileHandleId(util.RandomUint64())
		i.inode2fh[inode] = fh
		i.fh2inode[fh.fh] = inode
	} else {
		fh.counter++
	}
	if fh.GetEntry().GetEntry() != entry {
		fh.SetEntry(entry)
	}
	return fh
}
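
// A hedged illustration (not in the original file): acquiring the same inode
// twice returns the same *FileHandle with its counter bumped, and a newer
// entry pointer replaces the cached one. Names like fhMap, wfs, ino, entryV1,
// and entryV2 are hypothetical placeholders.
//
//	fh1 := fhMap.AcquireFileHandle(wfs, ino, entryV1)
//	fh2 := fhMap.AcquireFileHandle(wfs, ino, entryV2)
//	// fh1 == fh2; fh2 now carries entryV2, and two releases are needed.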

// ReleaseByInode decrements the reference counter of the FileHandle for
// inode and, once no references remain, removes both mappings and releases
// the handle.
func (i *FileHandleToInode) ReleaseByInode(inode uint64) {
	i.Lock()
	defer i.Unlock()
	fh, found := i.inode2fh[inode]
	if found {
		fh.counter--
		if fh.counter <= 0 {
			delete(i.inode2fh, inode)
			delete(i.fh2inode, fh.fh)
			fh.ReleaseHandle()
		}
	}
}

// ReleaseByHandle decrements the reference counter for the given handle id.
// A dangling fh2inode entry with no matching FileHandle is cleaned up
// directly; otherwise both mappings are removed once no references remain.
func (i *FileHandleToInode) ReleaseByHandle(fh FileHandleId) {
	i.Lock()
	defer i.Unlock()
	inode, found := i.fh2inode[fh]
	if found {
		fhHandle, fhFound := i.inode2fh[inode]
		if !fhFound {
			delete(i.fh2inode, fh)
		} else {
			fhHandle.counter--
			if fhHandle.counter <= 0 {
				delete(i.inode2fh, inode)
				delete(i.fh2inode, fhHandle.fh)
				fhHandle.ReleaseHandle()
			}
		}
	}
}
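
// A minimal usage sketch (an assumption for illustration, not part of this
// file): pair every acquire with a release so the counter reaches zero and
// ReleaseHandle runs. wfs, ino, and entry are hypothetical values.
//
//	fhMap := NewFileHandleToInode()
//	fh := fhMap.AcquireFileHandle(wfs, ino, entry)
//	defer fhMap.ReleaseByHandle(fh.fh)
//	if existing, ok := fhMap.FindFileHandle(ino); ok {
//		_ = existing // same handle while references remain
//	}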