mirror of
https://github.com/seaweedfs/seaweedfs.git
synced 2024-12-24 01:17:56 +08:00
d4566d4aaa
* compare chunks by timestamp
* fix slab clearing error
* fix test compilation
* move oldest chunk to sealed, instead of by fullness
* lock on fh.entryViewCache
* remove verbose logs
* revert slab clearing
* less logs
* less logs
* track write and read by timestamp
* remove useless logic
* add entry lock on file handle release
* use mem chunk only, swap file chunk has problems
* comment out code that maybe used later
* add debug mode to compare data read and write
* more efficient readResolvedChunks with linked list
* small optimization
* fix test compilation
* minor fix on writer
* add SeparateGarbageChunks
* group chunks into sections
* turn off debug mode
* fix tests
* fix tests
* tmp enable swap file chunk
* Revert "tmp enable swap file chunk"
This reverts commit 985137ec47
.
* simple refactoring
* simple refactoring
* do not re-use swap file chunk. Sealed chunks should not be re-used.
* comment out debugging facilities
* either mem chunk or swap file chunk is fine now
* remove orderedMutex as *semaphore.Weighted
not found impactful
* optimize size calculation for changing large files
* optimize performance to avoid going through the long list of chunks
* still problems with swap file chunk
* rename
* tiny optimization
* swap file chunk save only successfully read data
* fix
* enable both mem and swap file chunk
* resolve chunks with range
* rename
* fix chunk interval list
* also change file handle chunk group when adding chunks
* pick in-active chunk with time-decayed counter
* fix compilation
* avoid nil with empty fh.entry
* refactoring
* rename
* rename
* refactor visible intervals to *list.List
* refactor chunkViews to *list.List
* add IntervalList for generic interval list
* change visible interval to use IntervalList in generics
* change chunkViews to *IntervalList[*ChunkView]
* use NewFileChunkSection to create
* rename variables
* refactor
* fix renaming leftover
* renaming
* renaming
* add insert interval
* interval list adds lock
* incrementally add chunks to readers
Fixes:
1. set start and stop offset for the value object
2. clone the value object
3. use pointer instead of copy-by-value when passing to interval.Value
4. use insert interval since adding chunk could be out of order
* fix tests compilation
* fix tests compilation
90 lines
2.5 KiB
Go
90 lines
2.5 KiB
Go
package mount
|
|
|
|
import (
|
|
"bytes"
|
|
"fmt"
|
|
"io"
|
|
|
|
"github.com/hanwen/go-fuse/v2/fuse"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/glog"
|
|
)
|
|
|
|
/**
|
|
* Read data
|
|
*
|
|
* Read should send exactly the number of bytes requested except
|
|
* on EOF or error, otherwise the rest of the data will be
|
|
* substituted with zeroes. An exception to this is when the file
|
|
* has been opened in 'direct_io' mode, in which case the return
|
|
* value of the read system call will reflect the return value of
|
|
* this operation.
|
|
*
|
|
* fi->fh will contain the value set by the open method, or will
|
|
* be undefined if the open method didn't set any value.
|
|
*
|
|
* Valid replies:
|
|
* fuse_reply_buf
|
|
* fuse_reply_iov
|
|
* fuse_reply_data
|
|
* fuse_reply_err
|
|
*
|
|
* @param req request handle
|
|
* @param ino the inode number
|
|
* @param size number of bytes to read
|
|
* @param off offset to read from
|
|
* @param fi file information
|
|
*/
|
|
func (wfs *WFS) Read(cancel <-chan struct{}, in *fuse.ReadIn, buff []byte) (fuse.ReadResult, fuse.Status) {
|
|
fh := wfs.GetHandle(FileHandleId(in.Fh))
|
|
if fh == nil {
|
|
return nil, fuse.ENOENT
|
|
}
|
|
|
|
fh.Lock()
|
|
defer fh.Unlock()
|
|
|
|
offset := int64(in.Offset)
|
|
totalRead, err := readDataByFileHandle(buff, fh, offset)
|
|
if err != nil {
|
|
glog.Warningf("file handle read %s %d: %v", fh.FullPath(), totalRead, err)
|
|
return nil, fuse.EIO
|
|
}
|
|
|
|
if IsDebugFileReadWrite {
|
|
// print(".")
|
|
mirrorData := make([]byte, totalRead)
|
|
fh.mirrorFile.ReadAt(mirrorData, offset)
|
|
if bytes.Compare(mirrorData, buff[:totalRead]) != 0 {
|
|
|
|
againBuff := make([]byte, len(buff))
|
|
againRead, _ := readDataByFileHandle(buff, fh, offset)
|
|
againCorrect := bytes.Compare(mirrorData, againBuff[:againRead]) == 0
|
|
againSame := bytes.Compare(buff[:totalRead], againBuff[:againRead]) == 0
|
|
|
|
fmt.Printf("\ncompare %v [%d,%d) size:%d againSame:%v againCorrect:%v\n", fh.mirrorFile.Name(), offset, offset+totalRead, totalRead, againSame, againCorrect)
|
|
//fmt.Printf("read mirrow data: %v\n", mirrorData)
|
|
//fmt.Printf("read actual data: %v\n", buff[:totalRead])
|
|
}
|
|
}
|
|
|
|
return fuse.ReadResultData(buff[:totalRead]), fuse.OK
|
|
}
|
|
|
|
func readDataByFileHandle(buff []byte, fhIn *FileHandle, offset int64) (int64, error) {
|
|
// read data from source file
|
|
size := len(buff)
|
|
fhIn.lockForRead(offset, size)
|
|
defer fhIn.unlockForRead(offset, size)
|
|
|
|
n, tsNs, err := fhIn.readFromChunks(buff, offset)
|
|
if err == nil || err == io.EOF {
|
|
maxStop := fhIn.readFromDirtyPages(buff, offset, tsNs)
|
|
n = max(maxStop-offset, n)
|
|
}
|
|
if err == io.EOF {
|
|
err = nil
|
|
}
|
|
return n, err
|
|
}
|