mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-12-17 03:57:50 +08:00)

Commit d4566d4aaa
* compare chunks by timestamp
* fix slab clearing error
* fix test compilation
* move oldest chunk to sealed, instead of by fullness
* lock on fh.entryViewCache
* remove verbose logs
* revert slab clearing
* less logs
* less logs
* track write and read by timestamp
* remove useless logic
* add entry lock on file handle release
* use mem chunk only, swap file chunk has problems
* comment out code that maybe used later
* add debug mode to compare data read and write
* more efficient readResolvedChunks with linked list
* small optimization
* fix test compilation
* minor fix on writer
* add SeparateGarbageChunks
* group chunks into sections
* turn off debug mode
* fix tests
* fix tests
* tmp enable swap file chunk
* Revert "tmp enable swap file chunk"
This reverts commit 985137ec47
.
* simple refactoring
* simple refactoring
* do not re-use swap file chunk. Sealed chunks should not be re-used.
* comment out debugging facilities
* either mem chunk or swap file chunk is fine now
* remove orderedMutex as *semaphore.Weighted, since it was not found to be impactful
* optimize size calculation for changing large files
* optimize performance to avoid going through the long list of chunks
* still problems with swap file chunk
* rename
* tiny optimization
* make the swap file chunk save only successfully read data
* fix
* enable both mem and swap file chunk
* resolve chunks with range
* rename
* fix chunk interval list
* also change file handle chunk group when adding chunks
* pick inactive chunk with time-decayed counter
* fix compilation
* avoid nil with empty fh.entry
* refactoring
* rename
* rename
* refactor visible intervals to *list.List
* refactor chunkViews to *list.List
* add IntervalList for generic interval list
* change visible interval to use IntervalList in generics
* change chunkViews to *IntervalList[*ChunkView]
* use NewFileChunkSection to create
* rename variables
* refactor
* fix renaming leftover
* renaming
* renaming
* add insert interval
* interval list adds lock
* incrementally add chunks to readers (see the interval-insertion sketch below this list)
Fixes:
1. set start and stop offset for the value object
2. clone the value object
3. use a pointer instead of copy-by-value when passing to interval.Value
4. use insert interval, since chunks can be added out of order
* fix tests compilation
* fix tests compilation
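The interval-list commits above replace the flat slice of visible intervals with a generic, sorted, doubly linked list, so chunks can be merged in as they arrive, even out of order, and readers can walk them in offset order. Below is a minimal sketch of that idea, assuming names that mirror the commit titles (IntervalList, Interval, InsertInterval); the field layout, the lock, and the insertion walk are illustrative assumptions, not the actual weed/filer implementation.

// Minimal sketch of a generic interval list with out-of-order insertion.
// Assumed layout for illustration; the real weed/filer types may differ.
package sketch

import "sync"

type Interval[T any] struct {
	StartOffset, StopOffset int64
	TsNs                    int64 // modification timestamp, used to decide which chunk wins on overlap
	Value                   T     // stored as a pointer type to avoid copy-by-value (fix 3 above)
	Prev, Next              *Interval[T]
}

type IntervalList[T any] struct {
	head, tail *Interval[T]
	mu         sync.Mutex // "interval list adds lock"
}

// Front returns the first interval so readers can walk the list in offset order.
func (l *IntervalList[T]) Front() *Interval[T] { return l.head }

// InsertInterval keeps the list sorted by StartOffset, so chunks added
// out of order (fix 4 above) still end up in offset order for readers.
func (l *IntervalList[T]) InsertInterval(start, stop, tsNs int64, value T) {
	l.mu.Lock()
	defer l.mu.Unlock()

	iv := &Interval[T]{StartOffset: start, StopOffset: stop, TsNs: tsNs, Value: value}

	if l.head == nil {
		l.head, l.tail = iv, iv
		return
	}
	// walk backwards from the tail, since appending newer chunks is the common case
	p := l.tail
	for p != nil && p.StartOffset > start {
		p = p.Prev
	}
	if p == nil {
		// becomes the new first element
		iv.Next = l.head
		l.head.Prev = iv
		l.head = iv
		return
	}
	iv.Prev = p
	iv.Next = p.Next
	if p.Next != nil {
		p.Next.Prev = iv
	} else {
		l.tail = iv
	}
	p.Next = iv
}

A real implementation also has to split or shadow overlapping intervals by comparing timestamps (the "compare chunks by timestamp" commits); that bookkeeping is omitted here to keep the sketch short.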
44 lines · 1.1 KiB · Go
package repl_util

import (
	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/replication/source"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

// CopyFromChunkViews streams every chunk view in the interval list from the
// source filer and passes the bytes to writeFunc, trying each replica URL in
// turn until one read-and-write succeeds.
func CopyFromChunkViews(chunkViews *filer.IntervalList[*filer.ChunkView], filerSource *source.FilerSource, writeFunc func(data []byte) error) error {

	for x := chunkViews.Front(); x != nil; x = x.Next {
		chunk := x.Value

		fileUrls, err := filerSource.LookupFileId(chunk.FileId)
		if err != nil {
			return err
		}

		var writeErr error
		var shouldRetry bool

		for _, fileUrl := range fileUrls {
			shouldRetry, err = util.ReadUrlAsStream(fileUrl, chunk.CipherKey, chunk.IsGzipped, chunk.IsFullChunk(), chunk.OffsetInChunk, int(chunk.ViewSize), func(data []byte) {
				writeErr = writeFunc(data)
			})
			if err != nil {
				glog.V(1).Infof("read from %s: %v", fileUrl, err)
			} else if writeErr != nil {
				glog.V(1).Infof("copy from %s: %v", fileUrl, writeErr)
			} else {
				break
			}
		}
		if shouldRetry && err != nil {
			return err
		}
		if writeErr != nil {
			return writeErr
		}
	}
	return nil
}
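For context, here is a minimal sketch of how a replication sink might drive CopyFromChunkViews, roughly following the pattern of the existing sinks. The helpers filer.FileSize and filer.ViewFromChunks are assumed from the surrounding codebase, and the local-file destination plus the replicateToLocalFile name are purely illustrative.

// Hypothetical caller of CopyFromChunkViews, for illustration only.
package example

import (
	"os"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/replication/repl_util"
	"github.com/seaweedfs/seaweedfs/weed/replication/source"
)

// replicateToLocalFile resolves an entry's chunks into non-overlapping chunk
// views and streams each view's bytes into a local file via CopyFromChunkViews.
func replicateToLocalFile(entry *filer_pb.Entry, filerSource *source.FilerSource, destPath string) error {
	dst, err := os.Create(destPath)
	if err != nil {
		return err
	}
	defer dst.Close()

	// Resolve the entry's chunks into the visible views covering the whole file.
	totalSize := filer.FileSize(entry)
	chunkViews := filer.ViewFromChunks(filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))

	// Stream every view; CopyFromChunkViews retries across replica URLs internally.
	return repl_util.CopyFromChunkViews(chunkViews, filerSource, func(data []byte) error {
		_, writeErr := dst.Write(data)
		return writeErr
	})
}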