fix erasure coding (EC) read path: derive nLargeBlockRows correctly from the .dat size

fix https://github.com/seaweedfs/seaweedfs/issues/5465
This commit is contained in:
chrislu 2024-08-13 12:01:46 -07:00
parent 915f9f5054
commit 3a2e21fee7
3 changed files with 21 additions and 12 deletions

View File

@ -26,8 +26,8 @@ debug_server:
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- server -dir=~/tmp/99 -filer -volume.port=8343 -s3 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1
debug_volume:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- volume -dir=~/tmp/100 -port 8564 -max=30 -preStopSeconds=2
go build -tags=5BytesOffset -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- volume -dir=/Users/chrislu/tmp/x/volume_data -port 8564 -max=1 -preStopSeconds=2
debug_webdav:
go build -gcflags="all=-N -l"

View File

@ -5,25 +5,22 @@ import (
)
type Interval struct {
BlockIndex int
BlockIndex int // the index of the block in either the large blocks or the small blocks
InnerBlockOffset int64
Size types.Size
IsLargeBlock bool
IsLargeBlock bool // whether the block is a large block or a small block
LargeBlockRowsCount int
}
func LocateData(largeBlockLength, smallBlockLength int64, datSize int64, offset int64, size types.Size) (intervals []Interval) {
blockIndex, isLargeBlock, innerBlockOffset := locateOffset(largeBlockLength, smallBlockLength, datSize, offset)
// adding DataShardsCount*smallBlockLength to ensure we can derive the number of large block size from a shard size
nLargeBlockRows := int((datSize + DataShardsCount*smallBlockLength) / (largeBlockLength * DataShardsCount))
blockIndex, isLargeBlock, nLargeBlockRows, innerBlockOffset := locateOffset(largeBlockLength, smallBlockLength, datSize, offset)
for size > 0 {
interval := Interval{
BlockIndex: blockIndex,
InnerBlockOffset: innerBlockOffset,
IsLargeBlock: isLargeBlock,
LargeBlockRowsCount: nLargeBlockRows,
LargeBlockRowsCount: int(nLargeBlockRows),
}
blockRemaining := largeBlockLength - innerBlockOffset
@ -41,7 +38,7 @@ func LocateData(largeBlockLength, smallBlockLength int64, datSize int64, offset
size -= interval.Size
blockIndex += 1
if isLargeBlock && blockIndex == nLargeBlockRows*DataShardsCount {
if isLargeBlock && blockIndex == interval.LargeBlockRowsCount*DataShardsCount {
isLargeBlock = false
blockIndex = 0
}
@ -51,9 +48,9 @@ func LocateData(largeBlockLength, smallBlockLength int64, datSize int64, offset
return
}
func locateOffset(largeBlockLength, smallBlockLength int64, datSize int64, offset int64) (blockIndex int, isLargeBlock bool, innerBlockOffset int64) {
func locateOffset(largeBlockLength, smallBlockLength int64, datSize int64, offset int64) (blockIndex int, isLargeBlock bool, nLargeBlockRows int64, innerBlockOffset int64) {
largeRowSize := largeBlockLength * DataShardsCount
nLargeBlockRows := datSize / (largeBlockLength * DataShardsCount)
nLargeBlockRows = datSize / (largeBlockLength * DataShardsCount)
// if offset is within the large block area
if offset < nLargeBlockRows*largeRowSize {

View File

@ -3,6 +3,7 @@ package erasure_coding
import (
"bytes"
"fmt"
"github.com/stretchr/testify/assert"
"math/rand"
"os"
"testing"
@ -208,3 +209,14 @@ func (this Interval) sameAs(that Interval) bool {
this.BlockIndex == that.BlockIndex &&
this.Size == that.Size
}
// TestLocateData2 is a regression test for
// https://github.com/seaweedfs/seaweedfs/issues/5465: a read located in the
// small-block region (IsLargeBlock == false) must carry the correct
// LargeBlockRowsCount derived from the .dat size.
func TestLocateData2(t *testing.T) {
	intervals := LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, 32205678320, 21479557912, 4194339)
	// testify's assert.Equal takes (t, expected, actual); expected comes
	// first so that failure messages label the values correctly.
	assert.Equal(t, []Interval{
		{BlockIndex: 4, InnerBlockOffset: 527128, Size: 521448, IsLargeBlock: false, LargeBlockRowsCount: 2},
		{BlockIndex: 5, InnerBlockOffset: 0, Size: 1048576, IsLargeBlock: false, LargeBlockRowsCount: 2},
		{BlockIndex: 6, InnerBlockOffset: 0, Size: 1048576, IsLargeBlock: false, LargeBlockRowsCount: 2},
		{BlockIndex: 7, InnerBlockOffset: 0, Size: 1048576, IsLargeBlock: false, LargeBlockRowsCount: 2},
		{BlockIndex: 8, InnerBlockOffset: 0, Size: 527163, IsLargeBlock: false, LargeBlockRowsCount: 2},
	}, intervals)
}