seaweedfs/weed/filer2/filechunks_test.go

package filer2

import (
	"fmt"
	"log"
	"testing"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
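
// TestCompactFileChunks verifies that CompactFileChunks splits a chunk list into
// the chunks that are still at least partially visible and the fully
// overwritten garbage chunks.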
func TestCompactFileChunks(t *testing.T) {
chunks := []*filer_pb.FileChunk{
{Offset: 10, Size: 100, FileId: "abc", Mtime: 50},
{Offset: 100, Size: 100, FileId: "def", Mtime: 100},
{Offset: 200, Size: 100, FileId: "ghi", Mtime: 200},
{Offset: 110, Size: 200, FileId: "jkl", Mtime: 300},
}
compacted, garbage := CompactFileChunks(chunks)
if len(compacted) != 3 {
t.Fatalf("unexpected compacted: %d", len(compacted))
}
if len(garbage) != 1 {
t.Fatalf("unexpected garbage: %d", len(garbage))
}
}
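
// TestCompactFileChunks2 exercises compaction with many overlapping chunks,
// including generated older chunks that are fully overwritten by newer ones.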
func TestCompactFileChunks2(t *testing.T) {
chunks := []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 50},
{Offset: 100, Size: 100, FileId: "def", Mtime: 100},
{Offset: 200, Size: 100, FileId: "ghi", Mtime: 200},
{Offset: 0, Size: 100, FileId: "abcf", Mtime: 300},
{Offset: 50, Size: 100, FileId: "fhfh", Mtime: 400},
{Offset: 100, Size: 100, FileId: "yuyu", Mtime: 500},
}
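	// append k pairs of older chunks (Mtime 0..2*k-1) that are fully overwritten
	// by the newer chunks above, so every appended chunk ends up as garbage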
k := 3
for n := 0; n < k; n++ {
chunks = append(chunks, &filer_pb.FileChunk{
Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), Mtime: int64(n),
})
chunks = append(chunks, &filer_pb.FileChunk{
Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), Mtime: int64(n + k),
})
}
compacted, garbage := CompactFileChunks(chunks)
if len(compacted) != 4 {
t.Fatalf("unexpected compacted: %d", len(compacted))
}
if len(garbage) != 8 {
t.Fatalf("unexpected garbage: %d", len(garbage))
}
}
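
// TestIntervalMerging verifies that NonOverlappingVisibleIntervals flattens
// overlapping chunks into non-overlapping visible intervals, with the chunk
// carrying the latest Mtime winning each contested byte range.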
func TestIntervalMerging(t *testing.T) {
testcases := []struct {
Chunks []*filer_pb.FileChunk
Expected []*VisibleInterval
}{
// case 0: normal
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
},
Expected: []*VisibleInterval{
{start: 0, stop: 100, fileId: "abc"},
{start: 100, stop: 200, fileId: "asdf"},
{start: 200, stop: 300, fileId: "fsad"},
},
},
// case 1: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
},
Expected: []*VisibleInterval{
{start: 0, stop: 200, fileId: "asdf"},
},
},
// case 2: updates overwrite part of previous chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
},
Expected: []*VisibleInterval{
{start: 0, stop: 50, fileId: "asdf"},
{start: 50, stop: 100, fileId: "abc"},
},
},
// case 3: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
},
Expected: []*VisibleInterval{
{start: 0, stop: 50, fileId: "asdf"},
{start: 50, stop: 300, fileId: "xxxx"},
},
},
// case 4: updates far away from prev chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
},
Expected: []*VisibleInterval{
{start: 0, stop: 200, fileId: "asdf"},
{start: 250, stop: 500, fileId: "xxxx"},
},
},
		// case 5: overlapping updates, with chunks listed out of Mtime order
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
{Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
},
Expected: []*VisibleInterval{
{start: 0, stop: 200, fileId: "asdf"},
{start: 200, stop: 220, fileId: "abc"},
},
},
// case 6: same updates
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
},
Expected: []*VisibleInterval{
{start: 0, stop: 100, fileId: "abc"},
},
},
// case 7: real updates
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", Mtime: 123},
{Offset: 0, Size: 3145728, FileId: "3,029565bf3092", Mtime: 130},
{Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", Mtime: 140},
{Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", Mtime: 150},
{Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", Mtime: 160},
{Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", Mtime: 170},
},
Expected: []*VisibleInterval{
{start: 0, stop: 2097152, fileId: "3,029565bf3092"},
{start: 2097152, stop: 5242880, fileId: "6,029632f47ae2"},
{start: 5242880, stop: 8388608, fileId: "2,029734c5aa10"},
{start: 8388608, stop: 11534336, fileId: "5,02982f80de50"},
{start: 11534336, stop: 14376529, fileId: "7,0299ad723803"},
},
},
// case 8: real bug
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 77824, FileId: "4,0b3df938e301", Mtime: 123},
{Offset: 471040, Size: 472225 - 471040, FileId: "6,0b3e0650019c", Mtime: 130},
{Offset: 77824, Size: 208896 - 77824, FileId: "4,0b3f0c7202f0", Mtime: 140},
{Offset: 208896, Size: 339968 - 208896, FileId: "2,0b4031a72689", Mtime: 150},
{Offset: 339968, Size: 471040 - 339968, FileId: "3,0b416a557362", Mtime: 160},
},
Expected: []*VisibleInterval{
{start: 0, stop: 77824, fileId: "4,0b3df938e301"},
{start: 77824, stop: 208896, fileId: "4,0b3f0c7202f0"},
{start: 208896, stop: 339968, fileId: "2,0b4031a72689"},
{start: 339968, stop: 471040, fileId: "3,0b416a557362"},
{start: 471040, stop: 472225, fileId: "6,0b3e0650019c"},
},
},
}
for i, testcase := range testcases {
log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
intervals := NonOverlappingVisibleIntervals(testcase.Chunks)
for x, interval := range intervals {
log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s",
i, x, interval.start, interval.stop, interval.fileId)
}
		if len(intervals) != len(testcase.Expected) {
			t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected))
		}
		for x, interval := range intervals {
if interval.start != testcase.Expected[x].start {
t.Fatalf("failed on test case %d, interval %d, start %d, expect %d",
i, x, interval.start, testcase.Expected[x].start)
}
if interval.stop != testcase.Expected[x].stop {
t.Fatalf("failed on test case %d, interval %d, stop %d, expect %d",
i, x, interval.stop, testcase.Expected[x].stop)
}
if interval.fileId != testcase.Expected[x].fileId {
t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s",
i, x, interval.fileId, testcase.Expected[x].fileId)
}
}
}
}
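
// TestChunksReading verifies that ViewFromChunks resolves a read request
// (Offset, Size) against the chunk list into ChunkViews, each clipped to the
// visible portion of its chunk within the requested range.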
func TestChunksReading(t *testing.T) {
testcases := []struct {
Chunks []*filer_pb.FileChunk
Offset int64
Size int
Expected []*ChunkView
}{
// case 0: normal
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
},
Offset: 0,
Size: 250,
Expected: []*ChunkView{
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
{Offset: 0, Size: 50, FileId: "fsad", LogicOffset: 200},
},
},
// case 1: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
},
Offset: 50,
Size: 100,
Expected: []*ChunkView{
{Offset: 50, Size: 100, FileId: "asdf", LogicOffset: 50},
},
},
// case 2: updates overwrite part of previous chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
},
Offset: 25,
Size: 50,
Expected: []*ChunkView{
{Offset: 25, Size: 25, FileId: "asdf", LogicOffset: 25},
{Offset: 0, Size: 25, FileId: "abc", LogicOffset: 50},
},
},
// case 3: updates overwrite full chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
},
Offset: 0,
Size: 200,
Expected: []*ChunkView{
{Offset: 0, Size: 50, FileId: "asdf", LogicOffset: 0},
{Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 50},
},
},
// case 4: updates far away from prev chunks
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
},
Offset: 0,
Size: 400,
Expected: []*ChunkView{
{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
// {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen
},
},
		// case 5: overlapping updates, with chunks listed out of Mtime order
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
{Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
},
Offset: 0,
Size: 220,
Expected: []*ChunkView{
{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
{Offset: 0, Size: 20, FileId: "abc", LogicOffset: 200},
},
},
// case 6: same updates
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
},
Offset: 0,
Size: 100,
Expected: []*ChunkView{
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
},
},
// case 7: edge cases
{
Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
},
Offset: 0,
Size: 200,
Expected: []*ChunkView{
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
},
},
}
for i, testcase := range testcases {
log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
chunks := ViewFromChunks(testcase.Chunks, testcase.Offset, testcase.Size)
		if len(chunks) != len(testcase.Expected) {
			t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected))
		}
		for x, chunk := range chunks {
log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s",
i, x, chunk.Offset, chunk.Size, chunk.FileId)
if chunk.Offset != testcase.Expected[x].Offset {
t.Fatalf("failed on read case %d, chunk %d, Offset %d, expect %d",
i, x, chunk.Offset, testcase.Expected[x].Offset)
}
if chunk.Size != testcase.Expected[x].Size {
t.Fatalf("failed on read case %d, chunk %d, Size %d, expect %d",
i, x, chunk.Size, testcase.Expected[x].Size)
}
if chunk.FileId != testcase.Expected[x].FileId {
t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
i, x, chunk.FileId, testcase.Expected[x].FileId)
}
if chunk.LogicOffset != testcase.Expected[x].LogicOffset {
t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d",
i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset)
}
}
}
}
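
// BenchmarkCompactFileChunks measures CompactFileChunks over 2*k heavily
// overlapping chunks, built the same way as the generated pairs in
// TestCompactFileChunks2.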
func BenchmarkCompactFileChunks(b *testing.B) {
var chunks []*filer_pb.FileChunk
k := 1024
for n := 0; n < k; n++ {
chunks = append(chunks, &filer_pb.FileChunk{
Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), Mtime: int64(n),
})
chunks = append(chunks, &filer_pb.FileChunk{
Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), Mtime: int64(n + k),
})
}
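
	// reset the timer so the chunk-list setup above is not counted in the measurement
	b.ResetTimer()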
for n := 0; n < b.N; n++ {
CompactFileChunks(chunks)
}
}