package topology
import (
	"encoding/json"
	"fmt"
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/sequence"
	"github.com/seaweedfs/seaweedfs/weed/storage"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
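
// topologyLayout describes a small test topology as JSON: data centers contain
// racks, racks contain servers, and each server lists its volumes plus a
// "limit" (its max volume count). Servers may also carry an "ip", and volume
// entries may carry "collection" and "replication" fields; setup understands
// all of these.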
var topologyLayout = `
{
"dc1":{
"rack1":{
"server111":{
"volumes":[
{"id":1, "size":12312},
{"id":2, "size":12312},
{"id":3, "size":12312}
],
"limit":3
},
"server112":{
"volumes":[
{"id":4, "size":12312},
{"id":5, "size":12312},
{"id":6, "size":12312}
],
"limit":10
}
},
"rack2":{
"server121":{
"volumes":[
{"id":4, "size":12312},
{"id":5, "size":12312},
{"id":6, "size":12312}
],
"limit":4
},
"server122":{
"volumes":[],
"limit":4
},
"server123":{
"volumes":[
{"id":2, "size":12312},
{"id":3, "size":12312},
{"id":4, "size":12312}
],
"limit":5
}
}
},
"dc2":{
},
"dc3":{
"rack2":{
"server321":{
"volumes":[
{"id":1, "size":12312},
{"id":3, "size":12312},
{"id":5, "size":12312}
],
"limit":4
}
}
}
}
`
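
// setup parses a JSON layout string and builds the corresponding Topology: it
// links data centers, racks, and data nodes, registers each volume on its
// server (and in the matching volume layout when a replication string is
// given), and applies each server's "limit" as its max volume count.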
func setup(topologyLayout string) *Topology {
var data interface{}
err := json.Unmarshal([]byte(topologyLayout), &data)
if err != nil {
fmt.Println("error:", err)
}
fmt.Println("data:", data)
// All nodes need to be linked into the topology before the servers add volumes.
topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
mTopology := data.(map[string]interface{})
for dcKey, dcValue := range mTopology {
dc := NewDataCenter(dcKey)
dcMap := dcValue.(map[string]interface{})
topo.LinkChildNode(dc)
for rackKey, rackValue := range dcMap {
dcRack := NewRack(rackKey)
rackMap := rackValue.(map[string]interface{})
dc.LinkChildNode(dcRack)
for serverKey, serverValue := range rackMap {
server := NewDataNode(serverKey)
serverMap := serverValue.(map[string]interface{})
if ip, ok := serverMap["ip"]; ok {
server.Ip = ip.(string)
}
dcRack.LinkChildNode(server)
for _, v := range serverMap["volumes"].([]interface{}) {
m := v.(map[string]interface{})
vi := storage.VolumeInfo{
Id: needle.VolumeId(int64(m["id"].(float64))),
Size: uint64(m["size"].(float64)),
Version: needle.CurrentVersion,
}
if mVal, ok := m["collection"]; ok {
vi.Collection = mVal.(string)
}
if mVal, ok := m["replication"]; ok {
rp, _ := super_block.NewReplicaPlacementFromString(mVal.(string))
vi.ReplicaPlacement = rp
}
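// Volumes that declare a replication string are also registered with the
// matching volume layout and marked writable, so tests such as
// TestPickForWrite can later pick them for writes.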
if vi.ReplicaPlacement != nil {
vl := topo.GetVolumeLayout(vi.Collection, vi.ReplicaPlacement, needle.EMPTY_TTL, types.HardDriveType)
vl.RegisterVolume(&vi, server)
vl.setVolumeWritable(vi.Id)
}
server.AddOrUpdateVolume(vi)
}
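// Apply the per-server "limit" as the max volume count of the node's default
// (empty-tagged) disk.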
disk := server.getOrCreateDisk("")
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk("")
deltaDiskUsage.maxVolumeCount = int64(serverMap["limit"].(float64))
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
}
}
return topo
}
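
// TestFindEmptySlotsForOneVolume asks the growth planner for servers in dc1
// that could host a new volume with replica placement "002" (two extra copies
// on different servers within the same rack) and prints the chosen nodes.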
func TestFindEmptySlotsForOneVolume(t *testing.T) {
topo := setup(topologyLayout)
vg := NewDefaultVolumeGrowth()
rp, _ := super_block.NewReplicaPlacementFromString("002")
volumeGrowOption := &VolumeGrowOption{
Collection: "",
ReplicaPlacement: rp,
DataCenter: "dc1",
Rack: "",
DataNode: "",
}
servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
if err != nil {
fmt.Println("finding empty slots error :", err)
t.Fail()
}
for _, server := range servers {
fmt.Println("assigned node :", server.Id())
}
}
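
// topologyLayout2 models a single data center (dc1) with three racks of six
// servers each, all with a volume limit of 300, leaving plenty of room for
// rack- and server-level placement.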
var topologyLayout2 = `
{
"dc1":{
"rack1":{
"server111":{
"volumes":[
{"id":1, "size":12312},
{"id":2, "size":12312},
{"id":3, "size":12312}
],
"limit":300
},
"server112":{
"volumes":[
{"id":4, "size":12312},
{"id":5, "size":12312},
{"id":6, "size":12312}
],
"limit":300
},
"server113":{
"volumes":[],
"limit":300
},
"server114":{
"volumes":[],
"limit":300
},
"server115":{
"volumes":[],
"limit":300
},
"server116":{
"volumes":[],
"limit":300
}
},
"rack2":{
"server121":{
"volumes":[
{"id":4, "size":12312},
{"id":5, "size":12312},
{"id":6, "size":12312}
],
"limit":300
},
"server122":{
"volumes":[],
"limit":300
},
"server123":{
"volumes":[
{"id":2, "size":12312},
{"id":3, "size":12312},
{"id":4, "size":12312}
],
"limit":300
},
"server124":{
"volumes":[],
"limit":300
},
"server125":{
"volumes":[],
"limit":300
},
"server126":{
"volumes":[],
"limit":300
}
},
"rack3":{
"server131":{
"volumes":[],
"limit":300
},
"server132":{
"volumes":[],
"limit":300
},
"server133":{
"volumes":[],
"limit":300
},
"server134":{
"volumes":[],
"limit":300
},
"server135":{
"volumes":[],
"limit":300
},
"server136":{
"volumes":[],
"limit":300
}
}
}
}
`
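
// TestReplication011 plans slots in dc1 for replica placement "011" (one copy
// on a different rack in the same data center plus one on a different server
// in the same rack) and prints the assigned nodes.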
func TestReplication011(t *testing.T) {
topo := setup(topologyLayout2)
vg := NewDefaultVolumeGrowth()
rp, _ := super_block.NewReplicaPlacementFromString("011")
volumeGrowOption := &VolumeGrowOption{
Collection: "MAIL",
ReplicaPlacement: rp,
DataCenter: "dc1",
Rack: "",
DataNode: "",
}
servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
if err != nil {
fmt.Println("finding empty slots error :", err)
t.Fail()
}
for _, server := range servers {
fmt.Println("assigned node :", server.Id())
}
}
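
// topologyLayout3 spreads six single-server racks across six data centers with
// volume limits of 2000, 2000, 1000, 1000, 500, and 500, giving the nodes
// different free-slot weights.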
var topologyLayout3 = `
{
"dc1":{
"rack1":{
"server111":{
"volumes":[],
"limit":2000
}
}
},
"dc2":{
"rack2":{
"server222":{
"volumes":[],
"limit":2000
}
}
},
"dc3":{
"rack3":{
"server333":{
"volumes":[],
"limit":1000
}
}
},
"dc4":{
"rack4":{
"server444":{
"volumes":[],
"limit":1000
}
}
},
"dc5":{
"rack5":{
"server555":{
"volumes":[],
"limit":500
}
}
},
"dc6":{
"rack6":{
"server666":{
"volumes":[],
"limit":500
}
}
}
}
`
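
// TestFindEmptySlotsForOneVolumeScheduleByWeight plans 1000 volumes with
// replica placement "100" (one extra copy in a different data center) and
// prints the resulting per-node distribution; as the name suggests, the
// assignments should roughly track each node's capacity, though nothing is
// asserted here.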
func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) {
topo := setup(topologyLayout3)
vg := NewDefaultVolumeGrowth()
rp, _ := super_block.NewReplicaPlacementFromString("100")
volumeGrowOption := &VolumeGrowOption{
Collection: "Weight",
ReplicaPlacement: rp,
DataCenter: "",
Rack: "",
DataNode: "",
}
distribution := map[NodeId]int{}
// assign 1000 volumes
for i := 0; i < 1000; i++ {
servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
if err != nil {
fmt.Println("finding empty slots error :", err)
t.Fail()
}
for _, server := range servers {
// fmt.Println("assigned node :", server.Id())
distribution[server.id]++
}
}
for k, v := range distribution {
fmt.Printf("%s : %d\n", k, v)
}
}
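
// topologyLayout4 defines three data centers with one server each; every
// volume belongs to the "test" collection and carries an explicit replication
// string, so setup registers it in the corresponding volume layout for
// TestPickForWrite.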
var topologyLayout4 = `
{
"dc1":{
"rack1":{
"serverdc111":{
"ip": "127.0.0.1",
"volumes":[
{"id":1, "size":12312, "collection":"test", "replication":"001"},
{"id":2, "size":12312, "collection":"test", "replication":"100"},
{"id":4, "size":12312, "collection":"test", "replication":"100"},
{"id":6, "size":12312, "collection":"test", "replication":"010"}
],
"limit":100
}
}
},
"dc2":{
"rack1":{
"serverdc211":{
"ip": "127.0.0.2",
"volumes":[
{"id":2, "size":12312, "collection":"test", "replication":"100"},
{"id":3, "size":12312, "collection":"test", "replication":"010"},
{"id":5, "size":12312, "collection":"test", "replication":"001"},
{"id":6, "size":12312, "collection":"test", "replication":"010"}
],
"limit":100
}
}
},
"dc3":{
"rack1":{
"serverdc311":{
"ip": "127.0.0.3",
"volumes":[
{"id":1, "size":12312, "collection":"test", "replication":"001"},
{"id":3, "size":12312, "collection":"test", "replication":"010"},
{"id":4, "size":12312, "collection":"test", "replication":"100"},
{"id":5, "size":12312, "collection":"test", "replication":"001"}
],
"limit":100
}
}
}
}
`
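
// TestPickForWrite raises the volume growth threshold to 0.9, then cycles
// through replica placements "001", "010", and "100" and several data center
// filters. Picks against the unknown "dc0" must fail with a zero count and
// shouldGrow set, while the other cases must return a file id and a non-zero
// count without requesting growth.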
func TestPickForWrite(t *testing.T) {
topo := setup(topologyLayout4)
volumeGrowOption := &VolumeGrowOption{
Collection: "test",
DataCenter: "",
Rack: "",
DataNode: "",
}
v := util.GetViper()
v.Set("master.volume_growth.threshold", 0.9)
for _, rpStr := range []string{"001", "010", "100"} {
rp, _ := super_block.NewReplicaPlacementFromString(rpStr)
vl := topo.GetVolumeLayout("test", rp, needle.EMPTY_TTL, types.HardDriveType)
volumeGrowOption.ReplicaPlacement = rp
for _, dc := range []string{"", "dc1", "dc2", "dc3", "dc0"} {
volumeGrowOption.DataCenter = dc
for _, r := range []string{""} {
volumeGrowOption.Rack = r
for _, dn := range []string{""} {
if dc == "" && dn != "" {
continue
}
volumeGrowOption.DataNode = dn
fileId, count, _, shouldGrow, err := topo.PickForWrite(1, volumeGrowOption, vl)
if dc == "dc0" {
if err == nil || count != 0 || !shouldGrow {
fmt.Println(dc, r, dn, "pick for write should have returned an error")
t.Fail()
}
} else if err != nil {
fmt.Println(dc, r, dn, "pick for write error :", err)
t.Fail()
} else if count == 0 {
fmt.Println(dc, r, dn, "pick for write count is zero")
t.Fail()
} else if len(fileId) == 0 {
fmt.Println(dc, r, dn, "pick for write file id is empty")
t.Fail()
} else if shouldGrow {
fmt.Println(dc, r, dn, "pick for write error: should not grow")
t.Fail()
}
}
}
}
}
}