2012-09-03 16:50:04 +08:00
|
|
|
package replication
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
|
|
|
"math/rand"
|
|
|
|
"pkg/storage"
|
2013-01-17 16:15:05 +08:00
|
|
|
"pkg/topology"
|
2012-09-03 16:50:04 +08:00
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
)
|
|
|
|
|
|
|
|
// topologyLayout is a JSON fixture describing the test topology:
// dc1 has two racks whose servers hold overlapping volume id sets,
// dc2 is empty, and dc3 has a single rack with one server. Each
// server lists its volumes ({"id","size"} pairs) and a "limit"
// (its maximum volume count). Note some volume ids repeat across
// servers, modeling replicas of the same volume.
var topologyLayout = `
{
"dc1":{
"rack1":{
"server1":{
"volumes":[
{"id":1, "size":12312},
{"id":2, "size":12312},
{"id":3, "size":12312}
],
"limit":3
},
"server2":{
"volumes":[
{"id":4, "size":12312},
{"id":5, "size":12312},
{"id":6, "size":12312}
],
"limit":10
}
},
"rack2":{
"server1":{
"volumes":[
{"id":4, "size":12312},
{"id":5, "size":12312},
{"id":6, "size":12312}
],
"limit":4
},
"server2":{
"volumes":[],
"limit":4
},
"server3":{
"volumes":[
{"id":2, "size":12312},
{"id":3, "size":12312},
{"id":4, "size":12312}
],
"limit":2
}
}
},
"dc2":{
},
"dc3":{
"rack2":{
"server1":{
"volumes":[
{"id":1, "size":12312},
{"id":3, "size":12312},
{"id":5, "size":12312}
],
"limit":4
}
}
}
}
`
|
|
|
|
|
|
|
|
func setup(topologyLayout string) *topology.Topology {
|
|
|
|
var data interface{}
|
|
|
|
err := json.Unmarshal([]byte(topologyLayout), &data)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Println("error:", err)
|
|
|
|
}
|
|
|
|
fmt.Println("data:", data)
|
|
|
|
|
|
|
|
//need to connect all nodes first before server adding volumes
|
2013-01-17 16:15:05 +08:00
|
|
|
topo := topology.NewTopology("mynetwork","/etc/weedfs/weedfs.conf","/tmp","testing",32*1024, 5)
|
2012-09-03 16:50:04 +08:00
|
|
|
mTopology := data.(map[string]interface{})
|
|
|
|
for dcKey, dcValue := range mTopology {
|
|
|
|
dc := topology.NewDataCenter(dcKey)
|
|
|
|
dcMap := dcValue.(map[string]interface{})
|
|
|
|
topo.LinkChildNode(dc)
|
|
|
|
for rackKey, rackValue := range dcMap {
|
|
|
|
rack := topology.NewRack(rackKey)
|
|
|
|
rackMap := rackValue.(map[string]interface{})
|
|
|
|
dc.LinkChildNode(rack)
|
|
|
|
for serverKey, serverValue := range rackMap {
|
2012-09-09 07:25:44 +08:00
|
|
|
server := topology.NewDataNode(serverKey)
|
2012-09-03 16:50:04 +08:00
|
|
|
serverMap := serverValue.(map[string]interface{})
|
|
|
|
rack.LinkChildNode(server)
|
|
|
|
for _, v := range serverMap["volumes"].([]interface{}) {
|
|
|
|
m := v.(map[string]interface{})
|
2013-01-17 16:15:05 +08:00
|
|
|
vi := storage.VolumeInfo{Id: storage.VolumeId(int64(m["id"].(float64))), Size: int64(m["size"].(float64)), Version:storage.CurrentVersion}
|
2012-09-17 08:31:15 +08:00
|
|
|
server.AddOrUpdateVolume(vi)
|
2012-09-03 16:50:04 +08:00
|
|
|
}
|
|
|
|
server.UpAdjustMaxVolumeCountDelta(int(serverMap["limit"].(float64)))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return topo
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestRemoveDataCenter(t *testing.T) {
|
|
|
|
topo := setup(topologyLayout)
|
|
|
|
topo.UnlinkChildNode(topology.NodeId("dc2"))
|
|
|
|
if topo.GetActiveVolumeCount() != 15 {
|
|
|
|
t.Fail()
|
|
|
|
}
|
|
|
|
topo.UnlinkChildNode(topology.NodeId("dc3"))
|
|
|
|
if topo.GetActiveVolumeCount() != 12 {
|
|
|
|
t.Fail()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestReserveOneVolume(t *testing.T) {
|
|
|
|
topo := setup(topologyLayout)
|
2013-01-17 16:15:05 +08:00
|
|
|
rand.Seed(time.Now().UnixNano())
|
|
|
|
vg:=&VolumeGrowth{copy1factor:3,copy2factor:2,copy3factor:1,copyAll:4}
|
|
|
|
if c, e := vg.GrowByCountAndType(1,storage.Copy000,topo);e==nil{
|
|
|
|
t.Log("reserved", c)
|
|
|
|
}
|
2012-09-17 08:31:15 +08:00
|
|
|
}
|
2013-01-17 16:15:05 +08:00
|
|
|
|