go fmt for all source code

Chris Lu 2013-01-17 00:56:56 -08:00
parent ca9056d673
commit b0c7df0c3b
35 changed files with 478 additions and 493 deletions

View File

@ -1,53 +1,52 @@
package main
import (
"flag"
"fmt"
"os"
"strings"
"flag"
"fmt"
"os"
"strings"
)
type Command struct {
// Run runs the command.
// The args are the arguments after the command name.
Run func(cmd *Command, args []string) bool
// Run runs the command.
// The args are the arguments after the command name.
Run func(cmd *Command, args []string) bool
// UsageLine is the one-line usage message.
// The first word in the line is taken to be the command name.
UsageLine string
// UsageLine is the one-line usage message.
// The first word in the line is taken to be the command name.
UsageLine string
// Short is the short description shown in the 'go help' output.
Short string
// Short is the short description shown in the 'go help' output.
Short string
// Long is the long message shown in the 'go help <this-command>' output.
Long string
// Flag is a set of flags specific to this command.
Flag flag.FlagSet
// Long is the long message shown in the 'go help <this-command>' output.
Long string
// Flag is a set of flags specific to this command.
Flag flag.FlagSet
}
// Name returns the command's name: the first word in the usage line.
func (c *Command) Name() string {
name := c.UsageLine
i := strings.Index(name, " ")
if i >= 0 {
name = name[:i]
}
return name
name := c.UsageLine
i := strings.Index(name, " ")
if i >= 0 {
name = name[:i]
}
return name
}
func (c *Command) Usage() {
fmt.Fprintf(os.Stderr, "Example: weed %s\n", c.UsageLine)
fmt.Fprintf(os.Stderr, "Default Usage:\n")
c.Flag.PrintDefaults()
fmt.Fprintf(os.Stderr, "Description:\n")
fmt.Fprintf(os.Stderr, " %s\n", strings.TrimSpace(c.Long))
os.Exit(2)
fmt.Fprintf(os.Stderr, "Example: weed %s\n", c.UsageLine)
fmt.Fprintf(os.Stderr, "Default Usage:\n")
c.Flag.PrintDefaults()
fmt.Fprintf(os.Stderr, "Description:\n")
fmt.Fprintf(os.Stderr, " %s\n", strings.TrimSpace(c.Long))
os.Exit(2)
}
// Runnable reports whether the command can be run; otherwise
// it is a documentation pseudo-command such as importpath.
func (c *Command) Runnable() bool {
return c.Run != nil
return c.Run != nil
}
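
For context, a minimal sketch of how the Command type above is used elsewhere in package main (not part of this commit; the "version" subcommand and its output are made up):

package main

import "fmt"

// a hypothetical subcommand following the same pattern as cmdShell
var cmdVersion = &Command{
	UsageLine: "version",
	Short:     "print the weed version",
	Long: `print the weed version and exit.
`,
}

func init() {
	cmdVersion.Run = runVersion // break init cycle, as with cmdShell
}

func runVersion(cmd *Command, args []string) bool {
	fmt.Println("weed version 0.x (placeholder)")
	return true
}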

View File

@ -1,54 +1,53 @@
package main
import (
"bufio"
"os"
"fmt"
"bufio"
"fmt"
"os"
)
func init() {
cmdShell.Run = runShell // break init cycle
cmdShell.Run = runShell // break init cycle
}
var cmdShell = &Command{
UsageLine: "shell",
Short: "run interactive commands, now just echo",
Long: `run interactive commands.
UsageLine: "shell",
Short: "run interactive commands, now just echo",
Long: `run interactive commands.
`,
}
var (
)
var ()
func runShell(command *Command, args []string) bool {
r := bufio.NewReader(os.Stdin)
o := bufio.NewWriter(os.Stdout)
e := bufio.NewWriter(os.Stderr)
prompt := func () {
o.WriteString("> ")
o.Flush()
};
readLine := func () string {
ret, err := r.ReadString('\n')
if err != nil {
fmt.Fprint(e,err);
os.Exit(1)
}
return ret
}
execCmd := func (cmd string) int {
if cmd != "" {
o.WriteString(cmd)
}
return 0
}
r := bufio.NewReader(os.Stdin)
o := bufio.NewWriter(os.Stdout)
e := bufio.NewWriter(os.Stderr)
prompt := func() {
o.WriteString("> ")
o.Flush()
}
readLine := func() string {
ret, err := r.ReadString('\n')
if err != nil {
fmt.Fprint(e, err)
os.Exit(1)
}
return ret
}
execCmd := func(cmd string) int {
if cmd != "" {
o.WriteString(cmd)
}
return 0
}
cmd := ""
for {
prompt()
cmd = readLine()
execCmd(cmd)
}
return true
cmd := ""
for {
prompt()
cmd = readLine()
execCmd(cmd)
}
return true
}

View File

@ -67,7 +67,7 @@ func upload(filename string, server string, fid string) (int, error) {
}
ret, e := operation.Upload("http://"+server+"/"+fid, filename, fh)
if e != nil {
return 0, e
return 0, e
}
return ret.Size, e
}
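
A hedged sketch of calling operation.Upload directly, matching the call in the hunk above; the server address, fid, and file name are placeholders, and the "pkg/operation" import path follows this repository's GOPATH layout:

package main

import (
	"log"
	"os"

	"pkg/operation"
)

func main() {
	fh, err := os.Open("hello.txt") // placeholder local file
	if err != nil {
		log.Fatal(err)
	}
	defer fh.Close()
	// the volume server address and fid would normally come from the master's /dir/assign
	ret, err := operation.Upload("http://127.0.0.1:8080/3,01637037d6", "hello.txt", fh)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded", ret.Size, "bytes")
}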

View File

@ -42,7 +42,7 @@ var (
store *storage.Store
)
var fileNameEscaper = strings.NewReplacer("\\","\\\\","\"","\\\"")
var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
func statusHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
@ -156,7 +156,9 @@ func GetHandler(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
} else {
n.Data = storage.UnGzipData(n.Data)
if n.Data, err = storage.UnGzipData(n.Data); err != nil {
debug("lookup error:", err, r.URL.Path)
}
}
}
}
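
As an aside on the reformatted fileNameEscaper line above, a small standalone illustration of what the replacer does (the sample file name is invented):

package main

import (
	"fmt"
	"strings"
)

var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")

func main() {
	// backslashes become \\ and double quotes become \" so the name can be
	// embedded safely inside a quoted JSON string
	fmt.Println(fileNameEscaper.Replace(`report "v1" c:\tmp\a.txt`))
	// prints: report \"v1\" c:\\tmp\\a.txt
}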

View File

@ -175,9 +175,9 @@ func writeJson(w http.ResponseWriter, r *http.Request, obj interface{}) {
w.Header().Set("Content-Type", "application/javascript")
var bytes []byte
if r.FormValue("pretty") != "" {
bytes, _ = json.MarshalIndent(obj, "", " ")
bytes, _ = json.MarshalIndent(obj, "", " ")
} else {
bytes, _ = json.Marshal(obj)
bytes, _ = json.Marshal(obj)
}
callback := r.FormValue("callback")
if callback == "" {
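
A small standalone sketch of what the pretty parameter toggles in writeJson above (the sample object is made up):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	obj := map[string]interface{}{"volumeId": 3, "size": 12345}
	compact, _ := json.Marshal(obj)                // what writeJson emits by default
	pretty, _ := json.MarshalIndent(obj, "", "  ") // what it emits when ?pretty=... is set
	fmt.Println(string(compact))
	fmt.Println(string(pretty))
}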

View File

@ -3,8 +3,8 @@ package directory
import (
"encoding/hex"
"pkg/storage"
"strings"
"pkg/util"
"strings"
)
type FileId struct {
@ -16,14 +16,14 @@ type FileId struct {
func NewFileId(VolumeId storage.VolumeId, Key uint64, Hashcode uint32) *FileId {
return &FileId{VolumeId: VolumeId, Key: Key, Hashcode: Hashcode}
}
func ParseFileId(fid string) *FileId{
func ParseFileId(fid string) *FileId {
a := strings.Split(fid, ",")
if len(a) != 2 {
println("Invalid fid", fid, ", split length", len(a))
return nil
}
vid_string, key_hash_string := a[0], a[1]
volumeId, _ := storage.NewVolumeId(vid_string)
volumeId, _ := storage.NewVolumeId(vid_string)
key, hash := storage.ParseKeyHash(key_hash_string)
return &FileId{VolumeId: volumeId, Key: key, Hashcode: hash}
}
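
A minimal sketch of parsing a fid with the function above; the fid value is only an example, and "pkg/directory" assumes this repository's import layout:

package main

import (
	"fmt"

	"pkg/directory"
)

func main() {
	// a fid is "<volume id>,<key and cookie in hex>", e.g. as returned by /dir/assign
	fid := directory.ParseFileId("3,01637037d6")
	if fid == nil {
		fmt.Println("invalid fid")
		return
	}
	fmt.Println("volume:", fid.VolumeId, "key:", fid.Key, "cookie:", fid.Hashcode)
}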

View File

@ -1,32 +1,32 @@
package operation
import (
"encoding/json"
"errors"
"net/url"
"pkg/storage"
"pkg/topology"
"pkg/util"
"encoding/json"
"errors"
"net/url"
"pkg/storage"
"pkg/topology"
"pkg/util"
)
type AllocateVolumeResult struct {
Error string
Error string
}
func AllocateVolume(dn *topology.DataNode, vid storage.VolumeId, repType storage.ReplicationType) error {
values := make(url.Values)
values.Add("volume", vid.String())
values.Add("replicationType", repType.String())
jsonBlob, err := util.Post("http://"+dn.Url()+"/admin/assign_volume", values)
if err != nil {
return err
}
var ret AllocateVolumeResult
if err := json.Unmarshal(jsonBlob, &ret); err != nil {
return err
}
if ret.Error != "" {
return errors.New(ret.Error)
}
return nil
values := make(url.Values)
values.Add("volume", vid.String())
values.Add("replicationType", repType.String())
jsonBlob, err := util.Post("http://"+dn.Url()+"/admin/assign_volume", values)
if err != nil {
return err
}
var ret AllocateVolumeResult
if err := json.Unmarshal(jsonBlob, &ret); err != nil {
return err
}
if ret.Error != "" {
return errors.New(ret.Error)
}
return nil
}

View File

@ -1,8 +1,8 @@
package operation
import (
"net/http"
"log"
"net/http"
)
func Delete(url string) error {

View File

@ -1,38 +1,38 @@
package operation
import (
"encoding/json"
"net/url"
"pkg/storage"
"pkg/util"
_ "fmt"
"errors"
"encoding/json"
"errors"
_ "fmt"
"net/url"
"pkg/storage"
"pkg/util"
)
type Location struct {
Url string "url"
PublicUrl string "publicUrl"
Url string "url"
PublicUrl string "publicUrl"
}
type LookupResult struct {
Locations []Location "locations"
Error string "error"
Locations []Location "locations"
Error string "error"
}
//TODO: Add a caching for vid here
func Lookup(server string, vid storage.VolumeId) (*LookupResult, error) {
values := make(url.Values)
values.Add("volumeId", vid.String())
jsonBlob, err := util.Post("http://"+server+"/dir/lookup", values)
if err != nil {
return nil, err
}
var ret LookupResult
err = json.Unmarshal(jsonBlob, &ret)
if err != nil {
return nil, err
}
if ret.Error != ""{
return nil, errors.New(ret.Error)
}
return &ret, nil
values := make(url.Values)
values.Add("volumeId", vid.String())
jsonBlob, err := util.Post("http://"+server+"/dir/lookup", values)
if err != nil {
return nil, err
}
var ret LookupResult
err = json.Unmarshal(jsonBlob, &ret)
if err != nil {
return nil, err
}
if ret.Error != "" {
return nil, errors.New(ret.Error)
}
return &ret, nil
}
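
A hedged sketch of using Lookup from client code; "localhost:9333" stands in for the master server address, and the import paths assume this repository's layout:

package main

import (
	"fmt"
	"log"

	"pkg/operation"
	"pkg/storage"
)

func main() {
	vid, err := storage.NewVolumeId("3")
	if err != nil {
		log.Fatal(err)
	}
	ret, err := operation.Lookup("localhost:9333", vid)
	if err != nil {
		log.Fatal(err)
	}
	for _, loc := range ret.Locations {
		fmt.Println("volume", vid.String(), "is served by", loc.Url, "public url:", loc.PublicUrl)
	}
}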

View File

@ -3,18 +3,18 @@ package operation
import (
"bytes"
"encoding/json"
"errors"
_ "fmt"
"io"
"io/ioutil"
"log"
"log"
"mime/multipart"
"net/http"
"errors"
)
type UploadResult struct {
Size int
Error string
Size int
Error string
}
func Upload(uploadUrl string, filename string, reader io.Reader) (*UploadResult, error) {
@ -26,7 +26,7 @@ func Upload(uploadUrl string, filename string, reader io.Reader) (*UploadResult,
body_writer.Close()
resp, err := http.Post(uploadUrl, content_type, body_buf)
if err != nil {
log.Println("failing to upload to", uploadUrl)
log.Println("failing to upload to", uploadUrl)
return nil, err
}
defer resp.Body.Close()
@ -37,11 +37,11 @@ func Upload(uploadUrl string, filename string, reader io.Reader) (*UploadResult,
var ret UploadResult
err = json.Unmarshal(resp_body, &ret)
if err != nil {
log.Println("failing to read upload resonse", uploadUrl, resp_body)
return nil, err
log.Println("failing to read upload resonse", uploadUrl, resp_body)
return nil, err
}
if ret.Error != ""{
return nil, errors.New(ret.Error)
if ret.Error != "" {
return nil, errors.New(ret.Error)
}
return &ret, nil
}

View File

@ -7,7 +7,7 @@ import (
"pkg/operation"
"pkg/storage"
"pkg/topology"
"sync"
"sync"
)
/*
@ -24,7 +24,7 @@ type VolumeGrowth struct {
copy3factor int
copyAll int
accessLock sync.Mutex
accessLock sync.Mutex
}
func NewDefaultVolumeGrowth() *VolumeGrowth {
@ -49,8 +49,8 @@ func (vg *VolumeGrowth) GrowByType(repType storage.ReplicationType, topo *topolo
return 0, errors.New("Unknown Replication Type!")
}
func (vg *VolumeGrowth) GrowByCountAndType(count int, repType storage.ReplicationType, topo *topology.Topology) (counter int, err error) {
vg.accessLock.Lock()
defer vg.accessLock.Unlock()
vg.accessLock.Lock()
defer vg.accessLock.Unlock()
counter = 0
switch repType {
@ -182,7 +182,7 @@ func (vg *VolumeGrowth) GrowByCountAndType(count int, repType storage.Replicatio
func (vg *VolumeGrowth) grow(topo *topology.Topology, vid storage.VolumeId, repType storage.ReplicationType, servers ...*topology.DataNode) error {
for _, server := range servers {
if err := operation.AllocateVolume(server, vid, repType); err == nil {
vi := storage.VolumeInfo{Id: vid, Size: 0, RepType: repType, Version:storage.CurrentVersion}
vi := storage.VolumeInfo{Id: vid, Size: 0, RepType: repType, Version: storage.CurrentVersion}
server.AddOrUpdateVolume(vi)
topo.RegisterVolumeLayout(&vi, server)
fmt.Println("Created Volume", vid, "on", server)

View File

@ -5,7 +5,7 @@ import (
"fmt"
"math/rand"
"pkg/storage"
"pkg/topology"
"pkg/topology"
"testing"
"time"
)
@ -80,7 +80,7 @@ func setup(topologyLayout string) *topology.Topology {
fmt.Println("data:", data)
//need to connect all nodes first before server adding volumes
topo := topology.NewTopology("mynetwork","/etc/weedfs/weedfs.conf","/tmp","testing",32*1024, 5)
topo := topology.NewTopology("mynetwork", "/etc/weedfs/weedfs.conf", "/tmp", "testing", 32*1024, 5)
mTopology := data.(map[string]interface{})
for dcKey, dcValue := range mTopology {
dc := topology.NewDataCenter(dcKey)
@ -96,7 +96,7 @@ func setup(topologyLayout string) *topology.Topology {
rack.LinkChildNode(server)
for _, v := range serverMap["volumes"].([]interface{}) {
m := v.(map[string]interface{})
vi := storage.VolumeInfo{Id: storage.VolumeId(int64(m["id"].(float64))), Size: int64(m["size"].(float64)), Version:storage.CurrentVersion}
vi := storage.VolumeInfo{Id: storage.VolumeId(int64(m["id"].(float64))), Size: int64(m["size"].(float64)), Version: storage.CurrentVersion}
server.AddOrUpdateVolume(vi)
}
server.UpAdjustMaxVolumeCountDelta(int(serverMap["limit"].(float64)))
@ -121,10 +121,9 @@ func TestRemoveDataCenter(t *testing.T) {
func TestReserveOneVolume(t *testing.T) {
topo := setup(topologyLayout)
rand.Seed(time.Now().UnixNano())
vg:=&VolumeGrowth{copy1factor:3,copy2factor:2,copy3factor:1,copyAll:4}
if c, e := vg.GrowByCountAndType(1,storage.Copy000,topo);e==nil{
t.Log("reserved", c)
}
rand.Seed(time.Now().UnixNano())
vg := &VolumeGrowth{copy1factor: 3, copy2factor: 2, copy3factor: 1, copyAll: 4}
if c, e := vg.GrowByCountAndType(1, storage.Copy000, topo); e == nil {
t.Log("reserved", c)
}
}

View File

@ -1,11 +1,11 @@
package sequence
import (
"encoding/gob"
"os"
"path"
"sync"
"encoding/gob"
"log"
"os"
"path"
"sync"
)
const (
@ -27,21 +27,21 @@ type SequencerImpl struct {
}
func NewSequencer(dirname string, filename string) (m *SequencerImpl) {
m = &SequencerImpl{dir: dirname, fileName: filename}
m = &SequencerImpl{dir: dirname, fileName: filename}
seqFile, se := os.OpenFile(path.Join(m.dir, m.fileName+".seq"), os.O_RDONLY, 0644)
if se != nil {
m.FileIdSequence = FileIdSaveInterval
log.Println("Setting file id sequence", m.FileIdSequence)
} else {
decoder := gob.NewDecoder(seqFile)
defer seqFile.Close()
decoder.Decode(&m.FileIdSequence)
log.Println("Loading file id sequence", m.FileIdSequence, "=>", m.FileIdSequence+FileIdSaveInterval)
//in case the server stops between intervals
m.FileIdSequence += FileIdSaveInterval
}
return
seqFile, se := os.OpenFile(path.Join(m.dir, m.fileName+".seq"), os.O_RDONLY, 0644)
if se != nil {
m.FileIdSequence = FileIdSaveInterval
log.Println("Setting file id sequence", m.FileIdSequence)
} else {
decoder := gob.NewDecoder(seqFile)
defer seqFile.Close()
decoder.Decode(&m.FileIdSequence)
log.Println("Loading file id sequence", m.FileIdSequence, "=>", m.FileIdSequence+FileIdSaveInterval)
//in case the server stops between intervals
m.FileIdSequence += FileIdSaveInterval
}
return
}
//count should be 1 or more
@ -60,12 +60,12 @@ func (m *SequencerImpl) NextFileId(count int) (uint64, int) {
return m.FileIdSequence - m.fileIdCounter, count
}
func (m *SequencerImpl) saveSequence() {
log.Println("Saving file id sequence", m.FileIdSequence, "to", path.Join(m.dir, m.fileName+".seq"))
seqFile, e := os.OpenFile(path.Join(m.dir, m.fileName+".seq"), os.O_CREATE|os.O_WRONLY, 0644)
if e != nil {
log.Fatalf("Sequence File Save [ERROR] %s\n", e)
}
defer seqFile.Close()
encoder := gob.NewEncoder(seqFile)
encoder.Encode(m.FileIdSequence)
log.Println("Saving file id sequence", m.FileIdSequence, "to", path.Join(m.dir, m.fileName+".seq"))
seqFile, e := os.OpenFile(path.Join(m.dir, m.fileName+".seq"), os.O_CREATE|os.O_WRONLY, 0644)
if e != nil {
log.Fatalf("Sequence File Save [ERROR] %s\n", e)
}
defer seqFile.Close()
encoder := gob.NewEncoder(seqFile)
encoder.Encode(m.FileIdSequence)
}
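
A minimal sketch of the sequencer's intended use, assuming NextFileId returns the first id and the count actually granted; the directory and file name are placeholders, and "pkg/sequence" follows this repository's layout:

package main

import (
	"fmt"

	"pkg/sequence"
)

func main() {
	// the counter is persisted to /tmp/weedfs.seq and bumped by FileIdSaveInterval
	// on restart, so ids handed out before a crash are never reused
	seq := sequence.NewSequencer("/tmp", "weedfs")
	start, count := seq.NextFileId(3)
	fmt.Println("file ids", start, "through", start+uint64(count)-1)
}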

View File

@ -1,43 +1,43 @@
package storage
import (
"log"
"os"
"pkg/util"
"testing"
"log"
"os"
"pkg/util"
)
func TestMemoryUsage(t *testing.T) {
indexFile, ie := os.OpenFile("sample.idx", os.O_RDWR|os.O_RDONLY, 0644)
if ie != nil {
log.Fatalln(ie)
}
LoadNewNeedleMap(indexFile)
indexFile, ie := os.OpenFile("sample.idx", os.O_RDWR|os.O_RDONLY, 0644)
if ie != nil {
log.Fatalln(ie)
}
LoadNewNeedleMap(indexFile)
}
func LoadNewNeedleMap(file *os.File) CompactMap {
m := NewCompactMap()
bytes := make([]byte, 16*1024)
count, e := file.Read(bytes)
if count > 0 {
fstat, _ := file.Stat()
log.Println("Loading index file", fstat.Name(), "size", fstat.Size())
}
for count > 0 && e == nil {
for i := 0; i < count; i += 16 {
key := util.BytesToUint64(bytes[i : i+8])
offset := util.BytesToUint32(bytes[i+8 : i+12])
size := util.BytesToUint32(bytes[i+12 : i+16])
if offset > 0 {
m.Set(Key(key), offset, size)
} else {
//delete(m, key)
}
}
m := NewCompactMap()
bytes := make([]byte, 16*1024)
count, e := file.Read(bytes)
if count > 0 {
fstat, _ := file.Stat()
log.Println("Loading index file", fstat.Name(), "size", fstat.Size())
}
for count > 0 && e == nil {
for i := 0; i < count; i += 16 {
key := util.BytesToUint64(bytes[i : i+8])
offset := util.BytesToUint32(bytes[i+8 : i+12])
size := util.BytesToUint32(bytes[i+12 : i+16])
if offset > 0 {
m.Set(Key(key), offset, size)
} else {
//delete(m, key)
}
}
count, e = file.Read(bytes)
}
return m
count, e = file.Read(bytes)
}
return m
}
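
For reference, the 16-byte index record layout that the loader above walks, written and read back with the util helpers (the values are arbitrary; "pkg/util" follows this repository's layout):

package main

import (
	"fmt"

	"pkg/util"
)

func main() {
	// one .idx record is 16 bytes: 8-byte needle key, 4-byte offset, 4-byte size
	record := make([]byte, 16)
	util.Uint64toBytes(record[0:8], 0x1234)
	util.Uint32toBytes(record[8:12], 8)     // offset field; the loader above skips entries whose offset is 0
	util.Uint32toBytes(record[12:16], 2048) // size field
	fmt.Println(util.BytesToUint64(record[0:8]),
		util.BytesToUint32(record[8:12]),
		util.BytesToUint32(record[12:16]))
}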

View File

@ -18,42 +18,42 @@ func TestXYZ(t *testing.T) {
m.Set(Key(i), i+11, i+5)
}
// for i := uint32(0); i < 100; i++ {
// if v := m.Get(Key(i)); v != nil {
// println(i, "=", v.Key, v.Offset, v.Size)
// }
// }
// for i := uint32(0); i < 100; i++ {
// if v := m.Get(Key(i)); v != nil {
// println(i, "=", v.Key, v.Offset, v.Size)
// }
// }
for i := uint32(0); i < 10*batch; i++ {
v, ok := m.Get(Key(i))
v, ok := m.Get(Key(i))
if i%3 == 0 {
if !ok {
t.Fatal("key", i, "missing!")
}
if !ok {
t.Fatal("key", i, "missing!")
}
if v.Size != i+5 {
t.Fatal("key", i, "size", v.Size)
}
} else if i%37 == 0 {
if ok && v.Size > 0 {
if ok && v.Size > 0 {
t.Fatal("key", i, "should have been deleted needle value", v)
}
} else if i%2 == 0 {
if v.Size != i {
if v.Size != i {
t.Fatal("key", i, "size", v.Size)
}
}
}
for i := uint32(10 * batch); i < 100*batch; i++ {
v, ok := m.Get(Key(i))
v, ok := m.Get(Key(i))
if i%37 == 0 {
if ok && v.Size > 0 {
t.Fatal("key", i, "should have been deleted needle value", v)
}
if ok && v.Size > 0 {
t.Fatal("key", i, "should have been deleted needle value", v)
}
} else if i%2 == 0 {
if v==nil{
t.Fatal("key", i, "missing")
}
if v == nil {
t.Fatal("key", i, "missing")
}
if v.Size != i {
t.Fatal("key", i, "size", v.Size)
}

View File

@ -12,30 +12,15 @@ import (
* Default more not to gzip since gzip can be done on client side.
*/
func IsGzippable(ext, mtype string) bool {
if strings.HasPrefix(mtype, "text/"){
return true
}
if ext == ".zip" {
return false
}
if ext == ".rar" {
return false
}
if ext == ".gz" {
return false
}
if ext == ".pdf" {
if strings.HasPrefix(mtype, "text/") {
return true
}
if ext == ".css" {
switch ext {
case ".zip", ".rar", ".gz", ".bz2", ".xz":
return false
case ".pdf", ".txt", ".html", ".css", ".js", ".json":
return true
}
if ext == ".js" {
return true
}
if ext == ".json" {
return true
}
if strings.HasPrefix(mtype, "application/") {
if strings.HasSuffix(mtype, "xml") {
return true
@ -46,18 +31,21 @@ func IsGzippable(ext, mtype string) bool {
}
return false
}
func GzipData(input []byte) []byte {
func GzipData(input []byte) ([]byte, error) {
buf := new(bytes.Buffer)
w, _ := gzip.NewWriterLevel(buf, flate.BestCompression)
if _, err := w.Write(input); err != nil {
println("error compressing data:", err)
return nil, err
}
if err := w.Close(); err != nil {
println("error closing compressed data:", err)
return nil, err
}
return buf.Bytes()
return buf.Bytes(), nil
}
func UnGzipData(input []byte) []byte {
func UnGzipData(input []byte) ([]byte, error) {
buf := bytes.NewBuffer(input)
r, _ := gzip.NewReader(buf)
defer r.Close()
@ -65,5 +53,5 @@ func UnGzipData(input []byte) []byte {
if err != nil {
println("error uncompressing data:", err)
}
return output
return output, err
}
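
A minimal sketch of the new GzipData/UnGzipData signatures in use (import path per this repository's layout):

package main

import (
	"fmt"
	"log"

	"pkg/storage"
)

func main() {
	fmt.Println(storage.IsGzippable(".css", "text/css")) // true: text/* and common web types get compressed
	data := []byte("hello weed-fs, hello weed-fs, hello weed-fs")
	zipped, err := storage.GzipData(data)
	if err != nil {
		log.Fatal(err)
	}
	unzipped, err := storage.UnGzipData(zipped)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(data), "->", len(zipped), "->", len(unzipped))
}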

View File

@ -12,8 +12,8 @@ import (
)
const (
NeedleHeaderSize = 16 //should never change this
NeedlePaddingSize = 8
NeedleHeaderSize = 16 //should never change this
NeedlePaddingSize = 8
NeedleChecksumSize = 4
)
@ -64,7 +64,9 @@ func NewNeedle(r *http.Request) (n *Needle, fname string, e error) {
mtype = contentType
}
if IsGzippable(ext, mtype) {
data = GzipData(data)
if data, e = GzipData(data); e != nil {
return
}
n.SetGzipped()
}
if ext == ".gz" {

View File

@ -1,123 +1,123 @@
package storage
import (
"errors"
"errors"
)
type ReplicationType string
const (
Copy000 = ReplicationType("000") // single copy
Copy001 = ReplicationType("001") // 2 copies, both on the same racks, and same data center
Copy010 = ReplicationType("010") // 2 copies, both on different racks, but same data center
Copy100 = ReplicationType("100") // 2 copies, each on different data center
Copy110 = ReplicationType("110") // 3 copies, 2 on different racks and local data center, 1 on different data center
Copy200 = ReplicationType("200") // 3 copies, each on a different data center
LengthRelicationType = 6
CopyNil = ReplicationType(255) // nil value
Copy000 = ReplicationType("000") // single copy
Copy001 = ReplicationType("001") // 2 copies, both on the same racks, and same data center
Copy010 = ReplicationType("010") // 2 copies, both on different racks, but same data center
Copy100 = ReplicationType("100") // 2 copies, each on different data center
Copy110 = ReplicationType("110") // 3 copies, 2 on different racks and local data center, 1 on different data center
Copy200 = ReplicationType("200") // 3 copies, each on a different data center
LengthRelicationType = 6
CopyNil = ReplicationType(255) // nil value
)
func NewReplicationTypeFromString(t string) (ReplicationType, error) {
switch t {
case "000":
return Copy000, nil
case "001":
return Copy001, nil
case "010":
return Copy010, nil
case "100":
return Copy100, nil
case "110":
return Copy110, nil
case "200":
return Copy200, nil
}
return Copy000, errors.New("Unknown Replication Type:"+t)
switch t {
case "000":
return Copy000, nil
case "001":
return Copy001, nil
case "010":
return Copy010, nil
case "100":
return Copy100, nil
case "110":
return Copy110, nil
case "200":
return Copy200, nil
}
return Copy000, errors.New("Unknown Replication Type:" + t)
}
func NewReplicationTypeFromByte(b byte) (ReplicationType, error) {
switch b {
case byte(000):
return Copy000, nil
case byte(001):
return Copy001, nil
case byte(010):
return Copy010, nil
case byte(100):
return Copy100, nil
case byte(110):
return Copy110, nil
case byte(200):
return Copy200, nil
}
return Copy000, errors.New("Unknown Replication Type:"+string(b))
switch b {
case byte(000):
return Copy000, nil
case byte(001):
return Copy001, nil
case byte(010):
return Copy010, nil
case byte(100):
return Copy100, nil
case byte(110):
return Copy110, nil
case byte(200):
return Copy200, nil
}
return Copy000, errors.New("Unknown Replication Type:" + string(b))
}
func (r *ReplicationType) String() string {
switch *r {
case Copy000:
return "000"
case Copy001:
return "001"
case Copy010:
return "010"
case Copy100:
return "100"
case Copy110:
return "110"
case Copy200:
return "200"
}
return "000"
switch *r {
case Copy000:
return "000"
case Copy001:
return "001"
case Copy010:
return "010"
case Copy100:
return "100"
case Copy110:
return "110"
case Copy200:
return "200"
}
return "000"
}
func (r *ReplicationType) Byte() byte {
switch *r {
case Copy000:
return byte(000)
case Copy001:
return byte(001)
case Copy010:
return byte(010)
case Copy100:
return byte(100)
case Copy110:
return byte(110)
case Copy200:
return byte(200)
}
return byte(000)
switch *r {
case Copy000:
return byte(000)
case Copy001:
return byte(001)
case Copy010:
return byte(010)
case Copy100:
return byte(100)
case Copy110:
return byte(110)
case Copy200:
return byte(200)
}
return byte(000)
}
func (repType ReplicationType)GetReplicationLevelIndex() int {
switch repType {
case Copy000:
return 0
case Copy001:
return 1
case Copy010:
return 2
case Copy100:
return 3
case Copy110:
return 4
case Copy200:
return 5
}
return -1
func (repType ReplicationType) GetReplicationLevelIndex() int {
switch repType {
case Copy000:
return 0
case Copy001:
return 1
case Copy010:
return 2
case Copy100:
return 3
case Copy110:
return 4
case Copy200:
return 5
}
return -1
}
func (repType ReplicationType)GetCopyCount() int {
switch repType {
case Copy000:
return 1
case Copy001:
return 2
case Copy010:
return 2
case Copy100:
return 2
case Copy110:
return 3
case Copy200:
return 3
}
return 0
func (repType ReplicationType) GetCopyCount() int {
switch repType {
case Copy000:
return 1
case Copy001:
return 2
case Copy010:
return 2
case Copy100:
return 2
case Copy110:
return 3
case Copy200:
return 3
}
return 0
}
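
A small sketch of reading a replication string with the helpers above (import path per this repository's layout):

package main

import (
	"fmt"
	"log"

	"pkg/storage"
)

func main() {
	rt, err := storage.NewReplicationTypeFromString("110")
	if err != nil {
		log.Fatal(err)
	}
	// "110": two copies on different racks in the local data center plus one in another data center
	fmt.Println("copies needed:", rt.GetCopyCount())           // 3
	fmt.Println("level index:", rt.GetReplicationLevelIndex()) // 4
	fmt.Println("as string:", rt.String())                     // 110
}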

View File

@ -65,13 +65,13 @@ func (s *Store) AddVolume(volumeListString string, replicationType string) error
}
return e
}
func (s *Store) addVolume(vid VolumeId, replicationType ReplicationType) error {
func (s *Store) addVolume(vid VolumeId, replicationType ReplicationType) (err error) {
if s.volumes[vid] != nil {
return errors.New("Volume Id " + vid.String() + " already exists!")
}
log.Println("In dir", s.dir, "adds volume =", vid, ", replicationType =", replicationType)
s.volumes[vid] = NewVolume(s.dir, vid, replicationType)
return nil
s.volumes[vid], err = NewVolume(s.dir, vid, replicationType)
return err
}
func (s *Store) CheckCompactVolume(volumeIdString string, garbageThresholdString string) (error, bool) {
@ -107,9 +107,10 @@ func (s *Store) loadExistingVolumes() {
base := name[:len(name)-len(".dat")]
if vid, err := NewVolumeId(base); err == nil {
if s.volumes[vid] == nil {
v := NewVolume(s.dir, vid, CopyNil)
s.volumes[vid] = v
log.Println("In dir", s.dir, "read volume =", vid, "replicationType =", v.replicaType, "version =", v.version, "size =", v.Size())
if v, e := NewVolume(s.dir, vid, CopyNil); e == nil {
s.volumes[vid] = v
log.Println("In dir", s.dir, "read volume =", vid, "replicationType =", v.replicaType, "version =", v.version, "size =", v.Size())
}
}
}
}

View File

@ -24,9 +24,9 @@ type Volume struct {
accessLock sync.Mutex
}
func NewVolume(dirname string, id VolumeId, replicationType ReplicationType) (v *Volume) {
func NewVolume(dirname string, id VolumeId, replicationType ReplicationType) (v *Volume, e error) {
v = &Volume{dir: dirname, Id: id, replicaType: replicationType}
v.load()
e = v.load()
return
}
func (v *Volume) load() error {
@ -79,21 +79,19 @@ func (v *Volume) maybeWriteSuperBlock() {
v.dataFile.Write(header)
}
}
func (v *Volume) readSuperBlock() error {
func (v *Volume) readSuperBlock() (err error) {
v.dataFile.Seek(0, 0)
header := make([]byte, SuperBlockSize)
if _, e := v.dataFile.Read(header); e != nil {
return fmt.Errorf("cannot read superblock: %s", e)
}
var err error
v.version, v.replicaType, err = ParseSuperBlock(header)
return err
}
func ParseSuperBlock(header []byte) (version Version, replicaType ReplicationType, e error) {
func ParseSuperBlock(header []byte) (version Version, replicaType ReplicationType, err error) {
version = Version(header[0])
var err error
if replicaType, err = NewReplicationTypeFromByte(header[1]); err != nil {
e = fmt.Errorf("cannot read replica type: %s", err)
err = fmt.Errorf("cannot read replica type: %s", err)
}
return
}
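
A hedged sketch of ParseSuperBlock on a hand-built header; only the first two bytes matter to this function, so the slice length here is arbitrary:

package main

import (
	"fmt"

	"pkg/storage"
)

func main() {
	rt := storage.Copy001
	header := make([]byte, 8) // byte 0: version, byte 1: replication type byte
	header[0] = byte(storage.CurrentVersion)
	header[1] = rt.Byte()
	version, repType, err := storage.ParseSuperBlock(header)
	fmt.Println(version, repType.String(), err)
}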

View File

@ -1,17 +1,18 @@
package storage
import (
"strconv"
"strconv"
)
type VolumeId uint32
func NewVolumeId(vid string) (VolumeId,error) {
volumeId, err := strconv.ParseUint(vid, 10, 64)
return VolumeId(volumeId), err
func NewVolumeId(vid string) (VolumeId, error) {
volumeId, err := strconv.ParseUint(vid, 10, 64)
return VolumeId(volumeId), err
}
func (vid *VolumeId) String() string{
return strconv.FormatUint(uint64(*vid), 10)
func (vid *VolumeId) String() string {
return strconv.FormatUint(uint64(*vid), 10)
}
func (vid *VolumeId) Next() VolumeId{
return VolumeId(uint32(*vid)+1)
func (vid *VolumeId) Next() VolumeId {
return VolumeId(uint32(*vid) + 1)
}

View File

@ -1,12 +1,11 @@
package storage
import (
)
import ()
type Version uint8
const (
Version1 = Version(1)
Version2 = Version(2)
CurrentVersion = Version2
Version1 = Version(1)
Version2 = Version(2)
CurrentVersion = Version2
)

View File

@ -30,13 +30,13 @@ func TestLoadConfiguration(t *testing.T) {
</Configuration>
`
c, err := NewConfiguration([]byte(confContent))
fmt.Printf("%s\n", c)
if err!=nil{
t.Fatalf("unmarshal error:%s",err.Error())
fmt.Printf("%s\n", c)
if err != nil {
t.Fatalf("unmarshal error:%s", err.Error())
}
if len(c.Topo.DataCenters) <= 0 || c.Topo.DataCenters[0].Name != "dc1" {
t.Fatalf("unmarshal error:%s",c)
t.Fatalf("unmarshal error:%s", c)
}
}

View File

@ -1,7 +1,6 @@
package topology
import (
)
import ()
type DataCenter struct {
NodeImpl
@ -12,31 +11,31 @@ func NewDataCenter(id string) *DataCenter {
dc.id = NodeId(id)
dc.nodeType = "DataCenter"
dc.children = make(map[NodeId]Node)
dc.NodeImpl.value = dc
dc.NodeImpl.value = dc
return dc
}
func (dc *DataCenter) GetOrCreateRack(rackName string) *Rack {
for _, c := range dc.Children() {
rack := c.(*Rack)
if string(rack.Id()) == rackName {
return rack
}
}
rack := NewRack(rackName)
dc.LinkChildNode(rack)
return rack
for _, c := range dc.Children() {
rack := c.(*Rack)
if string(rack.Id()) == rackName {
return rack
}
}
rack := NewRack(rackName)
dc.LinkChildNode(rack)
return rack
}
func (dc *DataCenter) ToMap() interface{}{
m := make(map[string]interface{})
m["Max"] = dc.GetMaxVolumeCount()
m["Free"] = dc.FreeSpace()
var racks []interface{}
for _, c := range dc.Children() {
rack := c.(*Rack)
racks = append(racks, rack.ToMap())
}
m["Racks"] = racks
return m
func (dc *DataCenter) ToMap() interface{} {
m := make(map[string]interface{})
m["Max"] = dc.GetMaxVolumeCount()
m["Free"] = dc.FreeSpace()
var racks []interface{}
for _, c := range dc.Children() {
rack := c.(*Rack)
racks = append(racks, rack.ToMap())
}
m["Racks"] = racks
return m
}

View File

@ -37,14 +37,14 @@ func (nl *NodeList) RandomlyPickN(n int, min int) ([]Node, bool) {
list = append(list, n)
}
}
if n > len(list){
return nil,false
if n > len(list) {
return nil, false
}
for i := n; i > 0; i-- {
r := rand.Intn(i)
t := list[r]
list[r] = list[i-1]
list[i-1] = t
r := rand.Intn(i)
t := list[r]
list[r] = list[i-1]
list[i-1] = t
}
return list[len(list)-n:], true
}

View File

@ -1,39 +1,39 @@
package topology
import (
_ "fmt"
"strconv"
"testing"
_ "fmt"
)
func TestXYZ(t *testing.T) {
topo := NewTopology("topo","/etc/weed.conf", "/tmp","test",234,5)
topo := NewTopology("topo", "/etc/weed.conf", "/tmp", "test", 234, 5)
for i := 0; i < 5; i++ {
dc := NewDataCenter("dc" + strconv.Itoa(i))
dc.activeVolumeCount = i
dc.maxVolumeCount = 5
topo.LinkChildNode(dc)
}
nl := NewNodeList(topo.Children(),nil)
nl := NewNodeList(topo.Children(), nil)
picked, ret := nl.RandomlyPickN(1)
if !ret || len(picked)!=1 {
t.Errorf("need to randomly pick 1 node")
}
picked, ret = nl.RandomlyPickN(4)
if !ret || len(picked)!=4 {
t.Errorf("need to randomly pick 4 nodes")
picked, ret := nl.RandomlyPickN(1)
if !ret || len(picked) != 1 {
t.Errorf("need to randomly pick 1 node")
}
picked, ret = nl.RandomlyPickN(5)
if !ret || len(picked)!=5 {
t.Errorf("need to randomly pick 5 nodes")
}
picked, ret = nl.RandomlyPickN(4)
if !ret || len(picked) != 4 {
t.Errorf("need to randomly pick 4 nodes")
}
picked, ret = nl.RandomlyPickN(6)
if ret || len(picked)!=0 {
t.Errorf("can not randomly pick 6 nodes:", ret, picked)
}
picked, ret = nl.RandomlyPickN(5)
if !ret || len(picked) != 5 {
t.Errorf("need to randomly pick 5 nodes")
}
picked, ret = nl.RandomlyPickN(6)
if ret || len(picked) != 0 {
t.Errorf("can not randomly pick 6 nodes:", ret, picked)
}
}

View File

@ -19,13 +19,13 @@ func NewRack(id string) *Rack {
}
func (r *Rack) FindDataNode(ip string, port int) *DataNode {
for _, c := range r.Children() {
dn := c.(*DataNode)
if dn.MatchLocation(ip, port) {
return dn
}
}
return nil
for _, c := range r.Children() {
dn := c.(*DataNode)
if dn.MatchLocation(ip, port) {
return dn
}
}
return nil
}
func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int) *DataNode {
for _, c := range r.Children() {

View File

@ -78,7 +78,7 @@ func setup(topologyLayout string) *Topology {
}
//need to connect all nodes first before server adding volumes
topo := NewTopology("mynetwork","/etc/weed.conf","/tmp","test",234,5)
topo := NewTopology("mynetwork", "/etc/weed.conf", "/tmp", "test", 234, 5)
mTopology := data.(map[string]interface{})
for dcKey, dcValue := range mTopology {
dc := NewDataCenter(dcKey)
@ -94,7 +94,7 @@ func setup(topologyLayout string) *Topology {
rack.LinkChildNode(server)
for _, v := range serverMap["volumes"].([]interface{}) {
m := v.(map[string]interface{})
vi := storage.VolumeInfo{Id: storage.VolumeId(int64(m["id"].(float64))), Size: int64(m["size"].(float64)), Version:storage.CurrentVersion}
vi := storage.VolumeInfo{Id: storage.VolumeId(int64(m["id"].(float64))), Size: int64(m["size"].(float64)), Version: storage.CurrentVersion}
server.AddOrUpdateVolume(vi)
}
server.UpAdjustMaxVolumeCountDelta(int(serverMap["limit"].(float64)))
@ -119,9 +119,9 @@ func TestRemoveDataCenter(t *testing.T) {
func TestReserveOneVolume(t *testing.T) {
topo := setup(topologyLayout)
rand.Seed(time.Now().UnixNano())
rand.Seed(1)
rand.Seed(time.Now().UnixNano())
rand.Seed(1)
ret, node, vid := topo.RandomlyReserveOneVolume()
fmt.Println("assigned :", ret, ", node :", node,", volume id:", vid)
fmt.Println("assigned :", ret, ", node :", node, ", volume id:", vid)
}

View File

@ -101,10 +101,10 @@ type VacuumVolumeResult struct {
func vacuumVolume_Check(urlLocation string, vid storage.VolumeId, garbageThreshold string) (error, bool) {
values := make(url.Values)
values.Add("volume", vid.String())
values.Add("garbageThreshold", garbageThreshold)
values.Add("garbageThreshold", garbageThreshold)
jsonBlob, err := util.Post("http://"+urlLocation+"/admin/vacuum_volume_check", values)
if err != nil {
fmt.Println("parameters:",values)
fmt.Println("parameters:", values)
return err, false
}
var ret VacuumVolumeResult

View File

@ -52,14 +52,14 @@ func (t *Topology) UnRegisterDataNode(dn *DataNode) {
vl := t.GetVolumeLayout(v.RepType)
vl.SetVolumeUnavailable(dn, v.Id)
}
dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount())
dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount())
dn.UpAdjustActiveVolumeCountDelta(-dn.GetActiveVolumeCount())
dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount())
dn.Parent().UnlinkChildNode(dn.Id())
}
func (t *Topology) RegisterRecoveredDataNode(dn *DataNode) {
for _, v := range dn.volumes {
vl := t.GetVolumeLayout(v.RepType)
vl := t.GetVolumeLayout(v.RepType)
if vl.isWritable(&v) {
vl.SetVolumeAvailable(dn, v.Id)
}

View File

@ -1,7 +1,6 @@
package topology
import (
)
import ()
func (t *Topology) ToMap() interface{} {
m := make(map[string]interface{})

View File

@ -15,7 +15,7 @@ func (dnll *VolumeLocationList) Head() *DataNode {
}
func (dnll *VolumeLocationList) Length() int {
return len(dnll.list)
return len(dnll.list)
}
func (dnll *VolumeLocationList) Add(loc *DataNode) bool {
@ -29,13 +29,13 @@ func (dnll *VolumeLocationList) Add(loc *DataNode) bool {
}
func (dnll *VolumeLocationList) Remove(loc *DataNode) bool {
for i, dnl := range dnll.list {
if loc.Ip == dnl.Ip && loc.Port == dnl.Port {
dnll.list = append(dnll.list[:i],dnll.list[i+1:]...)
return true
}
}
return false
for i, dnl := range dnll.list {
if loc.Ip == dnl.Ip && loc.Port == dnl.Port {
dnll.list = append(dnll.list[:i], dnll.list[i+1:]...)
return true
}
}
return false
}
func (dnll *VolumeLocationList) Refresh(freshThreshHold int64) {
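
The append-based removal in Remove above is a common Go idiom; in isolation, with plain ints standing in for *DataNode:

package main

import "fmt"

func main() {
	list := []int{10, 20, 30, 40}
	i := 2 // index of the element to drop
	list = append(list[:i], list[i+1:]...)
	fmt.Println(list) // [10 20 40]
}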

View File

@ -1,34 +1,33 @@
package util
func BytesToUint64(b []byte)(v uint64){
length := uint(len(b))
for i :=uint(0);i<length-1;i++ {
v += uint64(b[i])
v <<= 8
}
v+=uint64(b[length-1])
return
func BytesToUint64(b []byte) (v uint64) {
length := uint(len(b))
for i := uint(0); i < length-1; i++ {
v += uint64(b[i])
v <<= 8
}
v += uint64(b[length-1])
return
}
func BytesToUint32(b []byte)(v uint32){
length := uint(len(b))
for i :=uint(0);i<length-1;i++ {
v += uint32(b[i])
v <<= 8
}
v+=uint32(b[length-1])
return
func BytesToUint32(b []byte) (v uint32) {
length := uint(len(b))
for i := uint(0); i < length-1; i++ {
v += uint32(b[i])
v <<= 8
}
v += uint32(b[length-1])
return
}
func Uint64toBytes(b []byte, v uint64){
for i :=uint(0);i<8;i++ {
b[7-i] = byte(v>>(i*8))
}
func Uint64toBytes(b []byte, v uint64) {
for i := uint(0); i < 8; i++ {
b[7-i] = byte(v >> (i * 8))
}
}
func Uint32toBytes(b []byte, v uint32){
for i :=uint(0);i<4;i++ {
b[3-i] = byte(v>>(i*8))
}
func Uint32toBytes(b []byte, v uint32) {
for i := uint(0); i < 4; i++ {
b[3-i] = byte(v >> (i * 8))
}
}
func Uint8toBytes(b []byte, v uint8){
b[0] = byte(v)
func Uint8toBytes(b []byte, v uint8) {
b[0] = byte(v)
}

View File

@ -1,16 +1,16 @@
package util
import (
"strconv"
"strconv"
)
func ParseInt(text string, defaultValue int) int{
count, parseError := strconv.ParseUint(text,10,64)
if parseError!=nil {
if len(text)>0{
return 0
}
return defaultValue
}
return int(count)
func ParseInt(text string, defaultValue int) int {
count, parseError := strconv.ParseUint(text, 10, 64)
if parseError != nil {
if len(text) > 0 {
return 0
}
return defaultValue
}
return int(count)
}
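
A small sketch of ParseInt's fallback behaviour, which is easy to misread (import path per this repository's layout):

package main

import (
	"fmt"

	"pkg/util"
)

func main() {
	fmt.Println(util.ParseInt("42", 7))  // 42: parsed normally
	fmt.Println(util.ParseInt("", 7))    // 7: empty input falls back to the default
	fmt.Println(util.ParseInt("abc", 7)) // 0: non-empty but unparsable input returns 0, not the default
}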

View File

@ -16,7 +16,7 @@ func Post(url string, values url.Values) ([]byte, error) {
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Println("read post result from", url, err)
log.Println("read post result from", url, err)
return nil, err
}
return b, nil