package storage

import (
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"math/rand"
	"strconv"
	"strings"

	"github.com/chrislusf/seaweedfs/go/glog"
	"github.com/chrislusf/seaweedfs/go/operation"
	"github.com/chrislusf/seaweedfs/go/security"
	"github.com/chrislusf/seaweedfs/go/util"
	"github.com/golang/protobuf/proto"
)

const (
	MAX_TTL_VOLUME_REMOVAL_DELAY = 10 // 10 minutes
)
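
// DiskLocation represents one local data directory, the maximum number of
// volumes it may hold, and the volumes currently loaded from it.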
type DiskLocation struct {
	Directory      string
	MaxVolumeCount int
	volumes        map[VolumeId]*Volume
}

func (l *DiskLocation) reset() {
}
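
// MasterNodes keeps the list of known master servers and which one the store
// is currently using; lastNode is an index into nodes, or negative when no
// master has been picked yet.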
type MasterNodes struct {
	nodes    []string
	lastNode int
}

func (mn *MasterNodes) String() string {
	return fmt.Sprintf("nodes:%v, lastNode:%d", mn.nodes, mn.lastNode)
}

func NewMasterNodes(bootstrapNode string) (mn *MasterNodes) {
	mn = &MasterNodes{nodes: []string{bootstrapNode}, lastNode: -1}
	return
}

func (mn *MasterNodes) reset() {
	glog.V(4).Infof("Resetting master nodes: %v", mn)
	if len(mn.nodes) > 1 && mn.lastNode > 0 {
		glog.V(0).Infof("Reset master %s from: %v", mn.nodes[mn.lastNode], mn.nodes)
		mn.lastNode = -mn.lastNode
	}
}
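
// findMaster returns the master node currently in use. If none has been
// chosen yet, it asks each bootstrap node for the full master list and picks
// one at random.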
func (mn *MasterNodes) findMaster() (string, error) {
	if len(mn.nodes) == 0 {
		return "", errors.New("No master node found!")
	}
	if mn.lastNode < 0 {
		for _, m := range mn.nodes {
			glog.V(4).Infof("Listing masters on %s", m)
			if masters, e := operation.ListMasters(m); e == nil {
				if len(masters) == 0 {
					continue
				}
				mn.nodes = append(masters, m)
				mn.lastNode = rand.Intn(len(mn.nodes))
				glog.V(2).Infof("current master nodes is %v", mn)
				break
			} else {
				glog.V(4).Infof("Failed listing masters on %s: %v", m, e)
			}
		}
	}
	if mn.lastNode < 0 {
		return "", errors.New("No master node available!")
	}
	return mn.nodes[mn.lastNode], nil
}

/*
 * A VolumeServer contains one Store
 */
type Store struct {
	Ip              string
	Port            int
	PublicUrl       string
	Locations       []*DiskLocation
	dataCenter      string //optional information, overwriting master setting if exists
	rack            string //optional information, overwriting master setting if exists
	connected       bool
	volumeSizeLimit uint64 //read from the master
	masterNodes     *MasterNodes
}

func (s *Store) String() (str string) {
	str = fmt.Sprintf("Ip:%s, Port:%d, PublicUrl:%s, dataCenter:%s, rack:%s, connected:%v, volumeSizeLimit:%d, masterNodes:%s", s.Ip, s.Port, s.PublicUrl, s.dataCenter, s.rack, s.connected, s.volumeSizeLimit, s.masterNodes)
	return
}
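
// NewStore sets up a Store over the given data directories. Each directory
// becomes a DiskLocation with its own maximum volume count, and any existing
// volume data files found there are loaded immediately.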
func NewStore(port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, needleMapKind NeedleMapType) (s *Store) {
	s = &Store{Port: port, Ip: ip, PublicUrl: publicUrl}
	s.Locations = make([]*DiskLocation, 0)
	for i := 0; i < len(dirnames); i++ {
		location := &DiskLocation{Directory: dirnames[i], MaxVolumeCount: maxVolumeCounts[i]}
		location.volumes = make(map[VolumeId]*Volume)
		location.loadExistingVolumes(needleMapKind)
		s.Locations = append(s.Locations, location)
	}
	return
}
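
// AddVolume creates the volumes named in volumeListString, a comma-separated
// list of volume ids or id ranges (for example "3", "1,2" or "5-7", as the
// parsing below implies). The last error encountered, if any, is returned.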
func (s *Store) AddVolume(volumeListString string, collection string, needleMapKind NeedleMapType, replicaPlacement string, ttlString string) error {
	rt, e := NewReplicaPlacementFromString(replicaPlacement)
	if e != nil {
		return e
	}
	ttl, e := ReadTTL(ttlString)
	if e != nil {
		return e
	}
	for _, range_string := range strings.Split(volumeListString, ",") {
		if strings.Index(range_string, "-") < 0 {
			id_string := range_string
			id, err := NewVolumeId(id_string)
			if err != nil {
				return fmt.Errorf("Volume Id %s is not a valid unsigned integer!", id_string)
			}
			e = s.addVolume(VolumeId(id), collection, needleMapKind, rt, ttl)
		} else {
			pair := strings.Split(range_string, "-")
			start, start_err := strconv.ParseUint(pair[0], 10, 64)
			if start_err != nil {
				return fmt.Errorf("Volume Start Id %s is not a valid unsigned integer!", pair[0])
			}
			end, end_err := strconv.ParseUint(pair[1], 10, 64)
			if end_err != nil {
				return fmt.Errorf("Volume End Id %s is not a valid unsigned integer!", pair[1])
			}
			for id := start; id <= end; id++ {
				if err := s.addVolume(VolumeId(id), collection, needleMapKind, rt, ttl); err != nil {
					e = err
				}
			}
		}
	}
	return e
}
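
// DeleteCollection destroys every volume that belongs to the given collection
// and removes it from its disk location, stopping at the first error.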
func (s *Store) DeleteCollection(collection string) (e error) {
	for _, location := range s.Locations {
		for k, v := range location.volumes {
			if v.Collection == collection {
				e = v.Destroy()
				if e != nil {
					return
				}
				delete(location.volumes, k)
			}
		}
	}
	return
}

func (s *Store) DeleteVolume(volumes map[VolumeId]*Volume, v *Volume) (e error) {
	e = v.Destroy()
	if e != nil {
		return
	}
	delete(volumes, v.Id)
	return
}

func (s *Store) findVolume(vid VolumeId) *Volume {
	for _, location := range s.Locations {
		if v, found := location.volumes[vid]; found {
			return v
		}
	}
	return nil
}
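
// findFreeLocation picks the disk location with the most unused volume slots,
// or nil if every location is already at its MaxVolumeCount.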
func (s *Store) findFreeLocation() (ret *DiskLocation) {
	max := 0
	for _, location := range s.Locations {
		currentFreeCount := location.MaxVolumeCount - len(location.volumes)
		if currentFreeCount > max {
			max = currentFreeCount
			ret = location
		}
	}
	return ret
}
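
// addVolume creates a single volume on the freest disk location, failing if
// the volume id already exists or no location has room left.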
func (s *Store) addVolume(vid VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *TTL) error {
	if s.findVolume(vid) != nil {
		return fmt.Errorf("Volume Id %d already exists!", vid)
	}
	if location := s.findFreeLocation(); location != nil {
		glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
			location.Directory, vid, collection, replicaPlacement, ttl)
		if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl); err == nil {
			location.volumes[vid] = volume
			return nil
		} else {
			return err
		}
	}
	return fmt.Errorf("No more free space left")
}
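
// loadExistingVolumes scans the location's directory for *.dat files named
// either <volumeId>.dat or <collection>_<volumeId>.dat and opens each one
// that is not already loaded.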
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) {
	if dirs, err := ioutil.ReadDir(l.Directory); err == nil {
		for _, dir := range dirs {
			name := dir.Name()
			if !dir.IsDir() && strings.HasSuffix(name, ".dat") {
				collection := ""
				base := name[:len(name)-len(".dat")]
				i := strings.Index(base, "_")
				if i > 0 {
					collection, base = base[0:i], base[i+1:]
				}
				if vid, err := NewVolumeId(base); err == nil {
					if l.volumes[vid] == nil {
						if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil); e == nil {
							l.volumes[vid] = v
							glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), v.Size(), v.Ttl.String())
						}
					}
				}
			}
		}
	}
	glog.V(0).Infoln("Store started on dir:", l.Directory, "with", len(l.volumes), "volumes", "max", l.MaxVolumeCount)
}
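
// Status reports one VolumeInfo per volume across all disk locations.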
func (s *Store) Status() []*VolumeInfo {
	var stats []*VolumeInfo
	for _, location := range s.Locations {
		for k, v := range location.volumes {
			s := &VolumeInfo{
				Id:               VolumeId(k),
				Size:             v.ContentSize(),
				Collection:       v.Collection,
				ReplicaPlacement: v.ReplicaPlacement,
				Version:          v.Version(),
				FileCount:        v.nm.FileCount(),
				DeleteCount:      v.nm.DeletedCount(),
				DeletedByteCount: v.nm.DeletedSize(),
				ReadOnly:         v.readOnly,
				Ttl:              v.Ttl}
			stats = append(stats, s)
		}
	}
	sortVolumeInfos(stats)
	return stats
}

func (s *Store) SetDataCenter(dataCenter string) {
	s.dataCenter = dataCenter
}

func (s *Store) SetRack(rack string) {
	s.rack = rack
}

func (s *Store) SetBootstrapMaster(bootstrapMaster string) {
	s.masterNodes = NewMasterNodes(bootstrapMaster)
}
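
// SendHeartbeatToMaster gathers the current volume information, posts it to
// the selected master as a protobuf JoinMessage, and records the volume size
// limit and secret key the master returns. Expired TTL volumes are skipped,
// and those expired long enough are deleted along the way.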
func (s *Store) SendHeartbeatToMaster() (masterNode string, secretKey security.Secret, e error) {
	masterNode, e = s.masterNodes.findMaster()
	if e != nil {
		return
	}
	var volumeMessages []*operation.VolumeInformationMessage
	maxVolumeCount := 0
	var maxFileKey uint64
	for _, location := range s.Locations {
		maxVolumeCount = maxVolumeCount + location.MaxVolumeCount
		for k, v := range location.volumes {
			if maxFileKey < v.nm.MaxFileKey() {
				maxFileKey = v.nm.MaxFileKey()
			}
			if !v.expired(s.volumeSizeLimit) {
				volumeMessage := &operation.VolumeInformationMessage{
					Id:               proto.Uint32(uint32(k)),
					Size:             proto.Uint64(uint64(v.Size())),
					Collection:       proto.String(v.Collection),
					FileCount:        proto.Uint64(uint64(v.nm.FileCount())),
					DeleteCount:      proto.Uint64(uint64(v.nm.DeletedCount())),
					DeletedByteCount: proto.Uint64(v.nm.DeletedSize()),
					ReadOnly:         proto.Bool(v.readOnly),
					ReplicaPlacement: proto.Uint32(uint32(v.ReplicaPlacement.Byte())),
					Version:          proto.Uint32(uint32(v.Version())),
					Ttl:              proto.Uint32(v.Ttl.ToUint32()),
				}
				volumeMessages = append(volumeMessages, volumeMessage)
			} else {
				if v.exiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) {
					s.DeleteVolume(location.volumes, v)
					glog.V(0).Infoln("volume", v.Id, "is deleted.")
				} else {
					glog.V(0).Infoln("volume", v.Id, "is expired.")
				}
			}
		}
	}

	joinMessage := &operation.JoinMessage{
		IsInit:         proto.Bool(!s.connected),
		Ip:             proto.String(s.Ip),
		Port:           proto.Uint32(uint32(s.Port)),
		PublicUrl:      proto.String(s.PublicUrl),
		MaxVolumeCount: proto.Uint32(uint32(maxVolumeCount)),
		MaxFileKey:     proto.Uint64(maxFileKey),
		DataCenter:     proto.String(s.dataCenter),
		Rack:           proto.String(s.rack),
		Volumes:        volumeMessages,
	}

	data, err := proto.Marshal(joinMessage)
	if err != nil {
		return "", "", err
	}

	joinUrl := "http://" + masterNode + "/dir/join"
	glog.V(4).Infof("Connecting to %s ...", joinUrl)

	jsonBlob, err := util.PostBytes(joinUrl, data)
	if err != nil {
		s.masterNodes.reset()
		return "", "", err
	}
	var ret operation.JoinResult
	if err := json.Unmarshal(jsonBlob, &ret); err != nil {
		glog.V(0).Infof("Failed to join %s with response: %s", joinUrl, string(jsonBlob))
		s.masterNodes.reset()
		return masterNode, "", err
	}
	if ret.Error != "" {
		s.masterNodes.reset()
		return masterNode, "", errors.New(ret.Error)
	}
	s.volumeSizeLimit = ret.VolumeSizeLimit
	secretKey = security.Secret(ret.SecretKey)
	s.connected = true
	return
}

func (s *Store) Close() {
	for _, location := range s.Locations {
		for _, v := range location.volumes {
			v.Close()
		}
	}
}
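
// Write stores a needle in the given volume. Read-only volumes are rejected,
// and once a volume grows close to the master-assigned size limit a heartbeat
// is sent immediately so the master learns the new size.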
func (s *Store) Write(i VolumeId, n *Needle) (size uint32, err error) {
	if v := s.findVolume(i); v != nil {
		if v.readOnly {
			err = fmt.Errorf("Volume %d is read only", i)
			return
		}
		if MaxPossibleVolumeSize >= v.ContentSize()+uint64(size) {
			size, err = v.write(n)
		} else {
			err = fmt.Errorf("Volume Size Limit %d Exceeded! Current size is %d", s.volumeSizeLimit, v.ContentSize())
		}
		if s.volumeSizeLimit < v.ContentSize()+3*uint64(size) {
			glog.V(0).Infoln("volume", i, "size", v.ContentSize(), "will exceed limit", s.volumeSizeLimit)
			if _, _, e := s.SendHeartbeatToMaster(); e != nil {
				glog.V(0).Infoln("error when reporting size:", e)
			}
		}
		return
	}
	glog.V(0).Infoln("volume", i, "not found!")
	err = fmt.Errorf("Volume %d not found!", i)
	return
}

func (s *Store) Delete(i VolumeId, n *Needle) (uint32, error) {
	if v := s.findVolume(i); v != nil && !v.readOnly {
		return v.delete(n)
	}
	return 0, nil
}

func (s *Store) Read(i VolumeId, n *Needle) (int, error) {
	if v := s.findVolume(i); v != nil {
		return v.read(n)
	}
	return 0, fmt.Errorf("Volume %v not found!", i)
}

func (s *Store) GetVolume(i VolumeId) *Volume {
	return s.findVolume(i)
}

func (s *Store) HasVolume(i VolumeId) bool {
	v := s.findVolume(i)
	return v != nil
}