package weed_server

import (
	"math/rand"
	"net/http"
	"sync"
	"time"

	"github.com/chrislusf/seaweedfs/go/glog"
	"github.com/chrislusf/seaweedfs/go/security"
	"github.com/chrislusf/seaweedfs/go/storage"
)
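
// VolumeServer serves volume data on a data node. It registers HTTP handlers
// on the admin and public muxes, keeps a heartbeat connection to the master,
// and carries placement metadata (data center, rack) for its storage.Store.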
type VolumeServer struct {
	masterNode   string
	mnLock       sync.RWMutex
	pulseSeconds int
	dataCenter   string
	rack         string
	store        *storage.Store
	guard        *security.Guard

	needleMapKind     storage.NeedleMapType
	FixJpgOrientation bool
	ReadRedirect      bool
}
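
// NewVolumeServer creates a VolumeServer, wires its handlers onto the given
// admin and public muxes, and starts a background goroutine that heartbeats
// to the master node.
//
// A rough usage sketch (hypothetical wiring with illustrative values; the
// actual weed volume command derives these from command-line flags):
//
//	adminMux := http.NewServeMux()
//	vs := weed_server.NewVolumeServer(adminMux, adminMux, "localhost",
//		8080, "localhost:8080",
//		[]string{"/data"}, []int{7},
//		storage.NeedleMapInMemory,
//		"localhost:9333", 5, "", "",
//		nil,  // no white list
//		true, // fix jpg orientation
//		true) // allow read redirects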
func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
	port int, publicUrl string,
	folders []string, maxCounts []int,
	needleMapKind storage.NeedleMapType,
	masterNode string, pulseSeconds int,
	dataCenter string, rack string,
	whiteList []string,
	fixJpgOrientation bool,
	readRedirect bool) *VolumeServer {
	vs := &VolumeServer{
		pulseSeconds:      pulseSeconds,
		dataCenter:        dataCenter,
		rack:              rack,
		needleMapKind:     needleMapKind,
		FixJpgOrientation: fixJpgOrientation,
		ReadRedirect:      readRedirect,
	}
	vs.SetMasterNode(masterNode)
	vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)

	vs.guard = security.NewGuard(whiteList, "")
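
	// Admin endpoints go through the IP white list guard; the public mux,
	// when separate, only exposes read-only access.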
	adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler)
	adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler))
	adminMux.HandleFunc("/admin/assign_volume", vs.guard.WhiteList(vs.assignVolumeHandler))
	adminMux.HandleFunc("/admin/vacuum/check", vs.guard.WhiteList(vs.vacuumVolumeCheckHandler))
	adminMux.HandleFunc("/admin/vacuum/compact", vs.guard.WhiteList(vs.vacuumVolumeCompactHandler))
	adminMux.HandleFunc("/admin/vacuum/commit", vs.guard.WhiteList(vs.vacuumVolumeCommitHandler))
	adminMux.HandleFunc("/admin/delete_collection", vs.guard.WhiteList(vs.deleteCollectionHandler))
	adminMux.HandleFunc("/admin/sync/status", vs.guard.WhiteList(vs.getVolumeSyncStatusHandler))
	adminMux.HandleFunc("/admin/sync/index", vs.guard.WhiteList(vs.getVolumeIndexContentHandler))
	adminMux.HandleFunc("/admin/sync/data", vs.guard.WhiteList(vs.getVolumeDataContentHandler))
	adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler))
	adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler))
	adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler))
	adminMux.HandleFunc("/delete", vs.guard.WhiteList(vs.batchDeleteHandler))
	adminMux.HandleFunc("/", vs.privateStoreHandler)
	if publicMux != adminMux {
		// separated admin and public port
		publicMux.HandleFunc("/favicon.ico", vs.faviconHandler)
		publicMux.HandleFunc("/", vs.publicReadOnlyHandler)
	}
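
	// Heartbeat loop: report to the master, pick up the JWT secret key on
	// (re)connect, and fall back to a shorter retry interval when disconnected.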
	go func() {
		connected := true

		glog.V(0).Infof("Volume server bootstraps with master %s", vs.GetMasterNode())
		vs.store.SetBootstrapMaster(vs.GetMasterNode())
		vs.store.SetDataCenter(vs.dataCenter)
		vs.store.SetRack(vs.rack)
		for {
			glog.V(4).Infof("Volume server sending to master %s", vs.GetMasterNode())
			master, secretKey, err := vs.store.SendHeartbeatToMaster()
			if err == nil {
				if !connected {
					// Just (re)connected: adopt the master that answered and its secret key.
					connected = true
					vs.SetMasterNode(master)
					vs.guard.SecretKey = secretKey
					glog.V(0).Infoln("Volume Server Connected with master at", master)
				}
			} else {
				glog.V(1).Infof("Volume Server Failed to talk with master %s: %v", vs.masterNode, err)
				if connected {
					connected = false
				}
			}
			if connected {
				// Sleep between 1x and 2x pulseSeconds to spread heartbeats out.
				time.Sleep(time.Duration(float32(vs.pulseSeconds*1e3)*(1+rand.Float32())) * time.Millisecond)
			} else {
				// Disconnected: retry at a quarter of the pulse interval.
				time.Sleep(time.Duration(float32(vs.pulseSeconds*1e3)*0.25) * time.Millisecond)
			}
		}
	}()

	return vs
}
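
// GetMasterNode returns the currently known master address; reads are guarded
// by mnLock so the heartbeat goroutine can update it concurrently.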
func (vs *VolumeServer) GetMasterNode() string {
	vs.mnLock.RLock()
	defer vs.mnLock.RUnlock()
	return vs.masterNode
}
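
// SetMasterNode records a new master address under the write lock.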
func (vs *VolumeServer) SetMasterNode(masterNode string) {
	vs.mnLock.Lock()
	defer vs.mnLock.Unlock()
	vs.masterNode = masterNode
}
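
// Shutdown closes the underlying store so volume files are released cleanly.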
func (vs *VolumeServer) Shutdown() {
	glog.V(0).Infoln("Shutting down volume server...")
	vs.store.Close()
	glog.V(0).Infoln("Shut down successfully!")
}
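
// jwt signs the given file id with the secret key received from the master.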
func (vs *VolumeServer) jwt(fileId string) security.EncodedJwt {
	return security.GenJwt(vs.guard.SecretKey, fileId)
}