package command

import (
	"fmt"
	"net/http"
	httppprof "net/http/pprof"
	"os"
	"runtime"
	"runtime/pprof"
	"strconv"
	"strings"
	"time"

	"github.com/spf13/viper"
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/server"
	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/grace"
	"github.com/chrislusf/seaweedfs/weed/util/httpdown"
)

var (
	v VolumeServerOptions
)

type VolumeServerOptions struct {
	port *int
	publicPort *int
	folders []string
	folderMaxLimits []int
	idxFolder *string
	ip *string
	publicUrl *string
	bindIp *string
	masters *string
	idleConnectionTimeout *int
	dataCenter *string
	rack *string
	whiteList []string
	indexType *string
	diskType *string
	fixJpgOrientation *bool
	readRedirect *bool
	cpuProfile *string
	memProfile *string
	compactionMBPerSecond *int
	fileSizeLimitMB *int
	minFreeSpacePercents []float32
	pprof *bool
	preStopSeconds *int
	metricsHttpPort *int
	// pulseSeconds *int
}

func init() {
	cmdVolume.Run = runVolume // break init cycle
	v.port = cmdVolume.Flag.Int("port", 8080, "http listen port")
	v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public")
	v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name")
	v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address")
	v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
	v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
	v.preStopSeconds = cmdVolume.Flag.Int("preStopSeconds", 10, "number of seconds between stopping the heartbeats and stopping the volume server")
	// v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
	v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds")
	v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name")
	v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name")
	v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
	v.diskType = cmdVolume.Flag.String("disk", "", "[hdd|ssd] choose between hard drive and solid state drive")
	v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.")
	v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
	v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
	v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
	v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in megabytes per second")
	v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid running out of memory")
	v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
	v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
	v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
}

var cmdVolume = &Command{
	UsageLine: "volume -port=8080 -dir=/tmp -max=5 -ip=server_name -mserver=localhost:9333",
	Short:     "start a volume server",
	Long: `start a volume server to provide storage spaces

  `,
}

var (
	volumeFolders         = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
	maxVolumeCounts       = cmdVolume.Flag.String("max", "8", "maximum number of volumes, count[,count]... If set to zero, the limit will be auto configured.")
	volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma-separated IP addresses with write permission. No limit if empty.")
	minFreeSpacePercent   = cmdVolume.Flag.String("minFreeSpacePercent", "1", "minimum free disk space (default 1%). Low disk space will mark all volumes as ReadOnly.")
)
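
// runVolume is the entry point for the "weed volume" command. An illustrative invocation
// with two data folders (paths and counts below are examples, not defaults):
//
//	weed volume -dir=/data1,/data2 -max=100,100 -mserver=localhost:9333 -port=8080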
func runVolume(cmd *Command, args []string) bool {

	util.LoadConfiguration("security", false)

	runtime.GOMAXPROCS(runtime.NumCPU())

	// If --pprof is set, we assume the caller wants to be able to collect
	// cpu and memory profiles via go tool pprof.
	if !*v.pprof {
		grace.SetupProfiling(*v.cpuProfile, *v.memProfile)
	}

	go stats_collect.StartMetricsServer(*v.metricsHttpPort)

	v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, *minFreeSpacePercent)

	return true
}
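
// startVolumeServer parses the per-folder options (-dir, -max, -minFreeSpacePercent, -disk),
// expands single values to cover every folder, then starts the gRPC, public HTTP, and cluster
// HTTP services and blocks until an interrupt triggers a graceful shutdown.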
func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption, minFreeSpacePercent string) {

	// set the data folders and each folder's max volume count limit
	v.folders = strings.Split(volumeFolders, ",")
	for _, folder := range v.folders {
		if err := util.TestFolderWritable(util.ResolvePath(folder)); err != nil {
			glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
		}
	}

	// set max volume counts
	maxCountStrings := strings.Split(maxVolumeCounts, ",")
	for _, maxString := range maxCountStrings {
		if max, e := strconv.Atoi(maxString); e == nil {
			v.folderMaxLimits = append(v.folderMaxLimits, max)
		} else {
			glog.Fatalf("The max specified in -max is not a valid number: %s", maxString)
		}
	}
	if len(v.folderMaxLimits) == 1 && len(v.folders) > 1 {
		for i := 0; i < len(v.folders)-1; i++ {
			v.folderMaxLimits = append(v.folderMaxLimits, v.folderMaxLimits[0])
		}
	}
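	// For example (illustrative values): with -dir=/d1,/d2,/d3 and -max=7, the single count
	// is replicated so folderMaxLimits becomes [7, 7, 7]; the same single-value expansion is
	// applied to -minFreeSpacePercent and -disk below.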
	if len(v.folders) != len(v.folderMaxLimits) {
		glog.Fatalf("%d directories by -dir, but only %d max values are set by -max", len(v.folders), len(v.folderMaxLimits))
	}

	// set minFreeSpacePercent
	minFreeSpacePercentStrings := strings.Split(minFreeSpacePercent, ",")
	for _, freeString := range minFreeSpacePercentStrings {
		if value, e := strconv.ParseFloat(freeString, 32); e == nil {
			v.minFreeSpacePercents = append(v.minFreeSpacePercents, float32(value))
		} else {
			glog.Fatalf("The value specified in -minFreeSpacePercent is not a valid number: %s", freeString)
		}
	}
	if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 {
		for i := 0; i < len(v.folders)-1; i++ {
			v.minFreeSpacePercents = append(v.minFreeSpacePercents, v.minFreeSpacePercents[0])
		}
	}
	if len(v.folders) != len(v.minFreeSpacePercents) {
		glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent values are set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
	}

	// set disk types
	var diskTypes []storage.DiskType
	diskTypeStrings := strings.Split(*v.diskType, ",")
	for _, diskTypeString := range diskTypeStrings {
		if diskType, err := storage.ToDiskType(diskTypeString); err == nil {
			diskTypes = append(diskTypes, diskType)
		} else {
			glog.Fatalf("failed to parse disk type: %v", err)
		}
	}
	if len(diskTypes) == 1 && len(v.folders) > 1 {
		for i := 0; i < len(v.folders)-1; i++ {
			diskTypes = append(diskTypes, diskTypes[0])
		}
	}
	if len(v.folders) != len(diskTypes) {
		glog.Fatalf("%d directories by -dir, but only %d disk types are set by -disk", len(v.folders), len(diskTypes))
	}

	// security related white list configuration
	if volumeWhiteListOption != "" {
		v.whiteList = strings.Split(volumeWhiteListOption, ",")
	}

	if *v.ip == "" {
		*v.ip = util.DetectedHostAddress()
		glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
	}

	if *v.publicPort == 0 {
		*v.publicPort = *v.port
	}
	if *v.publicUrl == "" {
		*v.publicUrl = *v.ip + ":" + strconv.Itoa(*v.publicPort)
	}

	volumeMux := http.NewServeMux()
	publicVolumeMux := volumeMux
	if v.isSeparatedPublicPort() {
		publicVolumeMux = http.NewServeMux()
	}

	if *v.pprof {
		volumeMux.HandleFunc("/debug/pprof/", httppprof.Index)
		volumeMux.HandleFunc("/debug/pprof/cmdline", httppprof.Cmdline)
		volumeMux.HandleFunc("/debug/pprof/profile", httppprof.Profile)
		volumeMux.HandleFunc("/debug/pprof/symbol", httppprof.Symbol)
		volumeMux.HandleFunc("/debug/pprof/trace", httppprof.Trace)
	}
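
	// With -pprof enabled, profiles can then be collected over HTTP, e.g. (host and port
	// are illustrative): go tool pprof http://localhost:8080/debug/pprof/profile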

	volumeNeedleMapKind := storage.NeedleMapInMemory
	switch *v.indexType {
	case "leveldb":
		volumeNeedleMapKind = storage.NeedleMapLevelDb
	case "leveldbMedium":
		volumeNeedleMapKind = storage.NeedleMapLevelDbMedium
	case "leveldbLarge":
		volumeNeedleMapKind = storage.NeedleMapLevelDbLarge
}

	masters := *v.masters

	volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
		*v.ip, *v.port, *v.publicUrl,
		v.folders, v.folderMaxLimits, v.minFreeSpacePercents, diskTypes,
		*v.idxFolder,
		volumeNeedleMapKind,
		strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
		v.whiteList,
		*v.fixJpgOrientation, *v.readRedirect,
		*v.compactionMBPerSecond,
		*v.fileSizeLimitMB,
	)

	// starting grpc server
	grpcS := v.startGrpcService(volumeServer)

	// starting public http server
	var publicHttpDown httpdown.Server
	if v.isSeparatedPublicPort() {
		publicHttpDown = v.startPublicHttpService(publicVolumeMux)
		if nil == publicHttpDown {
			glog.Fatalf("start public http service failed")
		}
	}

	// starting the cluster http server
	clusterHttpServer := v.startClusterHttpService(volumeMux)

	stopChan := make(chan bool)
	grace.OnInterrupt(func() {
		fmt.Println("volume server has been killed")

		// Stop heartbeats
		if !volumeServer.StopHeartbeat() {
			glog.V(0).Infof("stopped sending heartbeats, waiting %d seconds before shutdown ...", *v.preStopSeconds)
			time.Sleep(time.Duration(*v.preStopSeconds) * time.Second)
		}

		shutdown(publicHttpDown, clusterHttpServer, grpcS, volumeServer)
		stopChan <- true
	})

	select {
	case <-stopChan:
	}

}
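
// shutdown stops the services in reverse dependency order: the public HTTP server first (so no
// new user requests are accepted), then the cluster HTTP server, the gRPC server, and finally
// the volume server itself.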
func shutdown(publicHttpDown httpdown.Server, clusterHttpServer httpdown.Server, grpcS *grpc.Server, volumeServer *weed_server.VolumeServer) {

	// first, stop the public http service to prevent it from receiving new user requests
	if nil != publicHttpDown {
		glog.V(0).Infof("stop public http server ... ")
		if err := publicHttpDown.Stop(); err != nil {
			glog.Warningf("stop the public http server failed, %v", err)
		}
	}

	glog.V(0).Infof("graceful stop cluster http server ... ")
	if err := clusterHttpServer.Stop(); err != nil {
		glog.Warningf("stop the cluster http server failed, %v", err)
	}

	glog.V(0).Infof("graceful stop gRPC ...")
	grpcS.GracefulStop()

	volumeServer.Shutdown()

	pprof.StopCPUProfile()
}

// isSeparatedPublicPort reports whether a separate public port is configured.
func (v VolumeServerOptions) isSeparatedPublicPort() bool {
	return *v.publicPort != *v.port
}
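
// startGrpcService serves the VolumeServer gRPC API on the HTTP port + 10000, with optional TLS
// loaded from the grpc.volume section of the security configuration.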
func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerServer) *grpc.Server {
	grpcPort := *v.port + 10000
	grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0)
	if err != nil {
		glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
	}
	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
	volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
	reflection.Register(grpcS)
	go func() {
		if err := grpcS.Serve(grpcL); err != nil {
			glog.Fatalf("start gRPC service failed, %s", err)
		}
	}()
	return grpcS
}
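
// startPublicHttpService starts an httpdown-managed HTTP server on the public port so that
// in-flight requests can be drained gracefully during shutdown.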
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
	publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
	glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
	publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
	if e != nil {
		glog.Fatalf("Volume server listener error:%v", e)
	}

	pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute}
	publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener)
	go func() {
		if err := publicHttpDown.Wait(); err != nil {
			glog.Errorf("public http down wait failed, %v", err)
		}
	}()

	return publicHttpDown
}
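
// startClusterHttpService starts the main cluster-facing HTTP server, optionally with TLS when
// https.volume.cert and https.volume.key are configured.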
func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpdown.Server {
	var (
		certFile, keyFile string
	)
	if viper.GetString("https.volume.key") != "" {
		certFile = viper.GetString("https.volume.cert")
		keyFile = viper.GetString("https.volume.key")
	}

	listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
	glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
	listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
	if e != nil {
		glog.Fatalf("Volume server listener error:%v", e)
	}

	httpDown := httpdown.HTTP{
		KillTimeout: 5 * time.Minute,
		StopTimeout: 5 * time.Minute,
		CertFile:    certFile,
		KeyFile:     keyFile}
	clusterHttpServer := httpDown.Serve(&http.Server{Handler: handler}, listener)
	go func() {
		if e := clusterHttpServer.Wait(); e != nil {
			glog.Fatalf("Volume server failed to serve: %v", e)
		}
	}()
	return clusterHttpServer
}