package command

import (
	"io/ioutil"
	"path/filepath"
)

func init() {
	cmdScaffold.Run = runScaffold // break init cycle
}

var cmdScaffold = &Command{
	UsageLine: "scaffold -config=[filer|notification|replication|security|master|shell]",
	Short:     "generate basic configuration files",
	Long: `Generate filer.toml with all possible configurations for you to customize.

	The options can also be overwritten by environment variables.
	For example, the filer.toml mysql password can be overwritten by the environment variable
		export WEED_MYSQL_PASSWORD=some_password
	Environment variable rules:
		* Prefix the variable name with "WEED_"
		* Uppercase the rest of the variable name.
		* Replace '.' with '_'
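		Following the same rules, a nested option such as [filer.options] recursive_delete maps to
			export WEED_FILER_OPTIONS_RECURSIVE_DELETE=false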

	`,
}

var (
	outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
	config     = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master|shell] the configuration file to generate")
)
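
// Example invocations (a sketch based on the flags above; the generated
// template is printed unless -output points to an existing directory):
//
//	weed scaffold -config=filer              # print filer.toml
//	weed scaffold -config=master -output=.   # write master.toml to the current directory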

func runScaffold(cmd *Command, args []string) bool {

	content := ""
	switch *config {
	case "filer":
		content = FILER_TOML_EXAMPLE
	case "notification":
		content = NOTIFICATION_TOML_EXAMPLE
	case "replication":
		content = REPLICATION_TOML_EXAMPLE
	case "security":
		content = SECURITY_TOML_EXAMPLE
	case "master":
		content = MASTER_TOML_EXAMPLE
	case "shell":
		content = SHELL_TOML_EXAMPLE
	}
	if content == "" {
		println("need a valid -config option")
		return false
	}

	if *outputPath != "" {
		// write the template next to the requested name, e.g. <output>/filer.toml
		if err := ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644); err != nil {
			println("failed to write", *config+".toml:", err.Error())
			return false
		}
	} else {
		println(content)
	}
	return true
}

const (
	FILER_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file in one of these locations, with descending priority
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################
[filer.options]
# With HTTP DELETE, by default the filer checks whether a folder is empty before deleting it.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
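# For example (a sketch, assuming the default filer port 8888), a single HTTP
# DELETE request can also ask for recursion explicitly:
#   curl -X DELETE "http://localhost:8888/path/to/dir/?recursive=true"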

# each directory created under this folder will automatically become a separate bucket
buckets_folder = "/buckets"

####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2" # directory to store level db files

[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files

[rocksdb]
# local on disk, similar to leveldb
# since it uses a C wrapper, you need to install rocksdb and build it yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files

[mysql]  # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash     BIGINT               COMMENT 'first 64 bits of MD5 hash value of directory field',
#   name        VARCHAR(1000) BINARY COMMENT 'directory or file name',
#   directory   TEXT                 COMMENT 'full path to parent directory',
#   meta        LONGBLOB,
#   PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = ""              # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false

[mysql2]  # or memsql, tidb
enabled = false
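# Note: the %s placeholder in createTable below is expected to be filled in by
# the filer with the actual table name (e.g. per-bucket tables), so keep it as-is.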
createTable = """
  CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
    dirhash   BIGINT,
    name      VARCHAR(1000) BINARY,
    directory TEXT,
    meta      LONGBLOB,
    PRIMARY KEY (dirhash, name)
  ) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = ""              # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false

[postgres]  # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres"      # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0

[postgres2]
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS "%s" (
    dirhash   BIGINT,
    name      VARCHAR(65535),
    directory VARCHAR(65535),
    meta      bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres"      # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0

[cassandra]
# CREATE TABLE filemeta (
#    directory varchar,
#    name varchar,
#    meta blob,
#    PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
	"localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
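# Example (hypothetical directory, for illustration only):
# superLargeDirectories = ["/home/users"]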

[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"

[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_cluster2]
enabled = false
addresses = [
	"localhost:30001",
	"localhost:30002",
	"localhost:30003",
	"localhost:30004",
	"localhost:30005",
	"localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"

[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"

[elastic7]
enabled = false
servers = [
	"http://localhost1:9200",
	"http://localhost2:9200",
	"http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increasing this value is recommended; be sure the matching value in Elasticsearch is greater than or equal to the value here
index.max_result_window = 10000
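# For example, the matching index setting can be raised on the Elasticsearch side
# with the index settings API (a sketch; the index name "seaweedfs" is an assumption):
#   curl -X PUT "http://localhost:9200/seaweedfs/_settings" \
#        -H 'Content-Type: application/json' \
#        -d '{"index.max_result_window": 10000}'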

##########################
##########################
# To add a path-specific filer store:
#
# 1. Add a name following the store type, separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis2 as an example
##########################
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1

`

	NOTIFICATION_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer notification
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
# Put this file in one of these locations, with descending priority
#    ./notification.toml
#    $HOME/.seaweedfs/notification.toml
#    /etc/seaweedfs/notification.toml

####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false


[notification.kafka]
enabled = false
hosts = [
	"localhost:9092"
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10


[notification.aws_sqs]
# experimental, let me know if it works
enabled = false
aws_access_key_id = ""        # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = ""    # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
sqs_queue_name = "my_filer_queue" # an existing queue name


[notification.google_pub_sub]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
project_id = ""                     # an existing project id
topic = "seaweedfs_filer_topic"     # a topic, auto created if it does not exist

[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue, then
# create a binding myexchange => myqueue
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
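# One way to pre-create the exchange, queue and binding (a sketch, assuming the
# rabbitmqadmin tool from the management plugin is available):
#   rabbitmqadmin declare exchange name=myexchange type=fanout
#   rabbitmqadmin declare queue name=myqueue
#   rabbitmqadmin declare binding source=myexchange destination_type=queue destination=myqueue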

`

	REPLICATION_TOML_EXAMPLE = `
# A sample TOML config file for replicating SeaweedFS filer
# Used with "weed filer.replicate"
# Put this file in one of these locations, with descending priority
#    ./replication.toml
#    $HOME/.seaweedfs/replication.toml
#    /etc/seaweedfs/replication.toml

[source.filer]
enabled = true
grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to the notification message queue.
directory = "/buckets"

[sink.local]
enabled = false
directory = "/data"
# all replicated files are placed under per-modification-date yyyy-mm-dd directories,
# so each date directory contains all new and updated files.
is_incremental = false

[sink.local_incremental]
# all replicated files are placed under per-modification-date yyyy-mm-dd directories,
# so each date directory contains all new and updated files.
enabled = false
directory = "/backup"

[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are under this directory tree
# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" to this directory.
directory = "/backup"
replication = ""
collection = ""
ttlSec = 0
is_incremental = false

[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = ""        # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = ""    # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name"   # an existing bucket
directory = "/"               # destination directory
endpoint = ""
is_incremental = false

[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
bucket = "your_bucket_seaweedfs"    # an existing bucket
directory = "/"                     # destination directory
is_incremental = false

[sink.azure]
# experimental, let me know if it works
enabled = false
account_name = ""
account_key = ""
container = "mycontainer"     # an existing container
directory = "/"               # destination directory
is_incremental = false

[sink.backblaze]
enabled = false
b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket"           # an existing bucket
directory = "/"               # destination directory
is_incremental = false

`

	SECURITY_TOML_EXAMPLE = `
# Put this file in one of these locations, with descending priority
#    ./security.toml
#    $HOME/.seaweedfs/security.toml
#    /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer

# the jwt signing key is read by master and volume server.
# a jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10    # seconds

# jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10    # seconds

# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
[grpc]
ca = ""
# Set a wildcard domain to enable TLS authentication by common names
allowed_wildcard_domain = "" # .mycompany.com

[grpc.volume]
cert = ""
key = ""
allowed_commonNames = ""    # comma-separated SSL certificate common names

[grpc.master]
cert = ""
key = ""
allowed_commonNames = ""    # comma-separated SSL certificate common names

[grpc.filer]
cert = ""
key = ""
allowed_commonNames = ""    # comma-separated SSL certificate common names

[grpc.msg_broker]
cert = ""
key = ""
allowed_commonNames = ""    # comma-separated SSL certificate common names

# use this for any place that needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""

# volume server https options
# Note: work in progress!
#     this does not work with other clients, e.g., "weed filer|mount" etc., yet.
[https.client]
enabled = true
[https.volume]
cert = ""
key = ""

`

	MASTER_TOML_EXAMPLE = `
# Put this file in one of these locations, with descending priority
#    ./master.toml
#    $HOME/.seaweedfs/master.toml
#    /etc/seaweedfs/master.toml
# this file is read by master

[master.maintenance]
# periodically running these scripts is the same as running them from 'weed shell'
scripts = """
  lock
  ec.encode -fullPercent=95 -quietFor=1h
  ec.rebuild -force
  ec.balance -force
  volume.balance -force
  volume.fix.replication
  unlock
"""
sleep_minutes = 17          # sleep minutes between each script execution

[master.filer]
default = "localhost:8888"    # used by maintenance scripts if the scripts need to use fs related commands

[master.sequencer]
type = "raft"     # Choose [raft|etcd|snowflake] type for storing the file id sequence
# when sequencer.type = etcd, set listen client urls of the etcd cluster that stores the file id sequence
# example : http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"

# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
	[storage.backend.s3.default]
	enabled = false
	aws_access_key_id = ""        # if empty, loads from the shared credentials file (~/.aws/credentials).
	aws_secret_access_key = ""    # if empty, loads from the shared credentials file (~/.aws/credentials).
	region = "us-east-2"
	bucket = "your_bucket_name"   # an existing bucket
	endpoint = ""

# create this number of logical volumes if there are no more writable volumes
# count_x means how many copies of data.
# e.g.:
#   000 has only one copy,  copy_1
#   010 and 001 have two copies, copy_2
#   011 has three copies, copy_3
[master.volume_growth]
copy_1 = 7                # create 1 x 7 = 7 actual volumes
copy_2 = 6                # create 2 x 6 = 12 actual volumes
copy_3 = 3                # create 3 x 3 = 9 actual volumes
copy_other = 1            # create n x 1 = n actual volumes

# configuration flags for replication
[master.replication]
# any replication counts should be considered minimums. If you specify 010 and
# have 3 different racks, that's still considered writable. Writes will still
# try to replicate to all available volumes. You should only use this option
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false

`

	SHELL_TOML_EXAMPLE = `

[cluster]
default = "c1"

[cluster.c1]
master = "localhost:9333" # comma-separated master servers
filer = "localhost:8888"  # filer host and port

[cluster.c2]
master = ""
filer = ""

`
)