// seaweedfs/weed/command/scaffold.go
package command
import (
"io/ioutil"
"path/filepath"
)
// init wires runScaffold into cmdScaffold after both package-level values
// exist, which avoids an initialization cycle between the command definition
// and its Run function.
func init() {
	cmdScaffold.Run = runScaffold
}
var cmdScaffold = &Command{
UsageLine: "scaffold -config=[filer|notification|replication|security|master]",
2018-08-20 06:36:30 +08:00
Short: "generate basic configuration files",
Long: `Generate filer.toml with all possible configurations for you to customize.
The options can also be overwritten by environment variables.
For example, the filer.toml mysql password can be overwritten by environment variable
2020-01-30 01:11:07 +08:00
export WEED_MYSQL_PASSWORD=some_password
Environment variable rules:
2020-04-11 05:50:10 +08:00
* Prefix the variable name with "WEED_"
* Upppercase the reset of variable name.
* Replace '.' with '_'
2018-08-20 06:36:30 +08:00
`,
}
var (
outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate")
2018-08-20 06:36:30 +08:00
)
func runScaffold(cmd *Command, args []string) bool {
content := ""
switch *config {
case "filer":
content = FILER_TOML_EXAMPLE
case "notification":
content = NOTIFICATION_TOML_EXAMPLE
2018-09-17 15:27:56 +08:00
case "replication":
content = REPLICATION_TOML_EXAMPLE
2019-02-10 13:07:12 +08:00
case "security":
content = SECURITY_TOML_EXAMPLE
case "master":
content = MASTER_TOML_EXAMPLE
case "shell":
content = SHELL_TOML_EXAMPLE
2018-08-20 06:36:30 +08:00
}
if content == "" {
println("need a valid -config option")
return false
}
if *outputPath != "" {
ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
2018-08-20 06:36:30 +08:00
} else {
println(content)
}
return true
}
// Example configuration templates emitted by "weed scaffold".
// Each constant is the full text of one generated .toml file.
// NOTE(review): copy-paste timestamp artifacts were removed from the template
// bodies; the TOML content itself is otherwise preserved.
const (
	// FILER_TOML_EXAMPLE is the template for filer.toml (filer store options).
	FILER_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file to one of the location, with descending priority
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################
[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
# directories under this folder will be automatically creating a separate bucket
buckets_folder = "/buckets"

####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2" # directory to store level db files

[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files

[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files

[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
#   name VARCHAR(1000) BINARY COMMENT 'directory or file name',
#   directory TEXT COMMENT 'full path to parent directory',
#   meta LONGBLOB,
#   PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""

[mysql2] # or memsql, tidb
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
    dirhash BIGINT,
    name VARCHAR(1000) BINARY,
    directory TEXT,
    meta LONGBLOB,
    PRIMARY KEY (dirhash, name)
  ) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""

[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash BIGINT,
#   name VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""

[postgres2]
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS "%s" (
    dirhash BIGINT,
    name VARCHAR(65535),
    directory VARCHAR(65535),
    meta bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""

[cassandra]
# CREATE TABLE filemeta (
#    directory varchar,
#    name varchar,
#    meta blob,
#    PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace="seaweedfs"
hosts=[
	"localhost:9042",
]
username=""
password=""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"

[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_cluster2]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"

[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"

[elastic7]
enabled = false
servers = [
    "http://localhost1:9200",
    "http://localhost2:9200",
    "http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increase the value is recommend, be sure the value in Elastic is greater or equal here
index.max_result_window = 10000

##########################
##########################
# To add path-specific filer store:
#
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1
`

	// NOTIFICATION_TOML_EXAMPLE is the template for notification.toml
	// (filer update message queues).
	NOTIFICATION_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
# Put this file to one of the location, with descending priority
#    ./notification.toml
#    $HOME/.seaweedfs/notification.toml
#    /etc/seaweedfs/notification.toml

####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging purpose and does not work with "weed filer.replicate"
enabled = false

[notification.kafka]
enabled = false
hosts = [
  "localhost:9092"
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10

[notification.aws_sqs]
# experimental, let me know if it works
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
sqs_queue_name = "my_filer_queue" # an existing queue name

[notification.google_pub_sub]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
project_id = "" # an existing project id
topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists

[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue then
# create binding myexchange => myqueue
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`

	// REPLICATION_TOML_EXAMPLE is the template for replication.toml
	// (filer.replicate source and sinks).
	REPLICATION_TOML_EXAMPLE = `
# A sample TOML config file for replicating SeaweedFS filer
# Used with "weed filer.replicate"
# Put this file to one of the location, with descending priority
#    ./replication.toml
#    $HOME/.seaweedfs/replication.toml
#    /etc/seaweedfs/replication.toml

[source.filer]
enabled = true
grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to notification message queue.
directory = "/buckets"

[sink.local]
enabled = false
directory = "/data"
# all replicated files are under modified time as yyyy-mm-dd directories
# so each date directory contains all new and updated files.
is_incremental = false

[sink.local_incremental]
# all replicated files are under modified time as yyyy-mm-dd directories
# so each date directory contains all new and updated files.
enabled = false
directory = "/backup"

[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are under this directory tree
# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" to this directory.
directory = "/backup"
replication = ""
collection = ""
ttlSec = 0
is_incremental = false

[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/" # destination directory
endpoint = ""
is_incremental = false

[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
bucket = "your_bucket_seaweedfs" # an existing bucket
directory = "/" # destination directory
is_incremental = false

[sink.azure]
# experimental, let me know if it works
enabled = false
account_name = ""
account_key = ""
container = "mycontainer" # an existing container
directory = "/" # destination directory
is_incremental = false

[sink.backblaze]
enabled = false
b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket" # an existing bucket
directory = "/" # destination directory
is_incremental = false
`

	// SECURITY_TOML_EXAMPLE is the template for security.toml
	// (JWT signing and gRPC/HTTPS TLS settings).
	SECURITY_TOML_EXAMPLE = `
# Put this file to one of the location, with descending priority
#    ./security.toml
#    $HOME/.seaweedfs/security.toml
#    /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer

# the jwt signing key is read by master and volume server.
# a jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds

# jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds

# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PERM files.
# the host name is not checked, so the PERM files can be shared.
[grpc]
ca = ""
# Set wildcard domain for enable TLS authentication by common names
allowed_wildcard_domain = "" # .mycompany.com

[grpc.volume]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names

[grpc.master]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names

[grpc.filer]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names

[grpc.msg_broker]
cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names

# use this for any place needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""

# volume server https options
# Note: work in progress!
#     this does not work with other clients, e.g., "weed filer|mount" etc, yet.
[https.client]
enabled = true

[https.volume]
cert = ""
key = ""
`

	// MASTER_TOML_EXAMPLE is the template for master.toml
	// (maintenance scripts, sequencer, tiered storage, volume growth).
	MASTER_TOML_EXAMPLE = `
# Put this file to one of the location, with descending priority
#    ./master.toml
#    $HOME/.seaweedfs/master.toml
#    /etc/seaweedfs/master.toml
# this file is read by master

[master.maintenance]
# periodically run these scripts are the same as running them from 'weed shell'
scripts = """
  lock
  ec.encode -fullPercent=95 -quietFor=1h
  ec.rebuild -force
  ec.balance -force
  volume.balance -force
  volume.fix.replication
  unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution

[master.filer]
default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands

[master.sequencer]
type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence
# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
# example : http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"

# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
[storage.backend.s3.default]
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
endpoint = ""

# create this number of logical volumes if no more writable volumes
# count_x means how many copies of data.
# e.g.:
#   000 has only one copy, copy_1
#   010 and 001 has two copies, copy_2
#   011 has only 3 copies, copy_3
[master.volume_growth]
copy_1 = 7     # create 1 x 7 = 7 actual volumes
copy_2 = 6     # create 2 x 6 = 12 actual volumes
copy_3 = 3     # create 3 x 3 = 9 actual volumes
copy_other = 1 # create n x 1 = n actual volumes

# configuration flags for replication
[master.replication]
# any replication counts should be considered minimums. If you specify 010 and
# have 3 different racks, that's still considered writable. Writes will still
# try to replicate to all available volumes. You should only use this option
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false
`

	// SHELL_TOML_EXAMPLE is the template for shell.toml
	// (named cluster definitions for "weed shell").
	SHELL_TOML_EXAMPLE = `
[cluster]
default = "c1"

[cluster.c1]
master = "localhost:9333" # comma-separated master servers
filer = "localhost:8888"  # filer host and port

[cluster.c2]
master = ""
filer = ""
`
)