# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file to one of the location, with descending priority
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################
[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2" # directory to store level db files

[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files
[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files

[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file
[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS `filemeta` (
#   `dirhash`   BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
#   `name`      VARCHAR(766) NOT NULL COMMENT 'directory or file name',
#   `directory` TEXT NOT NULL COMMENT 'full path to parent directory',
#   `meta`      LONGBLOB,
#   PRIMARY KEY (`dirhash`, `name`)
# ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
[mysql2] # or memsql, tidb
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS `%s` (
    `dirhash`   BIGINT NOT NULL,
    `name`      VARCHAR(766) NOT NULL,
    `directory` TEXT NOT NULL,
    `meta`      LONGBLOB,
    PRIMARY KEY (`dirhash`, `name`)
  ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
[postgres2]
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS "%s" (
    dirhash   BIGINT,
    name      VARCHAR(65535),
    directory VARCHAR(65535),
    meta      bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
[cassandra]
# CREATE TABLE filemeta (
#    directory varchar,
#    name varchar,
#    meta blob,
#    PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
    "localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""
# Gocql connection timeout, default: 600ms
connection_timeout_millisecond = 600
[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"
[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis2_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0
[redis_cluster2]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis_lua]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_lua_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_lua_cluster]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis3] # beta
enabled = false
address = "localhost:6379"
password = ""
database = 0

[redis3_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster3] # beta
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"

[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"
[elastic7]
enabled = false
servers = [
    "http://localhost1:9200",
    "http://localhost2:9200",
    "http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increase the value is recommend, be sure the value in Elastic is greater or equal here
index.max_result_window = 10000
[arangodb] # in development dont use it
enabled = false
db_name = "seaweedfs"
servers = ["http://localhost:8529"] # list of servers to connect to
# only basic auth supported for now
username = ""
password = ""
# skip tls cert validation
insecure_skip_verify = true
[ydb] # https://ydb.tech/
enabled = false
dsn = "grpc://localhost:2136?database=/local"
prefix = "seaweedfs"
useBucketPrefix = true # Fast Bucket Deletion
poolSizeLimit = 50
dialTimeOut = 10
# Authenticate produced with one of next environment variables:
#  YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS=<path/to/sa_key_file> — used service account key file by path
#  YDB_ANONYMOUS_CREDENTIALS="1" — used for authenticate with anonymous access. Anonymous access needs for connect to testing YDB installation
#  YDB_METADATA_CREDENTIALS="1" — used metadata service for authenticate to YDB from yandex cloud virtual machine or from yandex function
#  YDB_ACCESS_TOKEN_CREDENTIALS=<access_token> — used for authenticate to YDB with short-life access token. For example, access token may be IAM token
##########################
##########################
# To add path-specific filer store:
#
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1
[tikv]
enabled = false
# If you have many pd address, use ',' split then:
#   pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
pdaddrs = "localhost:2379"
# Concurrency for TiKV delete range
deleterange_concurrency = 1
# Enable 1PC
enable_1pc = false
# Set the CA certificate path
ca_path = ""
# Set the certificate path
cert_path = ""
# Set the private key path
key_path = ""
# The name list used to verify the cn name
verify_cn = ""