seaweedfs/weed/command/s3.go

package command

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/gorilla/mux"
	"google.golang.org/grpc/reflection"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/s3_pb"
	"github.com/seaweedfs/seaweedfs/weed/s3api"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
	"github.com/seaweedfs/seaweedfs/weed/security"
	stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
var (
	s3StandaloneOptions S3Options
)
type S3Options struct {
	filer                     *string
	bindIp                    *string
	port                      *int
	portGrpc                  *int
	config                    *string
	domainName                *string
	tlsPrivateKey             *string
	tlsCertificate            *string
	metricsHttpPort           *int
	allowEmptyFolder          *bool
	allowDeleteBucketNotEmpty *bool
	auditLogConfig            *string
	localFilerSocket          *string
	dataCenter                *string
}
func init() {
	cmdS3.Run = runS3 // break init cycle
	s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address")
	s3StandaloneOptions.bindIp = cmdS3.Flag.String("ip.bind", "", "ip address to bind to. Defaults to localhost.")
	s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port")
	s3StandaloneOptions.portGrpc = cmdS3.Flag.Int("port.grpc", 0, "s3 server grpc listen port")
	s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}")
	s3StandaloneOptions.dataCenter = cmdS3.Flag.String("dataCenter", "", "prefer to read and write to volumes in this data center")
	s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file")
	s3StandaloneOptions.auditLogConfig = cmdS3.Flag.String("auditLogConfig", "", "path to the audit log config file")
	s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
	s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
	s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
	s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", true, "allow empty folders")
	s3StandaloneOptions.allowDeleteBucketNotEmpty = cmdS3.Flag.Bool("allowDeleteBucketNotEmpty", true, "allow recursively deleting all entries along with the bucket")
	s3StandaloneOptions.localFilerSocket = cmdS3.Flag.String("localFilerSocket", "", "local filer socket path")
}
var cmdS3 = &Command{
	UsageLine: "s3 [-port=8333] [-filer=<ip:port>] [-config=</path/to/config.json>]",
	Short:     "start an S3 API compatible server that is backed by a filer",
	Long: `start an S3 API compatible server that is backed by a filer.

	By default, you can use any access key and secret key to access the S3 APIs.
	To enable credential-based access, create a config.json file similar to this:

{
	"identities": [
		{
			"name": "anonymous",
			"actions": [
				"Read"
			]
		},
		{
			"name": "some_admin_user",
			"credentials": [
				{
					"accessKey": "some_access_key1",
					"secretKey": "some_secret_key1"
				}
			],
			"actions": [
				"Admin",
				"Read",
				"List",
				"Tagging",
				"Write"
			]
		},
		{
			"name": "some_read_only_user",
			"credentials": [
				{
					"accessKey": "some_access_key2",
					"secretKey": "some_secret_key2"
				}
			],
			"actions": [
				"Read"
			]
		},
		{
			"name": "some_normal_user",
			"credentials": [
				{
					"accessKey": "some_access_key3",
					"secretKey": "some_secret_key3"
				}
			],
			"actions": [
				"Read",
				"List",
				"Tagging",
				"Write"
			]
		},
		{
			"name": "user_limited_to_bucket1",
			"credentials": [
				{
					"accessKey": "some_access_key4",
					"secretKey": "some_secret_key4"
				}
			],
			"actions": [
				"Read:bucket1",
				"List:bucket1",
				"Tagging:bucket1",
				"Write:bucket1"
			]
		}
	]
}

`,
}
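// Example invocation (a sketch; the filer address, port, and config path below are
// illustrative values matching the flag defaults above, not requirements):
//
//	weed s3 -filer=localhost:8888 -port=8333 -config=/path/to/config.json
//
// Without -config, any access key and secret key are accepted, as described in the
// help text above.
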
func runS3(cmd *Command, args []string) bool {

	util.LoadConfiguration("security", false)

	go stats_collect.StartMetricsServer(*s3StandaloneOptions.bindIp, *s3StandaloneOptions.metricsHttpPort)

	return s3StandaloneOptions.startS3Server()
}
func (s3opt *S3Options) startS3Server() bool {

	filerAddress := pb.ServerAddress(*s3opt.filer)

	filerBucketsPath := "/buckets"

	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

	// metrics read from the filer
	var metricsAddress string
	var metricsIntervalSec int

	// keep retrying until the filer is reachable, then read its configuration
	// (buckets directory, metrics address, and push interval)
	for {
		err := pb.WithGrpcFilerClient(false, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
			resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
			if err != nil {
				return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
			}
			filerBucketsPath = resp.DirBuckets
			metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
			glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
			return nil
		})
		if err != nil {
			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress())
			time.Sleep(time.Second)
		} else {
			glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress())
			break
		}
	}
	// periodically push S3 metrics to the metrics address advertised by the filer
	go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), metricsAddress, metricsIntervalSec)

	router := mux.NewRouter().SkipClean(true)
	var localFilerSocket string
	if s3opt.localFilerSocket != nil {
		localFilerSocket = *s3opt.localFilerSocket
	}
	s3ApiServer, s3ApiServerErr := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
		Filer:                     filerAddress,
		Port:                      *s3opt.port,
		Config:                    *s3opt.config,
		DomainName:                *s3opt.domainName,
		BucketsPath:               filerBucketsPath,
		GrpcDialOption:            grpcDialOption,
		AllowEmptyFolder:          *s3opt.allowEmptyFolder,
		AllowDeleteBucketNotEmpty: *s3opt.allowDeleteBucketNotEmpty,
		LocalFilerSocket:          localFilerSocket,
		DataCenter:                *s3opt.dataCenter,
	})
	if s3ApiServerErr != nil {
		glog.Fatalf("S3 API Server startup error: %v", s3ApiServerErr)
	}
	httpS := &http.Server{Handler: router}

	// default the gRPC port to the HTTP port plus 10000 when not explicitly set
	if *s3opt.portGrpc == 0 {
		*s3opt.portGrpc = 10000 + *s3opt.port
	}
	if *s3opt.bindIp == "" {
		*s3opt.bindIp = "localhost"
	}

	listenAddress := fmt.Sprintf("%s:%d", *s3opt.bindIp, *s3opt.port)
	s3ApiListener, s3ApiLocalListener, err := util.NewIpAndLocalListeners(*s3opt.bindIp, *s3opt.port, time.Duration(10)*time.Second)
	if err != nil {
		glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
	}

	// enable audit logging only when an audit log config file is provided
	if len(*s3opt.auditLogConfig) > 0 {
		s3err.InitAuditLog(*s3opt.auditLogConfig)
		if s3err.Logger != nil {
			defer s3err.Logger.Close()
		}
	}
	// starting grpc server
	grpcPort := *s3opt.portGrpc
	grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*s3opt.bindIp, grpcPort, 0)
	if err != nil {
		glog.Fatalf("s3 failed to listen on grpc port %d: %v", grpcPort, err)
	}
	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.s3"))
	s3_pb.RegisterSeaweedS3Server(grpcS, s3ApiServer)
	reflection.Register(grpcS)
	if grpcLocalL != nil {
		go grpcS.Serve(grpcLocalL)
	}
	go grpcS.Serve(grpcL)
if *s3opt.tlsPrivateKey != "" {
2020-06-02 15:10:35 +08:00
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
if s3ApiLocalListener != nil {
go func() {
if err = httpS.ServeTLS(s3ApiLocalListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
}()
}
if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil {
2018-07-23 12:28:54 +08:00
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
} else {
2020-06-02 15:10:35 +08:00
glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
if s3ApiLocalListener != nil {
go func() {
if err = httpS.Serve(s3ApiLocalListener); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
}()
}
2018-07-23 12:28:54 +08:00
if err = httpS.Serve(s3ApiListener); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
2018-07-18 17:37:09 +08:00
}
return true
}