Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-11-24 02:59:13 +08:00)

Commit 42c849e0df: Merge branch 'master' into metadata_follow_with_client_id
test/s3/compatibility/.gitignore (vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
/s3-tests
/tmp
test/s3/compatibility/Dockerfile (new file, 11 lines)

@@ -0,0 +1,11 @@
# the tests only support python 3.6, not newer
FROM ubuntu:latest

RUN apt-get update && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install -y git-core sudo tzdata
RUN git clone https://github.com/ceph/s3-tests.git
WORKDIR s3-tests

# we pin a certain commit
RUN git checkout 9a6a1e9f197fc9fb031b809d1e057635c2ff8d4e

RUN ./bootstrap
test/s3/compatibility/README.md (new file, 13 lines)

@@ -0,0 +1,13 @@
# Running S3 Compatibility tests against SeaweedFS

This uses the [s3-tests suite from Ceph](https://github.com/ceph/s3-tests).

## Prerequisites

- Docker installed
- this has been run on macOS; on Linux, the hostname in `s3tests.conf` needs to be adjusted

## Running tests

- `./prepare.sh` to build the Docker image
- `./run.sh` to execute all tests
test/s3/compatibility/prepare.sh (new executable file, 5 lines)

@@ -0,0 +1,5 @@
#!/usr/bin/env bash

set -ex

docker build --progress=plain -t s3tests .
test/s3/compatibility/run.sh (new executable file, 24 lines)

@@ -0,0 +1,24 @@
#!/usr/bin/env bash

set -ex

killall -9 weed || echo "already stopped"
rm -Rf tmp
mkdir tmp
docker stop s3test-instance || echo "already stopped"

ulimit -n 10000
../../../weed/weed server -filer -s3 -volume.max 0 -master.volumeSizeLimitMB 5 -dir "$(pwd)/tmp" 1>&2>weed.log &

until $(curl --output /dev/null --silent --head --fail http://127.0.0.1:9333); do
  printf '.'
  sleep 5
done
sleep 3

rm -Rf logs-full.txt logs-summary.txt
# docker run --name s3test-instance --rm -e S3TEST_CONF=s3tests.conf -v `pwd`/s3tests.conf:/s3-tests/s3tests.conf -it s3tests ./virtualenv/bin/nosetests s3tests_boto3/functional/test_s3.py:test_get_obj_tagging -v -a 'resource=object,!bucket-policy,!versioning,!encryption'
docker run --name s3test-instance --rm -e S3TEST_CONF=s3tests.conf -v `pwd`/s3tests.conf:/s3-tests/s3tests.conf -it s3tests ./virtualenv/bin/nosetests s3tests_boto3/functional/test_s3.py -v -a 'resource=object,!bucket-policy,!versioning,!encryption' | sed -n -e '/botocore.hooks/!p;//q' | tee logs-summary.txt

docker stop s3test-instance || echo "already stopped"
killall -9 weed
test/s3/compatibility/s3tests.conf (new file, 109 lines)

@@ -0,0 +1,109 @@
[DEFAULT]
## this section is just used for host, port and bucket_prefix

# host set for rgw in vstart.sh
host = host.docker.internal

# port set for rgw in vstart.sh
port = 8333

## say "False" to disable TLS
is_secure = False

## say "False" to disable SSL Verify
ssl_verify = False

[fixtures]
## all the buckets created will start with this prefix;
## {random} will be filled with random characters to pad
## the prefix to 30 characters long, and avoid collisions
bucket prefix = yournamehere-{random}-

[s3 main]
# main display_name set in vstart.sh
display_name = M. Tester

# main user_id set in vstart.sh
user_id = testid

# main email set in vstart.sh
email = tester@ceph.com

# zonegroup api_name for bucket location
api_name = default

## main AWS access key
access_key = 0555b35654ad1656d804

## main AWS secret key
secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==

## replace with key id obtained when secret is created, or delete if KMS not tested
#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef

[s3 alt]
# alt display_name set in vstart.sh
display_name = john.doe
## alt email set in vstart.sh
email = john.doe@example.com

# alt user_id set in vstart.sh
user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234

# alt AWS access key set in vstart.sh
access_key = NOPQRSTUVWXYZABCDEFG

# alt AWS secret key set in vstart.sh
secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm

[s3 tenant]
# tenant display_name set in vstart.sh
display_name = testx$tenanteduser

# tenant user_id set in vstart.sh
user_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef

# tenant AWS access key set in vstart.sh
access_key = HIJKLMNOPQRSTUVWXYZA

# tenant AWS secret key set in vstart.sh
secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab

# tenant email set in vstart.sh
email = tenanteduser@example.com

#following section needs to be added for all sts-tests
[iam]
#used for iam operations in sts-tests
#email from vstart.sh
email = s3@example.com

#user_id from vstart.sh
user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef

#access_key from vstart.sh
access_key = ABCDEFGHIJKLMNOPQRST

#secret_key from vstart.sh
secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmn

#display_name from vstart.sh
display_name = youruseridhere

#following section needs to be added when you want to run Assume Role With Webidentity test
[webidentity]
#used for assume role with web identity test in sts-tests
#all parameters will be obtained from ceph/qa/tasks/keycloak.py
token=<access_token>

aud=<obtained after introspecting token>

sub=<obtained after introspecting token>

azp=<obtained after introspecting token>

user_token=<access token for a user, with attribute Department=[Engineering, Marketing]>

thumbprint=<obtained from x509 certificate>

KC_REALM=<name of the realm>
@@ -4,24 +4,46 @@
# /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer

-# the jwt signing key is read by master and volume server.
-# a jwt defaults to expire after 10 seconds.
+# this jwt signing key is read by master and volume server, and it is used for write operations:
+# - the Master server generates the JWT, which can be used to write a certain file on a volume server
+# - the Volume server validates the JWT on writing
+# the jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds

# by default, if the signing key above is set, the Volume UI over HTTP is disabled.
# by setting ui.access to true, you can re-enable the Volume UI. Despite
-# some information leakage (as the UI is unauthenticted), this should not
+# some information leakage (as the UI is not authenticated), this should not
# pose a security risk.
[access]
ui = false

-# jwt for read is only supported with master+volume setup. Filer does not support this mode.
+# this jwt signing key is read by master and volume server, and it is used for read operations:
+# - the Master server generates the JWT, which can be used to read a certain file on a volume server
+# - the Volume server validates the JWT on reading
+# NOTE: jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds

+# If this JWT key is configured, Filer only accepts writes over HTTP if they are signed with this JWT:
+# - f.e. the S3 API Shim generates the JWT
+# - the Filer server validates the JWT on writing
+# the jwt defaults to expire after 10 seconds.
+[jwt.filer_signing]
+key = ""
+expires_after_seconds = 10 # seconds
+
+# If this JWT key is configured, Filer only accepts reads over HTTP if they are signed with this JWT:
+# - f.e. the S3 API Shim generates the JWT
+# - the Filer server validates the JWT on reading
+# the jwt defaults to expire after 10 seconds.
+[jwt.filer_signing.read]
+key = ""
+expires_after_seconds = 10 # seconds

# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PERM files.
# the host name is not checked, so the PERM files can be shared.
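Aside: with the new [jwt.filer_signing] keys, any client that talks to the Filer over HTTP (such as the S3 shim in this change) has to send a signed token. A minimal sketch of that client side, assuming the security package as changed in this commit (GenJwtForFilerServer, SigningKey); the key value and filer URL are hypothetical, while the "BEARER" header format matches what the filer handlers below check:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/chrislusf/seaweedfs/weed/security"
    )

    func main() {
        // hypothetical signing key; in practice this comes from jwt.filer_signing.key in security.toml
        writeKey := security.SigningKey("my-filer-write-key")

        // sign a short-lived token for a write request to the filer
        jwt := security.GenJwtForFilerServer(writeKey, 10)

        req, err := http.NewRequest("PUT", "http://localhost:8888/buckets/demo/hello.txt", nil)
        if err != nil {
            panic(err)
        }
        // the filer expects the token in the Authorization header, BEARER-prefixed
        req.Header.Set("Authorization", "BEARER "+string(jwt))
        fmt.Println(req.Header.Get("Authorization"))
    }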
@@ -25,7 +25,7 @@ type MetaAggregator struct {
    isLeader bool
    grpcDialOption grpc.DialOption
    MetaLogBuffer *log_buffer.LogBuffer
-   peerStatues map[pb.ServerAddress]struct{}
+   peerStatues map[pb.ServerAddress]int
    peerStatuesLock sync.Mutex
    // notifying clients
    ListenersLock sync.Mutex

@@ -39,7 +39,7 @@ func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc.
        filer: filer,
        self: self,
        grpcDialOption: grpcDialOption,
-       peerStatues: make(map[pb.ServerAddress]struct{}),
+       peerStatues: make(map[pb.ServerAddress]int),
    }
    t.ListenersCond = sync.NewCond(&t.ListenersLock)
    t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, func() {

@@ -56,27 +56,40 @@ func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate) {
    address := pb.ServerAddress(update.Address)
    if update.IsAdd {
        // every filer should subscribe to a new filer
-       ma.setActive(address, true)
-       go ma.subscribeToOneFiler(ma.filer, ma.self, address)
+       if ma.setActive(address, true) {
+           go ma.subscribeToOneFiler(ma.filer, ma.self, address)
+       }
    } else {
        ma.setActive(address, false)
    }
}

-func (ma *MetaAggregator) setActive(address pb.ServerAddress, isActive bool) {
+func (ma *MetaAggregator) setActive(address pb.ServerAddress, isActive bool) (notDuplicated bool) {
    ma.peerStatuesLock.Lock()
    defer ma.peerStatuesLock.Unlock()
    if isActive {
-       ma.peerStatues[address] = struct{}{}
+       if _, found := ma.peerStatues[address]; found {
+           ma.peerStatues[address] += 1
+       } else {
+           ma.peerStatues[address] = 1
+           notDuplicated = true
+       }
    } else {
-       delete(ma.peerStatues, address)
+       if _, found := ma.peerStatues[address]; found {
+           ma.peerStatues[address] -= 1
+       }
+       if ma.peerStatues[address] <= 0 {
+           delete(ma.peerStatues, address)
+       }
    }
+   return
}

func (ma *MetaAggregator) isActive(address pb.ServerAddress) (isActive bool) {
    ma.peerStatuesLock.Lock()
    defer ma.peerStatuesLock.Unlock()
-   _, isActive = ma.peerStatues[address]
-   return
+   var count int
+   count, isActive = ma.peerStatues[address]
+   return count > 0 && isActive
}

func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress) {
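Aside: the change above turns peerStatues from a set into a reference count, so a duplicated add notification for the same peer no longer spawns a second subscription goroutine, and a remove only drops the peer once its count reaches zero. A self-contained sketch of the same pattern (just the idea, not the SeaweedFS API):

    package main

    import "fmt"

    // refCountedSet tracks how many times each member was added; Add reports
    // whether the member is new, mirroring setActive's notDuplicated result.
    type refCountedSet struct {
        counts map[string]int
    }

    func (s *refCountedSet) Add(name string) (isNew bool) {
        if _, found := s.counts[name]; found {
            s.counts[name]++
            return false
        }
        s.counts[name] = 1
        return true
    }

    func (s *refCountedSet) Remove(name string) {
        if _, found := s.counts[name]; found {
            s.counts[name]--
        }
        if s.counts[name] <= 0 {
            delete(s.counts, name)
        }
    }

    func main() {
        peers := &refCountedSet{counts: map[string]int{}}
        fmt.Println(peers.Add("filer-a")) // true: first add, would start a subscription
        fmt.Println(peers.Add("filer-a")) // false: duplicate update, no second subscription
        peers.Remove("filer-a")
        fmt.Println(peers.counts) // map[filer-a:1] — still active until the second remove
    }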
@@ -74,7 +74,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
    srcUrl := fmt.Sprintf("http://%s%s/%s%s",
        s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlPathEscape(srcObject))

-   _, _, resp, err := util.DownloadFile(srcUrl, "")
+   _, _, resp, err := util.DownloadFile(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false))
    if err != nil {
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
        return

@@ -157,7 +157,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
    srcUrl := fmt.Sprintf("http://%s%s/%s%s",
        s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlPathEscape(srcObject))

-   dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader)
+   dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false), rangeHeader)
    if err != nil {
        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
        return
@@ -6,6 +6,7 @@ import (
    "encoding/json"
    "encoding/xml"
    "fmt"
+   "github.com/chrislusf/seaweedfs/weed/security"
    "io"
    "net/http"
    "net/url"

@@ -143,7 +144,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
    destUrl := fmt.Sprintf("http://%s%s/%s%s",
        s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))

-   s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
+   s3a.proxyToFiler(w, r, destUrl, false, passThroughResponse)
}

func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {

@@ -154,7 +155,7 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request
    destUrl := fmt.Sprintf("http://%s%s/%s%s",
        s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))

-   s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
+   s3a.proxyToFiler(w, r, destUrl, false, passThroughResponse)
}

func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {

@@ -165,7 +166,7 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
    destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true",
        s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))

-   s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int) {
+   s3a.proxyToFiler(w, r, destUrl, true, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int) {
        statusCode = http.StatusNoContent
        for k, v := range proxyResponse.Header {
            w.Header()[k] = v

@@ -306,7 +307,7 @@ func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerCli
    return
}

-func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int)) {
+func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, isWrite bool, responseFn func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int)) {

    glog.V(3).Infof("s3 proxying %s to %s", r.Method, destUrl)

@@ -328,6 +329,9 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
        proxyReq.Header[header] = values
    }

+   // ensure that the Authorization header is overriding any previous
+   // Authorization header which might be already present in proxyReq
+   s3a.maybeAddFilerJwtAuthorization(proxyReq, isWrite)
    resp, postErr := client.Do(proxyReq)

    if postErr != nil {

@@ -389,7 +393,9 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
            proxyReq.Header.Add(header, value)
        }
    }

+   // ensure that the Authorization header is overriding any previous
+   // Authorization header which might be already present in proxyReq
+   s3a.maybeAddFilerJwtAuthorization(proxyReq, true)
    resp, postErr := client.Do(proxyReq)

    if postErr != nil {

@@ -435,3 +441,23 @@ func filerErrorToS3Error(errString string) s3err.ErrorCode {
    }
    return s3err.ErrInternalError
}
+
+func (s3a *S3ApiServer) maybeAddFilerJwtAuthorization(r *http.Request, isWrite bool) {
+   encodedJwt := s3a.maybeGetFilerJwtAuthorizationToken(isWrite)
+
+   if encodedJwt == "" {
+       return
+   }
+
+   r.Header.Set("Authorization", "BEARER "+string(encodedJwt))
+}
+
+func (s3a *S3ApiServer) maybeGetFilerJwtAuthorizationToken(isWrite bool) string {
+   var encodedJwt security.EncodedJwt
+   if isWrite {
+       encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.SigningKey, s3a.filerGuard.ExpiresAfterSec)
+   } else {
+       encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.ReadSigningKey, s3a.filerGuard.ReadExpiresAfterSec)
+   }
+   return string(encodedJwt)
+}
@@ -3,6 +3,7 @@ package s3api
import (
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/pb"
+   "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/util"
    "net/http"
    "strings"

@@ -29,13 +30,24 @@ type S3ApiServer struct {
    option *S3ApiServerOption
    iam *IdentityAccessManagement
    randomClientId int32
+   filerGuard *security.Guard
}

func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) {
+   v := util.GetViper()
+   signingKey := v.GetString("jwt.filer_signing.key")
+   v.SetDefault("jwt.filer_signing.expires_after_seconds", 10)
+   expiresAfterSec := v.GetInt("jwt.filer_signing.expires_after_seconds")
+
+   readSigningKey := v.GetString("jwt.filer_signing.read.key")
+   v.SetDefault("jwt.filer_signing.read.expires_after_seconds", 60)
+   readExpiresAfterSec := v.GetInt("jwt.filer_signing.read.expires_after_seconds")
+
    s3ApiServer = &S3ApiServer{
        option: option,
        iam: NewIdentityAccessManagement(option),
        randomClientId: util.RandomInt32(),
+       filerGuard: security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec),
    }

    s3ApiServer.registerRouter(router)
@@ -123,5 +123,5 @@ func (g *Guard) checkWhiteList(w http.ResponseWriter, r *http.Request) error {
    }

    glog.V(0).Infof("Not in whitelist: %s", r.RemoteAddr)
-   return fmt.Errorf("Not in whitelis: %s", r.RemoteAddr)
+   return fmt.Errorf("Not in whitelist: %s", r.RemoteAddr)
}
@@ -13,12 +13,21 @@ import (
type EncodedJwt string
type SigningKey []byte

+// SeaweedFileIdClaims is created by Master server(s) and consumed by Volume server(s),
+// restricting the access this JWT allows to only a single file.
type SeaweedFileIdClaims struct {
    Fid string `json:"fid"`
    jwt.StandardClaims
}

-func GenJwt(signingKey SigningKey, expiresAfterSec int, fileId string) EncodedJwt {
+// SeaweedFilerClaims is created e.g. by S3 proxy server and consumed by Filer server.
+// Right now, it only contains the standard claims; but this might be extended later
+// for more fine-grained permissions.
+type SeaweedFilerClaims struct {
+   jwt.StandardClaims
+}
+
+func GenJwtForVolumeServer(signingKey SigningKey, expiresAfterSec int, fileId string) EncodedJwt {
    if len(signingKey) == 0 {
        return ""
    }

@@ -39,6 +48,28 @@ func GenJwt(signingKey SigningKey, expiresAfterSec int, fileId string) EncodedJw
    return EncodedJwt(encoded)
}

+// GenJwtForFilerServer creates a JSON-web-token for using the authenticated Filer API. Used f.e. inside
+// the S3 API
+func GenJwtForFilerServer(signingKey SigningKey, expiresAfterSec int) EncodedJwt {
+   if len(signingKey) == 0 {
+       return ""
+   }
+
+   claims := SeaweedFilerClaims{
+       jwt.StandardClaims{},
+   }
+   if expiresAfterSec > 0 {
+       claims.ExpiresAt = time.Now().Add(time.Second * time.Duration(expiresAfterSec)).Unix()
+   }
+   t := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
+   encoded, e := t.SignedString([]byte(signingKey))
+   if e != nil {
+       glog.V(0).Infof("Failed to sign claims %+v: %v", t.Claims, e)
+       return ""
+   }
+   return EncodedJwt(encoded)
+}
+
func GetJwt(r *http.Request) EncodedJwt {

    // Get token from query params

@@ -55,9 +86,9 @@ func GetJwt(r *http.Request) EncodedJwt {
    return EncodedJwt(tokenStr)
}

-func DecodeJwt(signingKey SigningKey, tokenString EncodedJwt) (token *jwt.Token, err error) {
+func DecodeJwt(signingKey SigningKey, tokenString EncodedJwt, claims jwt.Claims) (token *jwt.Token, err error) {
    // check exp, nbf
-   return jwt.ParseWithClaims(string(tokenString), &SeaweedFileIdClaims{}, func(token *jwt.Token) (interface{}, error) {
+   return jwt.ParseWithClaims(string(tokenString), claims, func(token *jwt.Token) (interface{}, error) {
        if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
            return nil, fmt.Errorf("unknown token method")
        }
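Aside: a round-trip sketch of the new filer token, assuming the security package as changed here: GenJwtForFilerServer signs a token carrying SeaweedFilerClaims, and the now claims-parameterized DecodeJwt verifies it. The key value is hypothetical:

    package main

    import (
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/security"
    )

    func main() {
        key := security.SigningKey("example-signing-key") // hypothetical key for illustration

        // sign a filer token that expires in 10 seconds
        encoded := security.GenJwtForFilerServer(key, 10)

        // DecodeJwt now takes the claims type to parse into; for filer tokens that is SeaweedFilerClaims
        token, err := security.DecodeJwt(key, encoded, &security.SeaweedFilerClaims{})
        if err != nil || !token.Valid {
            fmt.Println("token rejected:", err)
            return
        }
        fmt.Println("token accepted")
    }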
@@ -71,6 +71,7 @@ type FilerServer struct {
    option *FilerOption
    secret security.SigningKey
    filer *filer.Filer
+   filerGuard *security.Guard
    grpcDialOption grpc.DialOption

    // metrics read from the master

@@ -94,6 +95,15 @@ type FilerServer struct {

func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) {

+   v := util.GetViper()
+   signingKey := v.GetString("jwt.filer_signing.key")
+   v.SetDefault("jwt.filer_signing.expires_after_seconds", 10)
+   expiresAfterSec := v.GetInt("jwt.filer_signing.expires_after_seconds")
+
+   readSigningKey := v.GetString("jwt.filer_signing.read.key")
+   v.SetDefault("jwt.filer_signing.read.expires_after_seconds", 60)
+   readExpiresAfterSec := v.GetInt("jwt.filer_signing.read.expires_after_seconds")
+
    fs = &FilerServer{
        option: option,
        grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"),

@@ -111,13 +121,14 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
        fs.listenersCond.Broadcast()
    })
    fs.filer.Cipher = option.Cipher
+   // we do not support IP whitelist right now
+   fs.filerGuard = security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec)

    fs.checkWithMaster()

    go stats.LoopPushingMetric("filer", string(fs.option.Host), fs.metricsAddress, fs.metricsIntervalSec)
    go fs.filer.KeepMasterClientConnected()

-   v := util.GetViper()
    if !util.LoadConfiguration("filer", false) {
        v.Set("leveldb2.enabled", true)
        v.Set("leveldb2.dir", option.DefaultLevelDbDir)
@@ -1,7 +1,9 @@
package weed_server

import (
+   "errors"
    "github.com/chrislusf/seaweedfs/weed/glog"
+   "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/util"
    "net/http"
    "strings"

@@ -15,6 +17,19 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {

    start := time.Now()

+   if r.Method == "OPTIONS" {
+       stats.FilerRequestCounter.WithLabelValues("options").Inc()
+       OptionsHandler(w, r, false)
+       stats.FilerRequestHistogram.WithLabelValues("options").Observe(time.Since(start).Seconds())
+       return
+   }
+
+   isReadHttpCall := r.Method == "GET" || r.Method == "HEAD"
+   if !fs.maybeCheckJwtAuthorization(r, !isReadHttpCall) {
+       writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt"))
+       return
+   }
+
    // proxy to volume servers
    var fileId string
    if strings.HasPrefix(r.RequestURI, "/?proxyChunkId=") {

@@ -78,20 +93,31 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {
            fs.PostHandler(w, r, contentLength)
            stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds())
        }
-   case "OPTIONS":
-       stats.FilerRequestCounter.WithLabelValues("options").Inc()
-       OptionsHandler(w, r, false)
-       stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds())
    }
}

func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Request) {

+   start := time.Now()
+
+   // We handle OPTIONS first because it never should be authenticated
+   if r.Method == "OPTIONS" {
+       stats.FilerRequestCounter.WithLabelValues("options").Inc()
+       OptionsHandler(w, r, true)
+       stats.FilerRequestHistogram.WithLabelValues("options").Observe(time.Since(start).Seconds())
+       return
+   }
+
+   if !fs.maybeCheckJwtAuthorization(r, false) {
+       writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt"))
+       return
+   }
+
    w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION)
    if r.Header.Get("Origin") != "" {
        w.Header().Set("Access-Control-Allow-Origin", "*")
        w.Header().Set("Access-Control-Allow-Credentials", "true")
    }
-   start := time.Now()
    switch r.Method {
    case "GET":
        stats.FilerRequestCounter.WithLabelValues("get").Inc()

@@ -101,10 +127,6 @@ func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Reque
        stats.FilerRequestCounter.WithLabelValues("head").Inc()
        fs.GetOrHeadHandler(w, r)
        stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds())
-   case "OPTIONS":
-       stats.FilerRequestCounter.WithLabelValues("options").Inc()
-       OptionsHandler(w, r, true)
-       stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds())
    }
}

@@ -116,3 +138,41 @@ func OptionsHandler(w http.ResponseWriter, r *http.Request, isReadOnly bool) {
    }
    w.Header().Add("Access-Control-Allow-Headers", "*")
}
+
+// maybeCheckJwtAuthorization returns true if access should be granted, false if it should be denied
+func (fs *FilerServer) maybeCheckJwtAuthorization(r *http.Request, isWrite bool) bool {
+
+   var signingKey security.SigningKey
+
+   if isWrite {
+       if len(fs.filerGuard.SigningKey) == 0 {
+           return true
+       } else {
+           signingKey = fs.filerGuard.SigningKey
+       }
+   } else {
+       if len(fs.filerGuard.ReadSigningKey) == 0 {
+           return true
+       } else {
+           signingKey = fs.filerGuard.ReadSigningKey
+       }
+   }
+
+   tokenStr := security.GetJwt(r)
+   if tokenStr == "" {
+       glog.V(1).Infof("missing jwt from %s", r.RemoteAddr)
+       return false
+   }
+
+   token, err := security.DecodeJwt(signingKey, tokenStr, &security.SeaweedFilerClaims{})
+   if err != nil {
+       glog.V(1).Infof("jwt verification error from %s: %v", r.RemoteAddr, err)
+       return false
+   }
+   if !token.Valid {
+       glog.V(1).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr)
+       return false
+   } else {
+       return true
+   }
+}
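Aside: the filer handlers now answer 401 with "wrong jwt" when a signing key is configured and the request carries no valid token, while OPTIONS stays unauthenticated for CORS preflight. A stdlib-only sketch of that gate as generic middleware (hypothetical verify function, not the SeaweedFS code itself):

    package main

    import (
        "net/http"
        "strings"
    )

    // requireBearer wraps a handler and rejects requests whose Authorization header
    // does not carry a token accepted by verify. OPTIONS is let through, like the
    // filer handler does for CORS preflight.
    func requireBearer(verify func(token string) bool, next http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if r.Method == "OPTIONS" {
                next.ServeHTTP(w, r)
                return
            }
            token := strings.TrimPrefix(r.Header.Get("Authorization"), "BEARER ")
            if token == "" || !verify(token) {
                http.Error(w, "wrong jwt", http.StatusUnauthorized)
                return
            }
            next.ServeHTTP(w, r)
        })
    }

    func main() {
        ok := func(token string) bool { return token == "let-me-in" } // stand-in for real JWT verification
        http.Handle("/", requireBearer(ok, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("hello"))
        })))
        http.ListenAndServe(":8080", nil)
    }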
@@ -86,7 +86,7 @@ func (ms *MasterServer) LookupVolume(ctx context.Context, req *master_pb.LookupV
    }
    var auth string
    if strings.Contains(result.VolumeOrFileId, ",") { // this is a file id
-       auth = string(security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, result.VolumeOrFileId))
+       auth = string(security.GenJwtForVolumeServer(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, result.VolumeOrFileId))
    }
    resp.VolumeIdLocations = append(resp.VolumeIdLocations, &master_pb.LookupVolumeResponse_VolumeIdLocation{
        VolumeOrFileId: result.VolumeOrFileId,

@@ -173,7 +173,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
            GrpcPort: uint32(dn.GrpcPort),
        },
        Count: count,
-       Auth: string(security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)),
+       Auth: string(security.GenJwtForVolumeServer(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)),
        Replicas: replicas,
    }, nil
}
@@ -149,9 +149,9 @@ func (ms *MasterServer) maybeAddJwtAuthorization(w http.ResponseWriter, fileId s
    }
    var encodedJwt security.EncodedJwt
    if isWrite {
-       encodedJwt = security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fileId)
+       encodedJwt = security.GenJwtForVolumeServer(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fileId)
    } else {
-       encodedJwt = security.GenJwt(ms.guard.ReadSigningKey, ms.guard.ReadExpiresAfterSec, fileId)
+       encodedJwt = security.GenJwtForVolumeServer(ms.guard.ReadSigningKey, ms.guard.ReadExpiresAfterSec, fileId)
    }
    if encodedJwt == "" {
        return
@@ -133,7 +133,7 @@ func (vs *VolumeServer) maybeCheckJwtAuthorization(r *http.Request, vid, fid str
        return false
    }

-   token, err := security.DecodeJwt(signingKey, tokenStr)
+   token, err := security.DecodeJwt(signingKey, tokenStr, &security.SeaweedFileIdClaims{})
    if err != nil {
        glog.V(1).Infof("jwt verification error from %s: %v", r.RemoteAddr, err)
        return false
@@ -180,7 +180,16 @@ func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) e
}

func DownloadFile(fileUrl string, jwt string) (filename string, header http.Header, resp *http.Response, e error) {
-   response, err := client.Get(fileUrl)
+   req, err := http.NewRequest("GET", fileUrl, nil)
+   if err != nil {
+       return "", nil, nil, err
+   }
+
+   if len(jwt) > 0 {
+       req.Header.Set("Authorization", "BEARER "+jwt)
+   }
+
+   response, err := client.Do(req)
    if err != nil {
        return "", nil, nil, err
    }

@@ -358,7 +367,7 @@ func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentCompressed bool
    return false, nil
}

-func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) {
+func ReadUrlAsReaderCloser(fileUrl string, jwt string, rangeHeader string) (io.ReadCloser, error) {

    req, err := http.NewRequest("GET", fileUrl, nil)
    if err != nil {

@@ -370,6 +379,10 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e
        req.Header.Add("Accept-Encoding", "gzip")
    }

+   if len(jwt) > 0 {
+       req.Header.Set("Authorization", "BEARER "+jwt)
+   }
+
    r, err := client.Do(req)
    if err != nil {
        return nil, err
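Aside: a usage sketch for the widened util helpers, assuming the util package as changed here; the URL, port, and byte range are hypothetical, and an empty jwt keeps the old unauthenticated behaviour:

    package main

    import (
        "fmt"
        "io"

        "github.com/chrislusf/seaweedfs/weed/util"
    )

    func main() {
        jwt := "" // empty means no Authorization header is added, as before this change

        // download a file, optionally signed with a read JWT
        filename, _, resp, err := util.DownloadFile("http://localhost:8888/buckets/demo/hello.txt", jwt)
        if err != nil {
            fmt.Println("download failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("got", filename)

        // stream a byte range with the same optional JWT
        rc, err := util.ReadUrlAsReaderCloser("http://localhost:8888/buckets/demo/hello.txt", jwt, "bytes=0-99")
        if err != nil {
            fmt.Println("read failed:", err)
            return
        }
        defer rc.Close()
        io.Copy(io.Discard, rc)
    }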