Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-11-24 02:59:13 +08:00)

commit 3e23421608
Merge branch 'master' of https://github.com/seaweedfs/seaweedfs
@@ -21,14 +21,16 @@ import (
 )
 
 const (
 	charsetUpper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
 	charset = charsetUpper + "abcdefghijklmnopqrstuvwxyz/"
 	policyDocumentVersion = "2012-10-17"
 	StatementActionAdmin = "*"
 	StatementActionWrite = "Put*"
+	StatementActionWriteAcp = "PutBucketAcl"
 	StatementActionRead = "Get*"
+	StatementActionReadAcp = "GetBucketAcl"
 	StatementActionList = "List*"
 	StatementActionTagging = "Tagging*"
 )
 
 var (
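For context, the statement action strings above are the patterns referenced in IAM policy documents, so a statement that grants only ACL management would use the two new values. Below is a small, self-contained sketch of such a policy document; the JSON layout follows the standard AWS policy format, the constants are redeclared locally only so the sketch compiles on its own, and the bucket ARN is purely illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

// Redeclared from the const block above so this sketch stands alone.
const (
	policyDocumentVersion   = "2012-10-17"
	StatementActionReadAcp  = "GetBucketAcl"
	StatementActionWriteAcp = "PutBucketAcl"
)

func main() {
	// Hypothetical policy statement allowing only ACL reads/writes on one bucket.
	policy := map[string]interface{}{
		"Version": policyDocumentVersion,
		"Statement": []map[string]interface{}{{
			"Effect":   "Allow",
			"Action":   []string{StatementActionReadAcp, StatementActionWriteAcp},
			"Resource": []string{"arn:aws:s3:::bucket1"},
		}},
	}
	out, _ := json.MarshalIndent(policy, "", "  ")
	fmt.Println(string(out))
}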
@@ -44,8 +46,12 @@ func MapToStatementAction(action string) string {
 		return s3_constants.ACTION_ADMIN
 	case StatementActionWrite:
 		return s3_constants.ACTION_WRITE
+	case StatementActionWriteAcp:
+		return s3_constants.ACTION_WRITE_ACP
 	case StatementActionRead:
 		return s3_constants.ACTION_READ
+	case StatementActionReadAcp:
+		return s3_constants.ACTION_READ_ACP
 	case StatementActionList:
 		return s3_constants.ACTION_LIST
 	case StatementActionTagging:
@@ -61,8 +67,12 @@ func MapToIdentitiesAction(action string) string {
 		return StatementActionAdmin
 	case s3_constants.ACTION_WRITE:
 		return StatementActionWrite
+	case s3_constants.ACTION_WRITE_ACP:
+		return StatementActionWriteAcp
 	case s3_constants.ACTION_READ:
 		return StatementActionRead
+	case s3_constants.ACTION_READ_ACP:
+		return StatementActionReadAcp
 	case s3_constants.ACTION_LIST:
 		return StatementActionList
 	case s3_constants.ACTION_TAGGING:
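Read together, the two hunks above keep the mapping between policy statement actions and identity actions symmetric. The following sketch shows how MapToStatementAction reads once the new cases are folded in; only the case/return pairs come from the diff, while the Tagging return and the empty default are assumptions, and MapToIdentitiesAction mirrors it in the opposite direction:

// Maps a policy-document statement action pattern (e.g. "Put*", "GetBucketAcl")
// to the corresponding identity action constant (e.g. "Write", "ReadAcp").
func MapToStatementAction(action string) string {
	switch action {
	case StatementActionAdmin:
		return s3_constants.ACTION_ADMIN
	case StatementActionWrite:
		return s3_constants.ACTION_WRITE
	case StatementActionWriteAcp:
		return s3_constants.ACTION_WRITE_ACP
	case StatementActionRead:
		return s3_constants.ACTION_READ
	case StatementActionReadAcp:
		return s3_constants.ACTION_READ_ACP
	case StatementActionList:
		return s3_constants.ACTION_LIST
	case StatementActionTagging:
		return s3_constants.ACTION_TAGGING // assumed; the hunk ends at this case
	default:
		return "" // assumed fallback for unknown actions
	}
}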
@@ -89,10 +89,13 @@ func TestCanDo(t *testing.T) {
 		Actions: []Action{
 			"Read:bucket1",
 			"Write:bucket1/*",
+			"WriteAcp:bucket1",
 		},
 	}
 	assert.Equal(t, true, ident2.canDo(ACTION_READ, "bucket1", "/a/b/c/d.txt"))
 	assert.Equal(t, true, ident2.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt"))
+	assert.Equal(t, true, ident2.canDo(ACTION_WRITE_ACP, "bucket1", ""))
+	assert.Equal(t, false, ident2.canDo(ACTION_READ_ACP, "bucket1", ""))
 	assert.Equal(t, false, ident2.canDo(ACTION_LIST, "bucket1", "/a/b/c/d.txt"))
 
 	// across buckets
@@ -106,15 +109,18 @@ func TestCanDo(t *testing.T) {
 	assert.Equal(t, true, ident3.canDo(ACTION_READ, "bucket1", "/a/b/c/d.txt"))
 	assert.Equal(t, true, ident3.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt"))
 	assert.Equal(t, false, ident3.canDo(ACTION_LIST, "bucket1", "/a/b/other/some"))
+	assert.Equal(t, false, ident3.canDo(ACTION_WRITE_ACP, "bucket1", ""))
 
 	// partial buckets
 	ident4 := &Identity{
 		Name: "anything",
 		Actions: []Action{
 			"Read:special_*",
+			"ReadAcp:special_*",
 		},
 	}
 	assert.Equal(t, true, ident4.canDo(ACTION_READ, "special_bucket", "/a/b/c/d.txt"))
+	assert.Equal(t, true, ident4.canDo(ACTION_READ_ACP, "special_bucket", ""))
 	assert.Equal(t, false, ident4.canDo(ACTION_READ, "bucket1", "/a/b/c/d.txt"))
 
 	// admin buckets
@@ -125,7 +131,9 @@ func TestCanDo(t *testing.T) {
 		},
 	}
 	assert.Equal(t, true, ident5.canDo(ACTION_READ, "special_bucket", "/a/b/c/d.txt"))
+	assert.Equal(t, true, ident5.canDo(ACTION_READ_ACP, "special_bucket", ""))
 	assert.Equal(t, true, ident5.canDo(ACTION_WRITE, "special_bucket", "/a/b/c/d.txt"))
+	assert.Equal(t, true, ident5.canDo(ACTION_WRITE_ACP, "special_bucket", ""))
 
 	// anonymous buckets
 	ident6 := &Identity{
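The updated TestCanDo cases above show that the Acp grants are checked independently of the plain Read/Write grants. As a further illustration, here is a hypothetical test case in the same style; it assumes the same package, Identity type, canDo helper and assert import used by the existing test, and the name TestCanDoAclOnly is made up:

func TestCanDoAclOnly(t *testing.T) {
	// Identity that may manage ACLs on bucket1 but has no data access.
	aclOnly := &Identity{
		Name: "acl-only",
		Actions: []Action{
			"ReadAcp:bucket1",
			"WriteAcp:bucket1",
		},
	}
	assert.Equal(t, true, aclOnly.canDo(ACTION_READ_ACP, "bucket1", ""))
	assert.Equal(t, true, aclOnly.canDo(ACTION_WRITE_ACP, "bucket1", ""))
	// Plain data reads and writes are still denied.
	assert.Equal(t, false, aclOnly.canDo(ACTION_READ, "bucket1", "/a/b/c/d.txt"))
	assert.Equal(t, false, aclOnly.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt"))
}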
@@ -1,11 +1,13 @@
 package s3_constants
 
 const (
 	ACTION_READ = "Read"
+	ACTION_READ_ACP = "ReadAcp"
 	ACTION_WRITE = "Write"
+	ACTION_WRITE_ACP = "WriteAcp"
 	ACTION_ADMIN = "Admin"
 	ACTION_TAGGING = "Tagging"
 	ACTION_LIST = "List"
 
 	SeaweedStorageDestinationHeader = "x-seaweedfs-destination"
 	MultipartUploadsFolder = ".uploads"
@@ -7,7 +7,7 @@ import (
 var (
 	CircuitBreakerConfigDir = "/etc/s3"
 	CircuitBreakerConfigFile = "circuit_breaker.json"
-	AllowedActions = []string{ACTION_READ, ACTION_WRITE, ACTION_LIST, ACTION_TAGGING, ACTION_ADMIN}
+	AllowedActions = []string{ACTION_READ, ACTION_READ_ACP, ACTION_WRITE, ACTION_WRITE_ACP, ACTION_LIST, ACTION_TAGGING, ACTION_ADMIN}
 	LimitTypeCount = "Count"
 	LimitTypeBytes = "MB"
 	Separator = ":"
@@ -147,7 +147,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "")
 
 	// PutObjectACL
-	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "")
+	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectAclHandler, ACTION_WRITE_ACP)), "PUT")).Queries("acl", "")
 	// PutObjectRetention
 	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "")
 	// PutObjectLegalHold
@@ -156,7 +156,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "")
 
 	// GetObjectACL
-	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, ACTION_READ)), "GET")).Queries("acl", "")
+	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, ACTION_READ_ACP)), "GET")).Queries("acl", "")
 
 	// objects with query
 
@@ -183,9 +183,9 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 	bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)), "DELETE")).Queries("delete", "")
 
 	// GetBucketACL
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketAclHandler, ACTION_READ)), "GET")).Queries("acl", "")
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketAclHandler, ACTION_READ_ACP)), "GET")).Queries("acl", "")
 	// PutBucketACL
-	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "")
+	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketAclHandler, ACTION_WRITE_ACP)), "PUT")).Queries("acl", "")
 
 	// GetBucketPolicy
 	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "")