Merge branch 'master' into messaging
Commit: 3e45d4bec4
@@ -2,9 +2,12 @@ package filer

import (
    "bytes"
    "fmt"
    "io"

    "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
    "github.com/golang/protobuf/jsonpb"
    "github.com/golang/protobuf/proto"
    "io"
)

func ParseS3ConfigurationFromBytes[T proto.Message](content []byte, config T) error {
@@ -23,3 +26,18 @@ func ProtoToText(writer io.Writer, config proto.Message) error {

    return m.Marshal(writer, config)
}

// CheckDuplicateAccessKey returns an error when s3cfg contains duplicate access keys
func CheckDuplicateAccessKey(s3cfg *iam_pb.S3ApiConfiguration) error {
    accessKeySet := make(map[string]string)
    for _, ident := range s3cfg.Identities {
        for _, cred := range ident.Credentials {
            if userName, found := accessKeySet[cred.AccessKey]; !found {
                accessKeySet[cred.AccessKey] = ident.Name
            } else {
                return fmt.Errorf("duplicate accessKey[%s], already configured in user[%s]", cred.AccessKey, userName)
            }
        }
    }
    return nil
}
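Not part of the diff: a minimal sketch of calling the new helper directly. The identity names and keys below are made up; the types and fields come from iam_pb as used in the test that follows.

```go
package main

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
)

func main() {
    // Two identities that accidentally share the same access key.
    cfg := &iam_pb.S3ApiConfiguration{
        Identities: []*iam_pb.Identity{
            {Name: "alice", Credentials: []*iam_pb.Credential{{AccessKey: "key1", SecretKey: "s1"}}},
            {Name: "bob", Credentials: []*iam_pb.Credential{{AccessKey: "key1", SecretKey: "s2"}}},
        },
    }
    if err := filer.CheckDuplicateAccessKey(cfg); err != nil {
        // prints: duplicate accessKey[key1], already configured in user[alice]
        fmt.Println(err)
    }
}
```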
@@ -2,9 +2,10 @@ package filer

import (
    "bytes"
    . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
    "testing"

    . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"

    "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"

    "github.com/stretchr/testify/assert"
@@ -55,3 +56,93 @@ func TestS3Conf(t *testing.T) {
    assert.Equal(t, "some_access_key1", s3ConfSaved.Identities[0].Credentials[0].AccessKey)
    assert.Equal(t, "some_secret_key2", s3ConfSaved.Identities[1].Credentials[0].SecretKey)
}

func TestCheckDuplicateAccessKey(t *testing.T) {
    var tests = []struct {
        s3cfg *iam_pb.S3ApiConfiguration
        err   string
    }{
        {
            &iam_pb.S3ApiConfiguration{
                Identities: []*iam_pb.Identity{
                    {
                        Name: "some_name",
                        Credentials: []*iam_pb.Credential{
                            {
                                AccessKey: "some_access_key1",
                                SecretKey: "some_secret_key1",
                            },
                        },
                        Actions: []string{
                            ACTION_ADMIN,
                            ACTION_READ,
                            ACTION_WRITE,
                        },
                    },
                    {
                        Name: "some_read_only_user",
                        Credentials: []*iam_pb.Credential{
                            {
                                AccessKey: "some_access_key2",
                                SecretKey: "some_secret_key2",
                            },
                        },
                        Actions: []string{
                            ACTION_READ,
                            ACTION_TAGGING,
                            ACTION_LIST,
                        },
                    },
                },
            },
            "",
        },
        {
            &iam_pb.S3ApiConfiguration{
                Identities: []*iam_pb.Identity{
                    {
                        Name: "some_name",
                        Credentials: []*iam_pb.Credential{
                            {
                                AccessKey: "some_access_key1",
                                SecretKey: "some_secret_key1",
                            },
                        },
                        Actions: []string{
                            ACTION_ADMIN,
                            ACTION_READ,
                            ACTION_WRITE,
                        },
                    },
                    {
                        Name: "some_read_only_user",
                        Credentials: []*iam_pb.Credential{
                            {
                                AccessKey: "some_access_key1",
                                SecretKey: "some_secret_key1",
                            },
                        },
                        Actions: []string{
                            ACTION_READ,
                            ACTION_TAGGING,
                            ACTION_LIST,
                        },
                    },
                },
            },
            "duplicate accessKey[some_access_key1], already configured in user[some_name]",
        },
    }
    for i, test := range tests {
        err := CheckDuplicateAccessKey(test.s3cfg)
        var errString string
        if err == nil {
            errString = ""
        } else {
            errString = err.Error()
        }
        if errString != test.err {
            t.Errorf("[%d]: got: %s expected: %s", i, errString, test.err)
        }
    }
}
@@ -109,6 +109,11 @@ func (iam *IdentityAccessManagement) LoadS3ApiConfigurationFromBytes(content []b
        glog.Warningf("unmarshal error: %v", err)
        return fmt.Errorf("unmarshal error: %v", err)
    }

    if err := filer.CheckDuplicateAccessKey(s3ApiConfiguration); err != nil {
        return err
    }

    if err := iam.loadS3ApiConfiguration(s3ApiConfiguration); err != nil {
        return err
    }
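Not part of the diff: a sketch of the load path this hunk guards. The JSON field names are assumed to match the jsonpb form of iam_pb.S3ApiConfiguration, and the user names and keys are invented; the point is that a configuration reusing an access key is now rejected before it is applied.

```go
package main

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
)

func main() {
    // Assumed JSON shape; both identities reuse access key "k".
    data := []byte(`{
      "identities": [
        {"name": "user_a", "credentials": [{"accessKey": "k", "secretKey": "s1"}], "actions": ["Read"]},
        {"name": "user_b", "credentials": [{"accessKey": "k", "secretKey": "s2"}], "actions": ["Read"]}
      ]
    }`)

    s3cfg := &iam_pb.S3ApiConfiguration{}
    if err := filer.ParseS3ConfigurationFromBytes(data, s3cfg); err != nil {
        fmt.Println("unmarshal error:", err)
        return
    }
    // The loader now runs this check before applying the configuration.
    if err := filer.CheckDuplicateAccessKey(s3cfg); err != nil {
        fmt.Println(err) // duplicate accessKey[k], already configured in user[user_a]
    }
}
```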
@@ -90,6 +90,9 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq

    peerAddress := findClientAddress(stream.Context(), 0)

    // use negative client id to differentiate from addClient()/deleteClient() used in SubscribeMetadata()
    req.ClientId = -req.ClientId

    alreadyKnown, clientName := fs.addClient(req.ClientName, peerAddress, req.ClientId)
    if alreadyKnown {
        return fmt.Errorf("duplicated local subscription detected for client %s id %d", clientName, req.ClientId)
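Not part of the diff: a toy registry, not the actual FilerServer bookkeeping, sketching why the sign flip keeps a local subscription from colliding with a remote SubscribeMetadata() subscription that reuses the same client id.

```go
package main

import "fmt"

// registry is an illustrative stand-in keyed the same way duplicate
// subscriptions are detected: by client id.
type registry struct {
    clients map[int32]string // clientId -> clientName
}

func (r *registry) addClient(name string, id int32) (alreadyKnown bool) {
    if _, ok := r.clients[id]; ok {
        return true
    }
    r.clients[id] = name
    return false
}

func main() {
    r := &registry{clients: map[int32]string{}}

    var clientId int32 = 42
    // Remote subscription (SubscribeMetadata) registers the id as-is.
    fmt.Println(r.addClient("mount@host", clientId)) // false: first registration
    // Local subscription (SubscribeLocalMetadata) flips the sign, so the same
    // process can hold both kinds of subscription without a false duplicate.
    fmt.Println(r.addClient("mount@host", -clientId)) // false: distinct key
    // A second local subscription with the same id is still caught.
    fmt.Println(r.addClient("mount@host", -clientId)) // true: duplicate
}
```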
@@ -2,14 +2,14 @@ package shell

import (
    "bytes"
    "errors"
    "flag"
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/filer"
    "io"
    "sort"
    "strings"

    "github.com/chrislusf/seaweedfs/weed/filer"

    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
)
@@ -165,15 +165,8 @@ func (c *commandS3Configure) Do(args []string, commandEnv *CommandEnv, writer io
        s3cfg.Identities = append(s3cfg.Identities, &identity)
    }

    accessKeySet := make(map[string]string)
    for _, ident := range s3cfg.Identities {
        for _, cred := range ident.Credentials {
            if userName, found := accessKeySet[cred.AccessKey]; !found {
                accessKeySet[cred.AccessKey] = ident.Name
            } else {
                return errors.New(fmt.Sprintf("duplicate accessKey[%s], already configured in user[%s]", cred.AccessKey, userName))
            }
        }
    }
    if err = filer.CheckDuplicateAccessKey(s3cfg); err != nil {
        return err
    }

    buf.Reset()
@@ -5,6 +5,7 @@ import (
    "os"
    "path/filepath"
    "runtime"
    "strconv"
    "strings"
    "sync"
    "time"
@@ -208,8 +209,18 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, con
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind) {

    workerNum := runtime.NumCPU()
    if workerNum <= 10 {
        workerNum = 10
    val, ok := os.LookupEnv("GOMAXPROCS")
    if ok {
        num, err := strconv.Atoi(val)
        if err != nil || num < 1 {
            num = 10
            glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10")
        }
        workerNum = num
    } else {
        if workerNum <= 10 {
            workerNum = 10
        }
    }
    l.concurrentLoadingVolumes(needleMapKind, workerNum)
    glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
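Not part of the diff: the worker-count selection extracted as a standalone sketch (the helper name loadWorkerCount is illustrative). Use GOMAXPROCS when it is set, defaulting to 10 if it cannot be parsed; otherwise use runtime.NumCPU() with a floor of 10.

```go
package main

import (
    "fmt"
    "os"
    "runtime"
    "strconv"
)

// loadWorkerCount mirrors the selection logic added above: an explicit
// GOMAXPROCS value wins, an invalid value falls back to 10, and without
// GOMAXPROCS the CPU count is used but never fewer than 10 workers.
func loadWorkerCount() int {
    workerNum := runtime.NumCPU()
    if val, ok := os.LookupEnv("GOMAXPROCS"); ok {
        num, err := strconv.Atoi(val)
        if err != nil || num < 1 {
            num = 10 // unparsable or non-positive value: default to 10
        }
        workerNum = num
    } else if workerNum <= 10 {
        workerNum = 10
    }
    return workerNum
}

func main() {
    fmt.Println("volume loading workers:", loadWorkerCount())
}
```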