Mirror of https://github.com/seaweedfs/seaweedfs.git

Commit 9c0459685e: Merge branch 'master' into messaging
@@ -18,7 +18,7 @@ type ReaderCache struct {
 }
 
 type SingleChunkCacher struct {
-	sync.RWMutex
+	sync.Mutex
+	cond        *sync.Cond
 	parent      *ReaderCache
 	chunkFileId string
@@ -183,8 +183,8 @@ func (s *SingleChunkCacher) destroy() {
 }
 
 func (s *SingleChunkCacher) readChunkAt(buf []byte, offset int64) (int, error) {
-	s.RLock()
-	defer s.RUnlock()
+	s.Lock()
+	defer s.Unlock()
 
+	for s.completedTime.IsZero() {
+		s.cond.Wait()
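The two hunks above replace SingleChunkCacher's sync.RWMutex with a plain sync.Mutex paired with a sync.Cond: readChunkAt now blocks on the condition variable until completedTime is set by the download path, instead of racing with it. A minimal standalone sketch of that wait pattern, using illustrative names (cacher, download) rather than the actual SeaweedFS types:

package main

import (
	"fmt"
	"sync"
	"time"
)

// cacher imitates the coordination in SingleChunkCacher: the download path
// fills the buffer and broadcasts, while readers wait on the condition
// variable until completion is recorded.
type cacher struct {
	sync.Mutex
	cond          *sync.Cond
	data          []byte
	completedTime time.Time
}

func newCacher() *cacher {
	c := &cacher{}
	c.cond = sync.NewCond(c) // the cond shares the embedded mutex
	return c
}

// download simulates fetching the chunk, then wakes every waiting reader.
func (c *cacher) download() {
	payload := []byte("chunk-bytes")
	time.Sleep(20 * time.Millisecond) // stand-in for the network fetch

	c.Lock()
	c.data = payload
	c.completedTime = time.Now()
	c.Unlock()
	c.cond.Broadcast()
}

// readChunkAt blocks until the download has completed, then copies the data.
func (c *cacher) readChunkAt(buf []byte) (int, error) {
	c.Lock()
	defer c.Unlock()

	for c.completedTime.IsZero() { // re-check the predicate after each wakeup
		c.cond.Wait()
	}
	return copy(buf, c.data), nil
}

func main() {
	c := newCacher()
	go c.download()

	buf := make([]byte, 32)
	n, _ := c.readChunkAt(buf)
	fmt.Printf("read %d bytes: %s\n", n, buf[:n])
}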
@@ -1,8 +1,8 @@
 package filer
 
 type ReaderPattern struct {
-	isStreaming    bool
-	lastReadOffset int64
+	isSequentialCounter int64
+	lastReadStopOffset  int64
 }
 
 // For streaming read: only cache the first chunk
@@ -10,29 +10,20 @@ type ReaderPattern struct {
 
 func NewReaderPattern() *ReaderPattern {
 	return &ReaderPattern{
-		isStreaming:    true,
-		lastReadOffset: -1,
+		isSequentialCounter: 0,
+		lastReadStopOffset:  0,
 	}
 }
 
 func (rp *ReaderPattern) MonitorReadAt(offset int64, size int) {
-	isStreaming := true
-	if rp.lastReadOffset > offset {
-		isStreaming = false
-	}
-	if rp.lastReadOffset == -1 {
-		if offset != 0 {
-			isStreaming = false
-		}
-	}
-	rp.lastReadOffset = offset
-	rp.isStreaming = isStreaming
-}
-
-func (rp *ReaderPattern) IsStreamingMode() bool {
-	return rp.isStreaming
+	if rp.lastReadStopOffset == offset {
+		rp.isSequentialCounter++
+	} else {
+		rp.isSequentialCounter--
+	}
+	rp.lastReadStopOffset = offset + int64(size)
 }
 
 func (rp *ReaderPattern) IsRandomMode() bool {
-	return !rp.isStreaming
+	return rp.isSequentialCounter >= 0
 }
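In the new heuristic, each read that starts exactly where the previous read stopped increments isSequentialCounter, and any other offset decrements it; the sign of the counter then classifies the access pattern. A small self-contained sketch of that idea (readPattern is a hypothetical standalone type, not the ReaderPattern above):

package main

import "fmt"

// readPattern copies the counter heuristic: +1 when a read begins exactly
// where the previous one stopped, -1 otherwise.
type readPattern struct {
	isSequentialCounter int64
	lastReadStopOffset  int64
}

func (p *readPattern) monitorReadAt(offset int64, size int) {
	if p.lastReadStopOffset == offset {
		p.isSequentialCounter++
	} else {
		p.isSequentialCounter--
	}
	p.lastReadStopOffset = offset + int64(size)
}

// isSequential reports whether sequential reads dominate so far.
func (p *readPattern) isSequential() bool {
	return p.isSequentialCounter >= 0
}

func main() {
	p := &readPattern{}

	// Back-to-back 4 KiB reads keep the counter positive.
	for _, off := range []int64{0, 4096, 8192} {
		p.monitorReadAt(off, 4096)
	}
	fmt.Println("after streaming reads:", p.isSequential()) // true

	// Scattered reads push the counter below zero.
	for _, off := range []int64{1 << 20, 42, 7 << 10, 3, 999999} {
		p.monitorReadAt(off, 512)
	}
	fmt.Println("after random reads:", p.isSequential()) // false
}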
@@ -29,14 +29,14 @@ func newPageWriter(fh *FileHandle, chunkSize int64) *PageWriter {
 	return pw
 }
 
-func (pw *PageWriter) AddPage(offset int64, data []byte, isSequentail bool) {
+func (pw *PageWriter) AddPage(offset int64, data []byte, isSequential bool) {
 
 	glog.V(4).Infof("%v AddPage [%d, %d)", pw.fh.fh, offset, offset+int64(len(data)))
 
 	chunkIndex := offset / pw.chunkSize
 	for i := chunkIndex; len(data) > 0; i++ {
 		writeSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset)
-		pw.addToOneChunk(i, offset, data[:writeSize], isSequentail)
+		pw.addToOneChunk(i, offset, data[:writeSize], isSequential)
 		offset += writeSize
 		data = data[writeSize:]
 	}
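Besides fixing the isSequentail typo, this hunk is the loop that cuts an incoming write at every chunk boundary so each piece lands in a single fixed-size chunk: writeSize is the smaller of the data remaining and the bytes left before the next boundary. A standalone sketch of that arithmetic (splitWrite and the sizes are illustrative; the built-in min requires Go 1.21 or newer):

package main

import "fmt"

// splitWrite cuts a write at every chunkSize boundary so each slice stays
// inside one chunk, mirroring the loop above.
func splitWrite(offset int64, data []byte, chunkSize int64) {
	chunkIndex := offset / chunkSize
	for i := chunkIndex; len(data) > 0; i++ {
		// Bytes left before the next chunk boundary, capped by the data remaining.
		writeSize := min(int64(len(data)), (i+1)*chunkSize-offset)
		fmt.Printf("chunk %d: write [%d, %d)\n", i, offset, offset+writeSize)
		offset += writeSize
		data = data[writeSize:]
	}
}

func main() {
	// A 10 KiB write that starts 1 KiB before a 4 KiB chunk boundary
	// is split into four pieces: 1 KiB, 4 KiB, 4 KiB, 1 KiB.
	splitWrite(3*1024, make([]byte, 10*1024), 4*1024)
}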
@@ -1,8 +1,8 @@
 package mount
 
 type WriterPattern struct {
-	isStreaming     bool
-	lastWriteOffset int64
+	isSequentialCounter int64
+	lastWriteStopOffset int64
 	chunkSize           int64
 }
@@ -12,33 +12,21 @@ type WriterPattern struct {
 
 func NewWriterPattern(chunkSize int64) *WriterPattern {
 	return &WriterPattern{
-		isStreaming:     true,
-		lastWriteOffset: -1,
+		isSequentialCounter: 0,
+		lastWriteStopOffset: 0,
 		chunkSize:           chunkSize,
 	}
 }
 
 func (rp *WriterPattern) MonitorWriteAt(offset int64, size int) {
-	if rp.lastWriteOffset > offset {
-		rp.isStreaming = false
-	}
-	if rp.lastWriteOffset == -1 {
-		if offset != 0 {
-			rp.isStreaming = false
-		}
-	}
-	rp.lastWriteOffset = offset
-}
-
-func (rp *WriterPattern) IsStreamingMode() bool {
-	return rp.isStreaming
-}
-
-func (rp *WriterPattern) IsRandomMode() bool {
-	return !rp.isStreaming
-}
-
-func (rp *WriterPattern) Reset() {
-	rp.isStreaming = true
-	rp.lastWriteOffset = -1
+	if rp.lastWriteStopOffset == offset {
+		rp.isSequentialCounter++
+	} else {
+		rp.isSequentialCounter--
+	}
+	rp.lastWriteStopOffset = offset + int64(size)
+}
+
+func (rp *WriterPattern) IsSequentialMode() bool {
+	return rp.isSequentialCounter >= 0
 }
@@ -58,7 +58,7 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr
 	entry.Attributes.FileSize = uint64(max(offset+int64(len(data)), int64(entry.Attributes.FileSize)))
 	// glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
 
-	fh.dirtyPages.AddPage(offset, data, fh.dirtyPages.writerPattern.IsStreamingMode())
+	fh.dirtyPages.AddPage(offset, data, fh.dirtyPages.writerPattern.IsSequentialMode())
 
 	written = uint32(len(data))
 
@@ -2,6 +2,7 @@ package shell
 
 import (
 	"bytes"
+	"errors"
 	"flag"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/filer"
@@ -164,6 +165,17 @@ func (c *commandS3Configure) Do(args []string, commandEnv *CommandEnv, writer io
 		s3cfg.Identities = append(s3cfg.Identities, &identity)
 	}
 
+	accessKeySet := make(map[string]string)
+	for _, ident := range s3cfg.Identities {
+		for _, cred := range ident.Credentials {
+			if userName, found := accessKeySet[cred.AccessKey]; !found {
+				accessKeySet[cred.AccessKey] = ident.Name
+			} else {
+				return errors.New(fmt.Sprintf("duplicate accessKey[%s], already configured in user[%s]", cred.AccessKey, userName))
+			}
+		}
+	}
+
 	buf.Reset()
 	filer.ProtoToText(&buf, s3cfg)
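The validation added above builds a map from access key to the identity that first claimed it and rejects the configuration on a repeat. A self-contained sketch of the same check with stand-in types (credential and identity here are illustrative, not the real protobuf-generated config types); note that fmt.Errorf is the more idiomatic spelling of errors.New(fmt.Sprintf(...)) and is what tools like staticcheck suggest:

package main

import "fmt"

// Illustrative stand-ins for the s3 config types.
type credential struct{ AccessKey string }
type identity struct {
	Name        string
	Credentials []credential
}

// checkDuplicateAccessKeys enforces that every access key belongs to at most
// one identity, mirroring the check in the hunk above.
func checkDuplicateAccessKeys(identities []identity) error {
	accessKeySet := make(map[string]string) // accessKey -> owning identity name
	for _, ident := range identities {
		for _, cred := range ident.Credentials {
			if owner, found := accessKeySet[cred.AccessKey]; found {
				return fmt.Errorf("duplicate accessKey[%s], already configured in user[%s]", cred.AccessKey, owner)
			}
			accessKeySet[cred.AccessKey] = ident.Name
		}
	}
	return nil
}

func main() {
	ids := []identity{
		{Name: "alice", Credentials: []credential{{AccessKey: "AKIA111"}}},
		{Name: "bob", Credentials: []credential{{AccessKey: "AKIA111"}}},
	}
	fmt.Println(checkDuplicateAccessKeys(ids)) // reports the duplicate key owned by alice
}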
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"sync"
 	"time"
@@ -206,7 +207,11 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, con
 
 func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind) {
 
-	l.concurrentLoadingVolumes(needleMapKind, 10)
+	workerNum := runtime.NumCPU()
+	if workerNum <= 10 {
+		workerNum = 10
+	}
+	l.concurrentLoadingVolumes(needleMapKind, workerNum)
 	glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
 
 	l.loadAllEcShards()
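loadExistingVolumes now sizes its loader pool from runtime.NumCPU with a floor of 10 instead of using a fixed 10 workers. A standalone sketch of that sizing rule driving a trivial worker pool (the task body is a placeholder, not actual volume loading):

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// workerCount mirrors the sizing rule above: use the CPU count, but never
// fewer than 10 concurrent workers.
func workerCount() int {
	n := runtime.NumCPU()
	if n <= 10 {
		n = 10
	}
	return n
}

func main() {
	tasks := make(chan int)
	var wg sync.WaitGroup

	for w := 0; w < workerCount(); w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range tasks {
				_ = id // placeholder for loading one volume
			}
		}()
	}

	for i := 0; i < 100; i++ {
		tasks <- i
	}
	close(tasks)
	wg.Wait()
	fmt.Printf("processed 100 tasks with %d workers\n", workerCount())
}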