package filer2

import (
	"context"
	"math"
	"sync"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/util"
)
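
// BucketName is the name of a bucket, stored as a directory directly under
// the filer's buckets path.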
type BucketName string

// BucketOption holds the per-bucket settings tracked by the filer.
type BucketOption struct {
	Name        BucketName
	Replication string
	fsync       bool
}

// FilerBuckets caches the known buckets, guarded by the embedded RWMutex.
type FilerBuckets struct {
	dirBucketsPath string
	buckets        map[BucketName]*BucketOption
	sync.RWMutex
}
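
// LoadBuckets builds the in-memory bucket map by listing the entries under
// the filer's buckets directory, marking buckets configured for fsync.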
func (f *Filer) LoadBuckets() {
	f.buckets = &FilerBuckets{
		buckets: make(map[BucketName]*BucketOption),
	}

	limit := math.MaxInt32

	entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit)
	if err != nil {
		glog.V(1).Infof("no buckets found: %v", err)
		return
	}

	// buckets named in f.FsyncBuckets get the fsync option enabled
	shouldFsyncMap := make(map[string]bool)
	for _, bucket := range f.FsyncBuckets {
		shouldFsyncMap[bucket] = true
	}

	glog.V(1).Infof("buckets found: %d", len(entries))

	f.buckets.Lock()
	for _, entry := range entries {
		_, shouldFsync := shouldFsyncMap[entry.Name()]
		f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{
			Name:        BucketName(entry.Name()),
			Replication: entry.Replication,
			fsync:       shouldFsync,
		}
	}
	f.buckets.Unlock()
}
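
// ReadBucketOption returns the replication and fsync settings of the named
// bucket, or zero values if the bucket is not known.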
func (f *Filer) ReadBucketOption(bucketName string) (replication string, fsync bool) {

	f.buckets.RLock()
	defer f.buckets.RUnlock()

	option, found := f.buckets.buckets[BucketName(bucketName)]
	if !found {
		return "", false
	}
	return option.Replication, option.fsync
}
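
// isBucket reports whether the entry is a registered bucket, i.e. a directory
// directly under the filer's buckets path that is present in the bucket map.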
func (f *Filer) isBucket(entry *Entry) bool {
	if !entry.IsDirectory() {
		return false
	}
	parent, dirName := entry.FullPath.DirAndName()
	if parent != f.DirBucketsPath {
		return false
	}

	f.buckets.RLock()
	defer f.buckets.RUnlock()

	_, found := f.buckets.buckets[BucketName(dirName)]
	return found
}
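
// maybeAddBucket registers the entry as a bucket when it is a directory
// created directly under the filer's buckets path.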
func (f *Filer) maybeAddBucket(entry *Entry) {
	if !entry.IsDirectory() {
		return
	}
	parent, dirName := entry.FullPath.DirAndName()
	if parent != f.DirBucketsPath {
		return
	}
	f.addBucket(dirName, &BucketOption{
		Name:        BucketName(dirName),
		Replication: entry.Replication,
	})
}
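
// addBucket records the bucket and its options in the in-memory map.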
func (f *Filer) addBucket(bucketName string, bucketOption *BucketOption) {

	f.buckets.Lock()
	defer f.buckets.Unlock()

	f.buckets.buckets[BucketName(bucketName)] = bucketOption
}
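
// deleteBucket removes the bucket from the in-memory map.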
func (f *Filer) deleteBucket(bucketName string) {

	f.buckets.Lock()
	defer f.buckets.Unlock()

	delete(f.buckets.buckets, BucketName(bucketName))
}