package filer

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"google.golang.org/grpc"
	jsonpb "google.golang.org/protobuf/encoding/protojson"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"github.com/seaweedfs/seaweedfs/weed/wdclient"
	"github.com/viant/ptrie"
)

const (
	DirectoryEtcRoot      = "/etc/"
	DirectoryEtcSeaweedFS = "/etc/seaweedfs"
	DirectoryEtcRemote    = "/etc/remote"
	FilerConfName         = "filer.conf"
	IamConfigDirectory    = "/etc/iam"
	IamIdentityFile       = "identity.json"
	IamPoliciesFile       = "policies.json"
)
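
// FilerConf holds the path-specific configuration rules loaded from
// /etc/seaweedfs/filer.conf, indexed by location prefix in a prefix trie.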
type FilerConf struct {
	rules ptrie.Trie
}
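
// ReadFilerConf fetches and parses filer.conf from /etc/seaweedfs on the filer at
// filerGrpcAddress. A missing conf file is not an error: an empty FilerConf is returned.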
func ReadFilerConf(filerGrpcAddress pb.ServerAddress, grpcDialOption grpc.DialOption, masterClient *wdclient.MasterClient) (*FilerConf, error) {
	var buf bytes.Buffer
	if err := pb.WithGrpcFilerClient(false, 0, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
		if masterClient != nil {
			return ReadEntry(masterClient, client, DirectoryEtcSeaweedFS, FilerConfName, &buf)
		} else {
			content, err := ReadInsideFiler(client, DirectoryEtcSeaweedFS, FilerConfName)
			buf = *bytes.NewBuffer(content)
			return err
		}
	}); err != nil && err != filer_pb.ErrNotFound {
		return nil, fmt.Errorf("read %s/%s: %v", DirectoryEtcSeaweedFS, FilerConfName, err)
	}

	fc := NewFilerConf()
	if buf.Len() > 0 {
		if err := fc.LoadFromBytes(buf.Bytes()); err != nil {
			return nil, fmt.Errorf("parse %s/%s: %v", DirectoryEtcSeaweedFS, FilerConfName, err)
		}
	}
	return fc, nil
}
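
// NewFilerConf returns an empty FilerConf with no location rules.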
func NewFilerConf() (fc *FilerConf) {
	fc = &FilerConf{
		rules: ptrie.New(),
	}
	return fc
}
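
// loadFromFiler reads the filer.conf entry from this filer's own store.
// A missing entry is treated as an empty configuration.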
func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
	filerConfPath := util.NewFullPath(DirectoryEtcSeaweedFS, FilerConfName)
	entry, err := filer.FindEntry(context.Background(), filerConfPath)
	if err != nil {
		if err == filer_pb.ErrNotFound {
			return nil
		}
		glog.Errorf("read filer conf entry %s: %v", filerConfPath, err)
		return
	}

	if len(entry.Content) > 0 {
		return fc.LoadFromBytes(entry.Content)
	}

	return fc.loadFromChunks(filer, entry.Content, entry.GetChunks(), entry.Size())
}
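
// loadFromChunks parses the conf from the inline content when present,
// otherwise it first reads the content from the entry's chunks.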
func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk, size uint64) (err error) {
	if len(content) == 0 {
		content, err = filer.readEntry(chunks, size)
		if err != nil {
			glog.Errorf("read filer conf content: %v", err)
			return
		}
	}

	return fc.LoadFromBytes(content)
}
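
// LoadFromBytes parses a JSON-encoded filer_pb.FilerConf and loads its location rules.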
func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
	conf := &filer_pb.FilerConf{}

	if err := jsonpb.Unmarshal(data, conf); err != nil {
		return err
	}

	return fc.doLoadConf(conf)
}
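
// doLoadConf indexes every location rule of the given conf by its location prefix.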
func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
	for _, location := range conf.Locations {
		err = fc.AddLocationConf(location)
		if err != nil {
			// this is not recoverable
			return nil
		}
	}
	return nil
}
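
// AddLocationConf registers a single path rule under its location prefix.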
func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
	err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
	if err != nil {
		glog.Errorf("put location prefix: %v", err)
	}
	return
}
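
// DeleteLocationConf drops the rule for the given location prefix by rebuilding
// the rule trie without that entry.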
func (fc *FilerConf) DeleteLocationConf(locationPrefix string) {
	rules := ptrie.New()
	fc.rules.Walk(func(key []byte, value interface{}) bool {
		if string(key) == locationPrefix {
			return true
		}
		key = bytes.Clone(key)
		_ = rules.Put(key, value)
		return true
	})
	fc.rules = rules
	return
}
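
// MatchStorageRule merges every rule whose location prefix matches the given path
// into one PathConf; non-empty values of later matches override earlier ones.
// Illustrative use only (path is hypothetical):
//
//	rule := fc.MatchStorageRule("/buckets/images/cat.jpg")
//	collection, ttl := rule.Collection, rule.Ttl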
func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf_PathConf) {
	pathConf = &filer_pb.FilerConf_PathConf{}
	fc.rules.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool {
		t := value.(*filer_pb.FilerConf_PathConf)
		mergePathConf(pathConf, t)
		return true
	})
	return pathConf
}
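
// GetCollectionTtls returns the TTL of every rule that uses the given collection,
// keyed by location prefix.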
func (fc *FilerConf) GetCollectionTtls(collection string) (ttls map[string]string) {
	ttls = make(map[string]string)
	fc.rules.Walk(func(key []byte, value interface{}) bool {
		t := value.(*filer_pb.FilerConf_PathConf)
		if t.Collection == collection {
			ttls[t.LocationPrefix] = t.GetTtl()
		}
		return true
	})
	return ttls
}

// mergePathConf merges the non-empty values of b into a
func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
	a.Collection = util.Nvl(b.Collection, a.Collection)
	a.Replication = util.Nvl(b.Replication, a.Replication)
	a.Ttl = util.Nvl(b.Ttl, a.Ttl)
	a.DiskType = util.Nvl(b.DiskType, a.DiskType)
	a.Fsync = b.Fsync || a.Fsync
	if b.VolumeGrowthCount > 0 {
		a.VolumeGrowthCount = b.VolumeGrowthCount
	}
	a.ReadOnly = b.ReadOnly || a.ReadOnly
	if b.MaxFileNameLength > 0 {
		a.MaxFileNameLength = b.MaxFileNameLength
	}
	a.DataCenter = util.Nvl(b.DataCenter, a.DataCenter)
	a.Rack = util.Nvl(b.Rack, a.Rack)
	a.DataNode = util.Nvl(b.DataNode, a.DataNode)
}
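
// ToProto collects all location rules back into a filer_pb.FilerConf message.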
func (fc *FilerConf) ToProto() *filer_pb.FilerConf {
	m := &filer_pb.FilerConf{}
	fc.rules.Walk(func(key []byte, value interface{}) bool {
		pathConf := value.(*filer_pb.FilerConf_PathConf)
		m.Locations = append(m.Locations, pathConf)
		return true
	})
	return m
}
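
// ToText renders the configuration onto writer via ProtoToText.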
func (fc *FilerConf) ToText(writer io.Writer) error {
	return ProtoToText(writer, fc.ToProto())
}