2018-05-06 13:47:16 +08:00
|
|
|
package filesys
|
|
|
|
|
|
|
|
import (
|
2018-05-28 02:52:26 +08:00
|
|
|
"context"
|
2019-01-06 14:23:44 +08:00
|
|
|
"os"
|
|
|
|
"sort"
|
|
|
|
"time"
|
|
|
|
|
2020-08-16 00:32:47 +08:00
|
|
|
"github.com/seaweedfs/fuse"
|
|
|
|
"github.com/seaweedfs/fuse/fs"
|
|
|
|
|
2020-09-01 15:21:19 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
2018-05-08 16:59:43 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
2018-05-10 14:18:02 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
2020-03-23 15:01:34 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
2018-05-06 13:47:16 +08:00
|
|
|
)
|
|
|
|
|
2018-09-28 00:42:24 +08:00
|
|
|
const blockSize = 512
|
2018-09-28 00:33:36 +08:00
|
|
|
|
2018-05-08 16:59:43 +08:00
|
|
|
var _ = fs.Node(&File{})
|
2021-04-16 17:55:09 +08:00
|
|
|
var _ = fs.NodeIdentifier(&File{})
|
2018-05-22 18:26:38 +08:00
|
|
|
var _ = fs.NodeOpener(&File{})
|
2018-05-16 15:54:27 +08:00
|
|
|
var _ = fs.NodeFsyncer(&File{})
|
2018-05-21 15:00:28 +08:00
|
|
|
var _ = fs.NodeSetattrer(&File{})
|
2019-12-16 13:07:01 +08:00
|
|
|
var _ = fs.NodeGetxattrer(&File{})
|
|
|
|
var _ = fs.NodeSetxattrer(&File{})
|
|
|
|
var _ = fs.NodeRemovexattrer(&File{})
|
|
|
|
var _ = fs.NodeListxattrer(&File{})
|
2020-01-21 12:21:01 +08:00
|
|
|
var _ = fs.NodeForgetter(&File{})
|
2018-05-08 16:59:43 +08:00
|
|
|
|
2018-05-06 13:47:16 +08:00
|
|
|
// File is a regular-file node in the FUSE mount.
type File struct {
	// Name is the base name of the file within its parent directory.
	Name string

	// dir is the parent directory node.
	dir *Dir

	// wfs is the owning filesystem, providing filer access and handle bookkeeping.
	wfs *WFS

	// entry is the cached filer entry; may be nil until loaded.
	entry *filer_pb.Entry

	// isOpen counts currently open handles for this file.
	isOpen int

	// dirtyMetadata is set when attributes/xattrs changed but have not been saved yet.
	dirtyMetadata bool

	// id is the inode number; also used as the key into wfs.handles.
	id uint64
}
|
|
|
|
|
2020-03-23 15:01:34 +08:00
|
|
|
func (file *File) fullpath() util.FullPath {
|
2020-03-26 15:08:14 +08:00
|
|
|
return util.NewFullPath(file.dir.FullPath(), file.Name)
|
2018-06-06 17:09:57 +08:00
|
|
|
}
|
2018-05-14 17:02:17 +08:00
|
|
|
|
2021-04-16 17:55:09 +08:00
|
|
|
func (file *File) Id() uint64 {
|
2021-04-18 01:48:22 +08:00
|
|
|
return file.id
|
2021-04-16 17:55:09 +08:00
|
|
|
}
|
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
// Attr fills in the FUSE attributes for this file from its filer entry.
// While the file is open, the reported size is the in-progress
// entry.Attributes.FileSize rather than the chunk-derived size.
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {

	glog.V(4).Infof("file Attr %s, open:%v existing:%v", file.fullpath(), file.isOpen, attr)

	entry, err := file.maybeLoadEntry(ctx)
	if err != nil {
		return err
	}

	if entry == nil {
		return fuse.ENOENT
	}

	attr.Inode = file.Id()
	// kernel may cache these attributes for one second
	attr.Valid = time.Second
	attr.Mode = os.FileMode(entry.Attributes.FileMode)
	attr.Size = filer.FileSize(entry)
	if file.isOpen > 0 {
		// an open file may have pending writes; trust the tracked size instead
		attr.Size = entry.Attributes.FileSize
		glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
	}
	attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
	// no separate change-time is stored; ctime mirrors mtime
	attr.Ctime = time.Unix(entry.Attributes.Mtime, 0)
	attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
	attr.Gid = entry.Attributes.Gid
	attr.Uid = entry.Attributes.Uid
	// NOTE(review): this over-counts by one block when Size is an exact
	// multiple of blockSize — confirm whether ceiling division was intended
	attr.Blocks = attr.Size/blockSize + 1
	attr.BlockSize = uint32(file.wfs.option.ChunkSizeLimit)
	if entry.HardLinkCounter > 0 {
		attr.Nlink = uint32(entry.HardLinkCounter)
	}

	return nil

}
|
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
|
|
|
|
|
2021-05-13 15:32:37 +08:00
|
|
|
// glog.V(4).Infof("file Getxattr %s", file.fullpath())
|
2019-12-16 13:07:01 +08:00
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
entry, err := file.maybeLoadEntry(ctx)
|
|
|
|
if err != nil {
|
2019-12-16 13:07:01 +08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
return getxattr(entry, req, resp)
|
2019-12-16 13:07:01 +08:00
|
|
|
}
|
|
|
|
|
2018-05-22 18:26:38 +08:00
|
|
|
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
|
2018-05-23 18:08:46 +08:00
|
|
|
|
2020-08-19 14:42:09 +08:00
|
|
|
glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
|
2021-12-29 05:44:52 +08:00
|
|
|
// resp.Flags |= fuse.OpenDirectIO
|
2018-05-21 15:00:28 +08:00
|
|
|
|
2021-12-20 17:11:43 +08:00
|
|
|
handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
|
2018-06-06 17:09:57 +08:00
|
|
|
|
|
|
|
resp.Handle = fuse.HandleID(handle.handle)
|
|
|
|
|
2020-08-19 14:42:26 +08:00
|
|
|
glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
|
2018-06-06 17:09:57 +08:00
|
|
|
|
|
|
|
return handle, nil
|
2018-05-21 15:00:28 +08:00
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setattr applies the attribute changes requested by the kernel (size,
// mode, uid/gid, crtime, mtime). Truncation drops or shrinks chunks in
// place and invalidates any open handle's chunk-view cache. Changes are
// persisted via saveEntry only when the file is not open and something
// actually changed; otherwise the open handle's flush will save them.
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {

	glog.V(4).Infof("%v file setattr %+v mode=%d", file.fullpath(), req, req.Mode)

	entry, err := file.maybeLoadEntry(ctx)
	if err != nil {
		return err
	}

	if req.Valid.Size() {

		glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(entry.Chunks))
		if req.Size < filer.FileSize(entry) {
			// fmt.Printf("truncate %v \n", fullPath)
			// keep the chunks that still hold data; collect fully-cut ones separately
			var chunks []*filer_pb.FileChunk
			var truncatedChunks []*filer_pb.FileChunk
			for _, chunk := range entry.Chunks {
				int64Size := int64(chunk.Size)
				if chunk.Offset+int64Size > int64(req.Size) {
					// this chunk is truncated
					int64Size = int64(req.Size) - chunk.Offset
					if int64Size > 0 {
						// chunk straddles the new end: keep it, shrink its size
						chunks = append(chunks, chunk)
						glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
						chunk.Size = uint64(int64Size)
					} else {
						// chunk lies entirely beyond the new end: drop it
						glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString())
						truncatedChunks = append(truncatedChunks, chunk)
					}
				}
			}
			// set the new chunks and reset entry cache
			entry.Chunks = chunks
			// an open handle may have a stale chunk view; force it to rebuild
			file.wfs.handlesLock.Lock()
			existingHandle, found := file.wfs.handles[file.Id()]
			file.wfs.handlesLock.Unlock()
			if found {
				existingHandle.entryViewCache = nil
			}

		}
		entry.Attributes.Mtime = time.Now().Unix()
		entry.Attributes.FileSize = req.Size
		file.dirtyMetadata = true
	}

	if req.Valid.Mode() && entry.Attributes.FileMode != uint32(req.Mode) {
		entry.Attributes.FileMode = uint32(req.Mode)
		entry.Attributes.Mtime = time.Now().Unix()
		file.dirtyMetadata = true
	}

	if req.Valid.Uid() && entry.Attributes.Uid != req.Uid {
		entry.Attributes.Uid = req.Uid
		entry.Attributes.Mtime = time.Now().Unix()
		file.dirtyMetadata = true
	}

	if req.Valid.Gid() && entry.Attributes.Gid != req.Gid {
		entry.Attributes.Gid = req.Gid
		entry.Attributes.Mtime = time.Now().Unix()
		file.dirtyMetadata = true
	}

	if req.Valid.Crtime() {
		entry.Attributes.Crtime = req.Crtime.Unix()
		entry.Attributes.Mtime = time.Now().Unix()
		file.dirtyMetadata = true
	}

	if req.Valid.Mtime() && entry.Attributes.Mtime != req.Mtime.Unix() {
		entry.Attributes.Mtime = req.Mtime.Unix()
		file.dirtyMetadata = true
	}

	if req.Valid.Handle() {
		// fmt.Printf("file handle => %d\n", req.Handle)
	}

	// while open, defer persistence to the handle's flush/release path
	if file.isOpen > 0 {
		return nil
	}

	if !file.dirtyMetadata {
		return nil
	}

	return file.saveEntry(entry)

}
|
2018-07-19 17:17:36 +08:00
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
|
2018-07-19 17:17:36 +08:00
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)
|
2018-05-21 15:00:28 +08:00
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
entry, err := file.maybeLoadEntry(ctx)
|
|
|
|
if err != nil {
|
2019-12-16 13:07:01 +08:00
|
|
|
return err
|
|
|
|
}
|
2018-05-21 15:00:28 +08:00
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
if err := setxattr(entry, req); err != nil {
|
2019-12-16 13:07:01 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-06-02 18:40:08 +08:00
|
|
|
file.dirtyMetadata = true
|
|
|
|
|
|
|
|
if file.isOpen > 0 {
|
|
|
|
return nil
|
|
|
|
}
|
2019-12-16 13:07:01 +08:00
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
return file.saveEntry(entry)
|
2018-05-20 04:51:44 +08:00
|
|
|
|
2018-05-06 13:47:16 +08:00
|
|
|
}
|
2018-09-17 03:37:06 +08:00
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
|
2019-12-14 02:05:43 +08:00
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)
|
2019-12-14 02:05:43 +08:00
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
entry, err := file.maybeLoadEntry(ctx)
|
|
|
|
if err != nil {
|
2019-12-16 13:07:01 +08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
if err := removexattr(entry, req); err != nil {
|
2019-12-16 13:07:01 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-06-02 18:40:08 +08:00
|
|
|
file.dirtyMetadata = true
|
|
|
|
|
|
|
|
if file.isOpen > 0 {
|
|
|
|
return nil
|
|
|
|
}
|
2019-12-14 02:05:43 +08:00
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
return file.saveEntry(entry)
|
2019-12-14 02:05:43 +08:00
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
}
|
2018-09-17 03:37:06 +08:00
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
|
2018-09-17 03:37:06 +08:00
|
|
|
|
2019-12-16 14:24:06 +08:00
|
|
|
glog.V(4).Infof("file Listxattr %s", file.fullpath())
|
2018-09-17 03:37:06 +08:00
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
entry, err := file.maybeLoadEntry(ctx)
|
|
|
|
if err != nil {
|
2019-12-16 13:07:01 +08:00
|
|
|
return err
|
|
|
|
}
|
2018-09-17 03:37:06 +08:00
|
|
|
|
2020-10-30 16:23:23 +08:00
|
|
|
if err := listxattr(entry, req, resp); err != nil {
|
2019-12-16 13:07:01 +08:00
|
|
|
return err
|
|
|
|
}
|
2018-09-17 03:37:06 +08:00
|
|
|
|
2019-12-18 12:38:56 +08:00
|
|
|
return nil
|
2018-11-08 03:35:13 +08:00
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
}
|
2018-09-17 03:37:06 +08:00
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
|
2022-01-10 16:52:16 +08:00
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
// write the file chunks to the filerGrpcAddress
|
2020-08-14 15:22:21 +08:00
|
|
|
glog.V(4).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
|
2019-12-16 13:07:01 +08:00
|
|
|
|
2022-01-10 16:52:16 +08:00
|
|
|
return file.wfs.Fsync(file, req.Header)
|
|
|
|
|
2019-12-16 13:07:01 +08:00
|
|
|
}
|
|
|
|
|
2020-01-21 12:21:01 +08:00
|
|
|
// Forget is called when the kernel evicts this node from its cache;
// the matching handle is released so its resources can be reclaimed.
func (file *File) Forget() {
	t := util.NewFullPath(file.dir.FullPath(), file.Name)
	glog.V(4).Infof("Forget file %s", t)
	// NOTE(review): file.entry may be nil if the entry was never loaded,
	// which would panic on FileMode() — confirm callers guarantee it is set
	file.wfs.ReleaseHandle(t, fuse.HandleID(t.AsInode(file.entry.FileMode())))

}
|
|
|
|
|
2021-03-16 17:59:26 +08:00
|
|
|
// maybeLoadEntry returns the filer entry for this file, preferring the
// entry held by an open handle, then the node's cached entry, and only
// falling back to a filer lookup when nothing is cached. Hard-linked
// entries are always re-read so shared metadata stays fresh.
func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, err error) {

	file.wfs.handlesLock.Lock()
	handle, found := file.wfs.handles[file.Id()]
	file.wfs.handlesLock.Unlock()
	entry = file.entry
	if found {
		// an open handle holds the most recent entry (it may have pending writes)
		// glog.V(4).Infof("maybeLoadEntry found opened file %s/%s", file.dir.FullPath(), file.Name)
		entry = handle.f.entry
	}

	if entry != nil {
		if len(entry.HardLinkId) == 0 {
			// only always reload hard link
			return entry, nil
		}
	}
	entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
	if err != nil {
		glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
		return entry, err
	}
	if entry != nil {
		// file.entry = entry
	} else {
		glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
	}
	return entry, nil
}
|
2019-01-06 07:16:39 +08:00
|
|
|
|
2020-11-04 04:21:42 +08:00
|
|
|
func lessThan(a, b *filer_pb.FileChunk) bool {
|
|
|
|
if a.Mtime == b.Mtime {
|
|
|
|
return a.Fid.FileKey < b.Fid.FileKey
|
|
|
|
}
|
|
|
|
return a.Mtime < b.Mtime
|
|
|
|
}
|
|
|
|
|
2019-01-06 07:16:39 +08:00
|
|
|
func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
|
2019-01-06 14:23:44 +08:00
|
|
|
|
2020-11-04 04:21:42 +08:00
|
|
|
// find the earliest incoming chunk
|
|
|
|
newChunks := chunks
|
|
|
|
earliestChunk := newChunks[0]
|
2020-11-16 08:59:28 +08:00
|
|
|
for i := 1; i < len(newChunks); i++ {
|
2020-11-04 04:21:42 +08:00
|
|
|
if lessThan(earliestChunk, newChunks[i]) {
|
|
|
|
earliestChunk = newChunks[i]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-10 15:08:38 +08:00
|
|
|
entry := file.getEntry()
|
|
|
|
if entry == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-11-04 04:21:42 +08:00
|
|
|
// pick out-of-order chunks from existing chunks
|
2021-03-10 15:08:38 +08:00
|
|
|
for _, chunk := range entry.Chunks {
|
2020-11-04 04:21:42 +08:00
|
|
|
if lessThan(earliestChunk, chunk) {
|
|
|
|
chunks = append(chunks, chunk)
|
2020-08-24 06:48:02 +08:00
|
|
|
}
|
2020-11-04 04:21:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// sort incoming chunks
|
|
|
|
sort.Slice(chunks, func(i, j int) bool {
|
|
|
|
return lessThan(chunks[i], chunks[j])
|
2019-01-06 14:23:44 +08:00
|
|
|
})
|
|
|
|
|
2021-03-10 15:08:38 +08:00
|
|
|
glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(entry.Chunks), len(chunks))
|
2019-06-22 03:05:00 +08:00
|
|
|
|
2021-03-10 15:08:38 +08:00
|
|
|
entry.Chunks = append(entry.Chunks, newChunks...)
|
2019-01-06 07:16:39 +08:00
|
|
|
}
|
2019-01-06 07:21:56 +08:00
|
|
|
|
2021-03-16 17:59:26 +08:00
|
|
|
// saveEntry persists the given entry to the filer via CreateEntry,
// then refreshes the local metadata cache and clears dirtyMetadata.
// Uid/gid fields are remapped from local to filer ids for the call and
// mapped back afterwards via the deferred call.
func (file *File) saveEntry(entry *filer_pb.Entry) error {
	return file.wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {

		file.wfs.mapPbIdFromLocalToFiler(entry)
		defer file.wfs.mapPbIdFromFilerToLocal(entry)

		request := &filer_pb.CreateEntryRequest{
			Directory: file.dir.FullPath(),
			Entry: entry,
			Signatures: []int32{file.wfs.signature},
		}

		glog.V(4).Infof("save file entry: %v", request)
		_, err := client.CreateEntry(context.Background(), request)
		if err != nil {
			glog.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
			// surface a generic I/O error to the kernel rather than the grpc error
			return fuse.EIO
		}

		file.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

		file.dirtyMetadata = false

		return nil
	})
}
|
2021-03-10 15:08:38 +08:00
|
|
|
|
2021-03-16 17:59:26 +08:00
|
|
|
func (file *File) getEntry() *filer_pb.Entry {
|
2021-03-10 15:08:38 +08:00
|
|
|
return file.entry
|
|
|
|
}
|
2021-08-10 13:11:57 +08:00
|
|
|
|
|
|
|
// downloadRemoteEntry asks the filer to cache a remote-mounted object's
// content onto the local cluster, updates the local metadata cache, and
// returns the refreshed entry (or the original entry alongside the error).
func (file *File) downloadRemoteEntry(entry *filer_pb.Entry) (*filer_pb.Entry, error) {
	err := file.wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.CacheRemoteObjectToLocalClusterRequest{
			Directory: file.dir.FullPath(),
			Name: entry.Name,
		}

		glog.V(4).Infof("download entry: %v", request)
		resp, err := client.CacheRemoteObjectToLocalCluster(context.Background(), request)
		if err != nil {
			glog.Errorf("CacheRemoteObjectToLocalCluster file %s/%s: %v", file.dir.FullPath(), file.Name, err)
			// surface a generic I/O error to the kernel rather than the grpc error
			return fuse.EIO
		}

		// rebind so the outer return value is the refreshed entry
		entry = resp.Entry

		file.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, resp.Entry))

		file.dirtyMetadata = false

		return nil
	})

	return entry, err
}
|