2018-05-06 13:47:16 +08:00
|
|
|
package filesys
|
|
|
|
|
|
|
|
import (
|
2018-05-06 14:39:29 +08:00
|
|
|
"context"
|
2018-05-06 13:47:16 +08:00
|
|
|
"fmt"
|
2018-05-06 14:39:29 +08:00
|
|
|
|
|
|
|
"bazil.org/fuse"
|
2018-05-08 16:59:43 +08:00
|
|
|
"bazil.org/fuse/fs"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
2018-05-10 14:18:02 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
2018-05-14 17:02:17 +08:00
|
|
|
"path/filepath"
|
|
|
|
"os"
|
|
|
|
"time"
|
2018-05-16 15:08:44 +08:00
|
|
|
"bytes"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
2018-05-21 15:00:28 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/filer2"
|
2018-05-06 13:47:16 +08:00
|
|
|
)
|
|
|
|
|
2018-05-08 16:59:43 +08:00
|
|
|
var _ = fs.Node(&File{})
|
|
|
|
// var _ = fs.NodeOpener(&File{})
|
2018-05-16 15:54:27 +08:00
|
|
|
var _ = fs.NodeFsyncer(&File{})
|
2018-05-08 16:59:43 +08:00
|
|
|
var _ = fs.Handle(&File{})
|
|
|
|
var _ = fs.HandleReadAller(&File{})
|
|
|
|
// var _ = fs.HandleReader(&File{})
|
2018-05-16 15:08:44 +08:00
|
|
|
var _ = fs.HandleFlusher(&File{})
|
2018-05-08 16:59:43 +08:00
|
|
|
var _ = fs.HandleWriter(&File{})
|
2018-05-20 04:51:44 +08:00
|
|
|
var _ = fs.HandleReleaser(&File{})
|
2018-05-21 15:00:28 +08:00
|
|
|
var _ = fs.NodeSetattrer(&File{})
|
2018-05-08 16:59:43 +08:00
|
|
|
|
2018-05-06 13:47:16 +08:00
|
|
|
// File is a regular file in the FUSE mount. It serves both as the fs.Node
// and as the open fs.Handle (see the interface assertions above).
type File struct {
	// Chunks is the in-memory list of data chunks appended by Write;
	// it is sent to the filer on Flush and cleared on truncate-to-zero
	// in Setattr.
	Chunks []*filer_pb.FileChunk
	// Name is the file's base name within its parent directory.
	Name string
	// dir is the parent directory node; filepath.Join(dir.Path, Name)
	// forms the file's full path.
	dir *Dir
	// wfs is the owning filesystem, providing the filer client and the
	// directory-entry attribute cache.
	wfs *WFS
}
|
|
|
|
|
|
|
|
func (file *File) Attr(context context.Context, attr *fuse.Attr) error {
|
2018-05-14 17:02:17 +08:00
|
|
|
fullPath := filepath.Join(file.dir.Path, file.Name)
|
|
|
|
item := file.wfs.listDirectoryEntriesCache.Get(fullPath)
|
|
|
|
var attributes *filer_pb.FuseAttributes
|
|
|
|
if item != nil {
|
|
|
|
attributes = item.Value().(*filer_pb.FuseAttributes)
|
|
|
|
glog.V(1).Infof("read cached file %v attributes", file.Name)
|
|
|
|
} else {
|
|
|
|
err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
|
|
|
|
|
|
|
request := &filer_pb.GetFileAttributesRequest{
|
|
|
|
Name: file.Name,
|
|
|
|
ParentDir: file.dir.Path,
|
|
|
|
}
|
|
|
|
|
|
|
|
glog.V(1).Infof("read file size: %v", request)
|
|
|
|
resp, err := client.GetFileAttributes(context, request)
|
|
|
|
if err != nil {
|
2018-05-20 04:51:44 +08:00
|
|
|
glog.V(0).Infof("read file attributes %v: %v", request, err)
|
2018-05-14 17:02:17 +08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
attributes = resp.Attributes
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
2018-05-08 16:59:43 +08:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-05-14 17:02:17 +08:00
|
|
|
}
|
2018-05-08 16:59:43 +08:00
|
|
|
|
2018-05-14 17:02:17 +08:00
|
|
|
attr.Mode = os.FileMode(attributes.FileMode)
|
|
|
|
attr.Size = attributes.FileSize
|
|
|
|
attr.Mtime = time.Unix(attributes.Mtime, 0)
|
|
|
|
attr.Gid = attributes.Gid
|
|
|
|
attr.Uid = attributes.Uid
|
2018-05-20 04:51:44 +08:00
|
|
|
|
2018-05-14 17:02:17 +08:00
|
|
|
return nil
|
2018-05-08 16:59:43 +08:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-05-21 15:00:28 +08:00
|
|
|
func (file *File) xOpen(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
|
|
|
|
fullPath := filepath.Join(file.dir.Path, file.Name)
|
|
|
|
|
|
|
|
fmt.Printf("Open %v %+v\n", fullPath, req)
|
|
|
|
|
|
|
|
return file, nil
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
|
|
|
|
fullPath := filepath.Join(file.dir.Path, file.Name)
|
|
|
|
|
|
|
|
fmt.Printf("Setattr %v %+v\n", fullPath, req)
|
|
|
|
if req.Valid.Size() && req.Size == 0 {
|
|
|
|
fmt.Printf("truncate %v \n", fullPath)
|
|
|
|
file.Chunks = nil
|
|
|
|
resp.Attr.Size = 0
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-05-08 16:59:43 +08:00
|
|
|
// ReadAll returns the file's content for the fs.HandleReadAller interface.
//
// NOTE(review): only the first compacted chunk is fetched below, so a file
// written as multiple chunks is returned truncated — see the pre-existing
// FIXME inside the closure.
func (file *File) ReadAll(ctx context.Context) (content []byte, err error) {

	fmt.Printf("read all file %+v/%v\n", file.dir.Path, file.Name)

	// No chunks means nothing was written (or the file was truncated);
	// return nil content with no error.
	if len(file.Chunks) == 0 {
		glog.V(0).Infof("empty file %v/%v", file.dir.Path, file.Name)
		return
	}

	err = file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		// FIXME: need to either use Read() or implement differently
		chunks, _ := filer2.CompactFileChunks(file.Chunks)
		glog.V(1).Infof("read file %v/%v %d/%d chunks", file.dir.Path, file.Name, len(chunks), len(file.Chunks))
		// Only chunks[0] is requested; any further chunks are ignored.
		request := &filer_pb.GetFileContentRequest{
			FileId: chunks[0].FileId,
		}

		glog.V(1).Infof("read file content %d chunk %s [%d,%d): %v", len(chunks),
			chunks[0].FileId, chunks[0].Offset, chunks[0].Offset+int64(chunks[0].Size), request)
		resp, err := client.GetFileContent(ctx, request)
		if err != nil {
			return err
		}

		// Assign through the named result so the value survives the closure.
		content = resp.Content

		return nil
	})

	return content, err
}
|
|
|
|
|
2018-05-16 15:54:27 +08:00
|
|
|
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
|
|
|
|
// fsync works at OS level
|
2018-05-16 15:08:44 +08:00
|
|
|
// write the file chunks to the filer
|
|
|
|
fmt.Printf("flush file %+v\n", req)
|
|
|
|
|
2018-05-16 15:54:27 +08:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (file *File) Flush(ctx context.Context, req *fuse.FlushRequest) error {
|
|
|
|
// fflush works at file level
|
|
|
|
// send the data to the OS
|
2018-05-20 04:51:44 +08:00
|
|
|
glog.V(3).Infof("file flush %v", req)
|
|
|
|
|
|
|
|
if len(file.Chunks) == 0 {
|
|
|
|
glog.V(2).Infof("file flush skipping empty %v", req)
|
|
|
|
return nil
|
|
|
|
}
|
2018-05-16 15:54:27 +08:00
|
|
|
|
2018-05-16 15:08:44 +08:00
|
|
|
err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
|
|
|
|
2018-05-21 15:00:28 +08:00
|
|
|
request := &filer_pb.SetFileChunksRequest{
|
2018-05-16 15:08:44 +08:00
|
|
|
Directory: file.dir.Path,
|
|
|
|
Entry: &filer_pb.Entry{
|
|
|
|
Name: file.Name,
|
|
|
|
Chunks: file.Chunks,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
glog.V(1).Infof("append chunks: %v", request)
|
2018-05-21 15:00:28 +08:00
|
|
|
if _, err := client.SetFileChunks(ctx, request); err != nil {
|
2018-05-16 15:08:44 +08:00
|
|
|
return fmt.Errorf("create file: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-05-08 16:59:43 +08:00
|
|
|
// Write uploads the request's data as a new chunk to a volume server and
// appends it to file.Chunks. The chunk list is only persisted to the
// filer later, on Flush.
func (file *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	// write the request to volume servers

	// fmt.Printf("write file %+v\n", req)

	var fileId, host string

	// Step 1: ask the filer to assign a file id and volume-server URL.
	if err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		// NOTE(review): replication "000" and an empty collection are
		// hard-coded — confirm whether these should come from mount options.
		request := &filer_pb.AssignVolumeRequest{
			Count:       1,
			Replication: "000",
			Collection:  "",
		}

		glog.V(1).Infof("assign volume: %v", request)
		resp, err := client.AssignVolume(ctx, request)
		if err != nil {
			return err
		}

		fileId, host = resp.FileId, resp.Url

		return nil
	}); err != nil {
		return fmt.Errorf("filer assign volume: %v", err)
	}

	// Step 2: upload the raw bytes directly to the assigned volume server.
	fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
	bufReader := bytes.NewReader(req.Data)
	uploadResult, err := operation.Upload(fileUrl, file.Name, bufReader, false, "application/octet-stream", nil, "")
	if err != nil {
		return fmt.Errorf("upload data: %v", err)
	}
	// The upload can also fail application-side with a non-empty Error field.
	if uploadResult.Error != "" {
		return fmt.Errorf("upload result: %v", uploadResult.Error)
	}

	resp.Size = int(uploadResult.Size)

	// Step 3: record the chunk at the requested offset. Mtime is
	// presumably used to order overlapping chunks during compaction —
	// confirm against filer2.CompactFileChunks.
	file.Chunks = append(file.Chunks, &filer_pb.FileChunk{
		FileId: fileId,
		Offset: req.Offset,
		Size:   uint64(uploadResult.Size),
		Mtime:  time.Now().UnixNano(),
	})

	glog.V(1).Infof("uploaded %s/%s to: %v, [%d,%d)", file.dir.Path, file.Name, fileUrl, req.Offset, req.Offset+int64(resp.Size))

	return nil
}
|
|
|
|
|
|
|
|
func (file *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
|
|
|
|
|
|
|
|
// fmt.Printf("release file %+v\n", req)
|
|
|
|
|
2018-05-08 16:59:43 +08:00
|
|
|
return nil
|
2018-05-06 13:47:16 +08:00
|
|
|
}
|