mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-11-25 11:39:12 +08:00)

commit aba47fd9e8 (parent d084334ffd): reduce repeated calls to dir.FullPath()
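The diff below touches the FUSE layer: Dir.Lookup, Dir.ReadDirAll, and FileHandle.readFromChunks each computed the same full path several times per call (for logging, cache lookups, and child-path construction); after this change the path is evaluated once and kept in a local (dirPath, fileFullPath). A condensed before/after of the Lookup case, with surrounding code omitted:

// Before: dir.FullPath() is evaluated for the log line and again for the child path.
glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)

// After: the directory path is computed once and reused.
dirPath := util.FullPath(dir.FullPath())
glog.V(4).Infof("dir Lookup %s: %s by %s", dirPath, req.Name, req.Header.String())
fullFilePath := dirPath.Child(req.Name)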
@@ -251,10 +251,10 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
 
 func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {
 
-	glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
-
-	fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)
 	dirPath := util.FullPath(dir.FullPath())
+	glog.V(4).Infof("dir Lookup %s: %s by %s", dirPath, req.Name, req.Header.String())
+
+	fullFilePath := dirPath.Child(req.Name)
 	visitErr := meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
 	if visitErr != nil {
 		glog.Errorf("dir Lookup %s: %v", dirPath, visitErr)
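The Lookup hunk swaps util.NewFullPath(dir.FullPath(), req.Name) for dirPath.Child(req.Name). For readers unfamiliar with the helper, here is a rough, self-contained approximation of the util.FullPath type as this diff uses it; the real implementation lives in the seaweedfs util package and may differ in details:

package main

import (
	"fmt"
	"strings"
)

// FullPath approximates util.FullPath: a string-backed path type where Child
// joins a name onto the directory path. This sketch only mirrors the behavior
// the diff relies on, i.e. NewFullPath(dir, name) == FullPath(dir).Child(name).
type FullPath string

func NewFullPath(dir, name string) FullPath {
	return FullPath(dir).Child(name)
}

func (fp FullPath) Child(name string) FullPath {
	dir := string(fp)
	child := strings.TrimPrefix(name, "/")
	if strings.HasSuffix(dir, "/") {
		return FullPath(dir + child)
	}
	return FullPath(dir + "/" + child)
}

func main() {
	dirPath := FullPath("/buckets/photos")
	fmt.Println(dirPath.Child("cat.jpg")) // /buckets/photos/cat.jpg
	fmt.Println(NewFullPath("/", "topdir")) // /topdir
}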
@@ -305,7 +305,8 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
 
 func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
 
-	glog.V(4).Infof("dir ReadDirAll %s", dir.FullPath())
+	dirPath := util.FullPath(dir.FullPath())
+	glog.V(4).Infof("dir ReadDirAll %s", dirPath)
 
 	processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
 		if entry.IsDirectory {
@@ -318,12 +319,11 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
 		return nil
 	}
 
-	dirPath := util.FullPath(dir.FullPath())
 	if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil {
 		glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
 		return nil, fuse.EIO
 	}
-	listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool {
+	listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool {
 		processEachEntryFn(entry.ToProtoEntry(), false)
 		return true
 	})
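In ReadDirAll the dirPath local moves to the top of the function, so the same value now feeds the log line, EnsureVisited, and ListDirectoryEntries. The payoff matters only if, as the commit message suggests, dir.FullPath() rebuilds its result on every call; the toy program below (not seaweedfs code, just an assumed stand-in) illustrates how hoisting such a call out of repeated uses avoids redoing the string construction:

package main

import (
	"fmt"
	"strings"
	"time"
)

// node is a toy stand-in for a Dir whose fullPath() rebuilds the path string
// by walking the parent chain on every call (an assumption about why the
// repeated calls were worth removing).
type node struct {
	name   string
	parent *node
}

func (n *node) fullPath() string {
	var names []string
	for p := n; p != nil; p = p.parent {
		names = append(names, p.name)
	}
	var b strings.Builder
	for i := len(names) - 1; i >= 0; i-- {
		b.WriteString("/")
		b.WriteString(names[i])
	}
	return b.String()
}

func main() {
	leaf := &node{name: "c", parent: &node{name: "b", parent: &node{name: "a"}}}
	const iters = 1_000_000

	start := time.Now()
	for i := 0; i < iters; i++ {
		_ = leaf.fullPath() // e.g. once for the log line ...
		_ = leaf.fullPath() // ... and once more for the lookup
	}
	fmt.Println("repeated calls:", time.Since(start))

	start = time.Now()
	for i := 0; i < iters; i++ {
		p := leaf.fullPath() // computed once, reused below
		_, _ = p, p
	}
	fmt.Println("hoisted local: ", time.Since(start))
}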
@@ -111,15 +111,16 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 	}
 
 	fileSize := int64(filer.FileSize(entry))
+	fileFullPath := fh.f.fullpath()
 
 	if fileSize == 0 {
-		glog.V(1).Infof("empty fh %v", fh.f.fullpath())
+		glog.V(1).Infof("empty fh %v", fileFullPath)
 		return 0, io.EOF
 	}
 
 	if offset+int64(len(buff)) <= int64(len(entry.Content)) {
 		totalRead := copy(buff, entry.Content[offset:])
-		glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead)
+		glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fileFullPath, offset, offset+int64(totalRead), totalRead)
 		return int64(totalRead), nil
 	}
 
@@ -142,10 +143,10 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 	totalRead, err := reader.ReadAt(buff, offset)
 
 	if err != nil && err != io.EOF {
-		glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
+		glog.Errorf("file handle read %s: %v", fileFullPath, err)
 	}
 
-	glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
+	glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err)
 
 	return int64(totalRead), err
 }
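The readFromChunks hunks apply the same idea to fh.f.fullpath(), whose value now feeds the V(1), V(4), and Errorf log sites. As a side note for reading those sites: glog.V(n) statements only emit when the process runs at verbosity n or higher, while Errorf always logs. A minimal sketch using the upstream github.com/golang/glog package (seaweedfs bundles its own glog variant with the same Infof/Errorf/V surface, so treat this as an approximation):

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// glog is configured through flags; -v=4 enables the V(4) debug sites
	// seen in readFromChunks, -logtostderr sends output to stderr.
	flag.Set("logtostderr", "true")
	flag.Set("v", "4")
	flag.Parse()
	defer glog.Flush()

	fileFullPath := "/buckets/photos/cat.jpg" // stands in for fh.f.fullpath()

	glog.V(1).Infof("empty fh %v", fileFullPath)                                          // emitted when -v >= 1
	glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fileFullPath, 0, 4096, 4096) // emitted when -v >= 4
	glog.Errorf("file handle read %s: %v", fileFullPath, "example error")                 // always emitted
}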