package weed_server

import (
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui"
	"github.com/chrislusf/seaweedfs/weed/util"

	"github.com/syndtr/goleveldb/leveldb"
)

// listDirectoryHandler lists directories and files under a directory.
// Files are sorted by name and paginated via "lastFileName" and "limit".
// Sub directories are listed on the first page, when "lastFileName"
// is empty.
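//
// Example (hypothetical host, port and path): a client can page through a
// directory as JSON roughly like this:
//
//	curl -H "Accept: application/json" "http://localhost:8888/some/dir/?limit=100"
//	curl -H "Accept: application/json" "http://localhost:8888/some/dir/?limit=100&lastFileName=<name from the previous page>"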
func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Request) {
	if !strings.HasSuffix(r.URL.Path, "/") {
		return
	}
	// default to 100 entries per page when "limit" is absent or not a number
	limit, limit_err := strconv.Atoi(r.FormValue("limit"))
	if limit_err != nil {
		limit = 100
	}

	lastFileName := r.FormValue("lastFileName")
	files, err := fs.filer.ListFiles(r.URL.Path, lastFileName, limit)

	if err == leveldb.ErrNotFound {
		glog.V(0).Infof("Error %s", err)
		w.WriteHeader(http.StatusNotFound)
		return
	}

	directories, err2 := fs.filer.ListDirectories(r.URL.Path)
	if err2 == leveldb.ErrNotFound {
		glog.V(0).Infof("Error %s", err2)
		w.WriteHeader(http.StatusNotFound)
		return
	}

	shouldDisplayLoadMore := len(files) > 0

	lastFileName = ""
	if len(files) > 0 {
		lastFileName = files[len(files)-1].Name

		// peek beyond the current page to decide whether a "Load More" link is needed
		files2, err3 := fs.filer.ListFiles(r.URL.Path, lastFileName, limit)
		if err3 == leveldb.ErrNotFound {
			glog.V(0).Infof("Error %s", err3)
			w.WriteHeader(http.StatusNotFound)
			return
		}
		shouldDisplayLoadMore = len(files2) > 0
	}

	args := struct {
		Path                  string
		Files                 interface{}
		Directories           interface{}
		Limit                 int
		LastFileName          string
		ShouldDisplayLoadMore bool
	}{
		r.URL.Path,
		files,
		directories,
		limit,
		lastFileName,
		shouldDisplayLoadMore,
	}
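
	// The same args are rendered either as JSON or through the directory
	// listing template below. A JSON response looks roughly like this
	// (illustrative values):
	//
	//	{"Path":"/some/dir/","Files":[...],"Directories":[...],
	//	 "Limit":100,"LastFileName":"file-099","ShouldDisplayLoadMore":true}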
	if r.Header.Get("Accept") == "application/json" {
		writeJsonQuiet(w, r, http.StatusOK, args)
	} else {
		ui.StatusTpl.Execute(w, args)
	}
}
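
// GetOrHeadHandler serves GET and HEAD reads. Directory paths (ending in "/")
// are delegated to listDirectoryHandler unless directory listing is disabled.
// File paths are resolved to a file id via the filer store, located through
// the master node, and then either redirected to or proxied from the volume
// server, depending on fs.redirectOnRead.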
func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) {
	if strings.HasSuffix(r.URL.Path, "/") {
		if fs.disableDirListing {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		fs.listDirectoryHandler(w, r)
		return
	}

	fileId, err := fs.filer.FindFile(r.URL.Path)
	if err == filer.ErrNotFound {
		glog.V(3).Infoln("Not found in db", r.URL.Path)
		w.WriteHeader(http.StatusNotFound)
		return
	}

	urlLocation, err := operation.LookupFileId(fs.getMasterNode(), fileId)
	if err != nil {
		glog.V(1).Infof("operation LookupFileId %s failed, err is %s", fileId, err.Error())
		w.WriteHeader(http.StatusNotFound)
		return
	}
	urlString := urlLocation
	if fs.redirectOnRead {
		http.Redirect(w, r, urlString, http.StatusFound)
		return
	}
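
	// Proxy mode: fetch the file from the volume server on behalf of the
	// client, merging the client's query parameters into the looked-up URL.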
	u, _ := url.Parse(urlString)
	q := u.Query()
	for key, values := range r.URL.Query() {
		for _, value := range values {
			q.Add(key, value)
		}
	}
	u.RawQuery = q.Encode()
	request := &http.Request{
		Method:        r.Method,
		URL:           u,
		Proto:         r.Proto,
		ProtoMajor:    r.ProtoMajor,
		ProtoMinor:    r.ProtoMinor,
		Header:        r.Header,
		Body:          r.Body,
		Host:          r.Host,
		ContentLength: r.ContentLength,
	}
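
	// The upstream request reuses the client's method, headers and body, so
	// things like Range and conditional headers pass through unchanged; the
	// volume server's response headers, status code and body are copied back
	// to the client below.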
	glog.V(3).Infoln("retrieving from", u)
	resp, do_err := util.Do(request)
	if do_err != nil {
		glog.V(0).Infoln("failed to connect to volume server", do_err.Error())
		writeJsonError(w, r, http.StatusInternalServerError, do_err)
		return
	}
	defer resp.Body.Close()
	for k, v := range resp.Header {
		w.Header()[k] = v
	}
	w.WriteHeader(resp.StatusCode)
	io.Copy(w, resp.Body)
}