package weed_server

import (
	"context"
	"io"
	"io/ioutil"
	"mime"
	"mime/multipart"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/stats"
	"github.com/chrislusf/seaweedfs/weed/util"
)
2016-07-21 12:20:22 +08:00
func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) {
2018-05-14 14:56:16 +08:00
path := r.URL.Path
isForDirectory := strings.HasSuffix(path, "/")
if isForDirectory && len(path) > 1 {
2018-05-14 14:56:16 +08:00
path = path[:len(path)-1]
}
2019-03-16 06:55:34 +08:00
entry, err := fs.filer.FindEntry(context.Background(), filer2.FullPath(path))
if err != nil {
2018-10-12 15:45:28 +08:00
if path == "/" {
fs.listDirectoryHandler(w, r)
return
}
if err == filer2.ErrNotFound {
glog.V(1).Infof("Not found %s: %v", path, err)
stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc()
w.WriteHeader(http.StatusNotFound)
} else {
glog.V(0).Infof("Internal %s: %v", path, err)
stats.FilerRequestCounter.WithLabelValues("read.internalerror").Inc()
w.WriteHeader(http.StatusInternalServerError)
}
2018-05-14 14:56:16 +08:00
return
}
if entry.IsDirectory() {
2018-07-07 17:18:47 +08:00
if fs.option.DisableDirListing {
2016-07-21 12:20:22 +08:00
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
fs.listDirectoryHandler(w, r)
return
}
if isForDirectory {
w.WriteHeader(http.StatusNotFound)
return
}
2018-05-14 14:56:16 +08:00
if len(entry.Chunks) == 0 {
glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr)
2019-06-16 03:21:44 +08:00
stats.FilerRequestCounter.WithLabelValues("read.nocontent").Inc()
2018-05-14 14:56:16 +08:00
w.WriteHeader(http.StatusNoContent)
2016-07-21 12:20:22 +08:00
return
}
w.Header().Set("Accept-Ranges", "bytes")
if r.Method == "HEAD" {
w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))
w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat))
setEtag(w, filer2.ETag(entry.Chunks))
return
}
if len(entry.Chunks) == 1 {
fs.handleSingleChunk(w, r, entry)
return
}
fs.handleMultipleChunks(w, r, entry)
}
func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {
2019-06-23 11:04:56 +08:00
fileId := entry.Chunks[0].GetFileIdString()
2018-05-14 14:56:16 +08:00
urlString, err := fs.filer.MasterClient.LookupFileId(fileId)
2016-07-21 12:20:22 +08:00
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err)
2016-07-21 12:20:22 +08:00
w.WriteHeader(http.StatusNotFound)
return
}
2018-07-07 17:18:47 +08:00
if fs.option.RedirectOnRead {
2019-06-23 13:53:52 +08:00
stats.FilerRequestCounter.WithLabelValues("redirect").Inc()
2016-07-21 12:20:22 +08:00
http.Redirect(w, r, urlString, http.StatusFound)
return
}
2016-07-21 12:20:22 +08:00
u, _ := url.Parse(urlString)
q := u.Query()
for key, values := range r.URL.Query() {
for _, value := range values {
q.Add(key, value)
}
}
u.RawQuery = q.Encode()
request := &http.Request{
Method: r.Method,
URL: u,
Proto: r.Proto,
ProtoMajor: r.ProtoMajor,
ProtoMinor: r.ProtoMinor,
Header: r.Header,
Body: r.Body,
Host: r.Host,
ContentLength: r.ContentLength,
}
glog.V(3).Infoln("retrieving from", u)
resp, do_err := util.Do(request)
if do_err != nil {
glog.V(0).Infoln("failing to connect to volume server", do_err.Error())
writeJsonError(w, r, http.StatusInternalServerError, do_err)
return
}
2019-04-15 14:28:24 +08:00
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
2016-07-21 12:20:22 +08:00
for k, v := range resp.Header {
w.Header()[k] = v
}
if entry.Attr.Mime != "" {
w.Header().Set("Content-Type", entry.Attr.Mime)
}
2016-07-21 12:20:22 +08:00
w.WriteHeader(resp.StatusCode)
io.Copy(w, resp.Body)
}
// handleMultipleChunks serves a file stored as multiple chunks, honoring
// HTTP Range requests: full content when no Range header is present, a
// single 206 response for one range, or a multipart/byteranges body for
// several ranges. The range-handling logic is adapted from net/http's
// fs.go, as noted below.
func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {

	// Prefer the mime type recorded on the entry; otherwise guess from
	// the file extension.
	mimeType := entry.Attr.Mime
	if mimeType == "" {
		if ext := path.Ext(entry.Name()); ext != "" {
			mimeType = mime.TypeByExtension(ext)
		}
	}
	if mimeType != "" {
		w.Header().Set("Content-Type", mimeType)
	}

	setEtag(w, filer2.ETag(entry.Chunks))

	totalSize := int64(filer2.TotalSize(entry.Chunks))

	rangeReq := r.Header.Get("Range")

	if rangeReq == "" {
		// No Range header: stream the entire file.
		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
		if err := fs.writeContent(w, entry, 0, int(totalSize)); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		return
	}

	//the rest is dealing with partial content request
	//mostly copy from src/pkg/net/http/fs.go
	ranges, err := parseRange(rangeReq, totalSize)
	if err != nil {
		http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
		return
	}
	if sumRangesSize(ranges) > totalSize {
		// The total number of bytes in all the ranges
		// is larger than the size of the file by
		// itself, so this is probably an attack, or a
		// dumb client. Ignore the range request.
		return
	}
	if len(ranges) == 0 {
		return
	}
	if len(ranges) == 1 {
		// RFC 2616, Section 14.16:
		// "When an HTTP message includes the content of a single
		// range (for example, a response to a request for a
		// single range, or to a request for a set of ranges
		// that overlap without any holes), this content is
		// transmitted with a Content-Range header, and a
		// Content-Length header showing the number of bytes
		// actually transferred.
		// ...
		// A response to a request for a single range MUST NOT
		// be sent using the multipart/byteranges media type."
		ra := ranges[0]
		w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
		w.Header().Set("Content-Range", ra.contentRange(totalSize))
		w.WriteHeader(http.StatusPartialContent)

		err = fs.writeContent(w, entry, ra.start, int(ra.length))
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		return
	}

	// process multiple ranges: the parts are generated by a goroutine on the
	// writer side of a pipe while this goroutine copies the reader side to
	// the client, so the whole multipart body is never buffered in memory.
	for _, ra := range ranges {
		if ra.start > totalSize {
			http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
			return
		}
	}
	sendSize := rangesMIMESize(ranges, mimeType, totalSize)
	pr, pw := io.Pipe()
	mw := multipart.NewWriter(pw)
	w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
	sendContent := pr
	defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
	go func() {
		// Emit one MIME part per requested range; any failure is propagated
		// through the pipe so the client-side copy below terminates.
		for _, ra := range ranges {
			part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
			if e != nil {
				pw.CloseWithError(e)
				return
			}
			if e = fs.writeContent(part, entry, ra.start, int(ra.length)); e != nil {
				pw.CloseWithError(e)
				return
			}
		}
		mw.Close()
		pw.Close()
	}()
	if w.Header().Get("Content-Encoding") == "" {
		w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
	}
	w.WriteHeader(http.StatusPartialContent)
	if _, err := io.CopyN(w, sendContent, sendSize); err != nil {
		// NOTE(review): headers are already sent at this point, so this
		// http.Error cannot change the status code — it matches the
		// original behavior and at best appends to the body.
		http.Error(w, "Internal Error", http.StatusInternalServerError)
		return
	}
}
func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error {
return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, size)
2019-04-22 06:43:43 +08:00
}